repo (string, lengths 2-99) | file (string, lengths 13-225) | code (string, lengths 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 distinct value) |
---|---|---|---|---|---|---|
models | models-master/research/object_detection/tpu_exporters/export_saved_model_tpu.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python binary for exporting SavedModel, tailored for TPU inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.tpu_exporters import export_saved_model_tpu_lib
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('pipeline_config_file', None,
'A pipeline_pb2.TrainEvalPipelineConfig config file.')
flags.DEFINE_string(
'ckpt_path', None, 'Path to trained checkpoint, typically of the form '
'path/to/model.ckpt')
flags.DEFINE_string('export_dir', None, 'Path to export SavedModel.')
flags.DEFINE_string('input_placeholder_name', 'placeholder_tensor',
'Name of input placeholder in model\'s signature_def_map.')
flags.DEFINE_string(
'input_type', 'tf_example', 'Type of input node. Can be '
'one of [`image_tensor`, `encoded_image_string_tensor`, '
'`tf_example`]')
flags.DEFINE_boolean('use_bfloat16', False, 'If true, use tf.bfloat16 on TPU.')
def main(argv):
if len(argv) > 1:
raise tf.app.UsageError('Too many command-line arguments.')
export_saved_model_tpu_lib.export(FLAGS.pipeline_config_file, FLAGS.ckpt_path,
FLAGS.export_dir,
FLAGS.input_placeholder_name,
FLAGS.input_type, FLAGS.use_bfloat16)
if __name__ == '__main__':
tf.app.flags.mark_flag_as_required('pipeline_config_file')
tf.app.flags.mark_flag_as_required('ckpt_path')
tf.app.flags.mark_flag_as_required('export_dir')
tf.app.run()
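# --- Illustrative invocation (editorial sketch, not part of the original file).
# Example command line for this binary; every path below is a placeholder that
# must be replaced with a real pipeline config, checkpoint, and output folder:
#
#   python export_saved_model_tpu.py \
#     --pipeline_config_file=/path/to/pipeline.config \
#     --ckpt_path=/path/to/model.ckpt \
#     --export_dir=/path/to/saved_model_dir \
#     --input_type=tf_example \
#     --use_bfloat16=false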
| 2,315 | 41.109091 | 80 | py |
models | models-master/research/object_detection/tpu_exporters/testdata/__init__.py | 1 | 0 | 0 | py |
|
models | models-master/research/object_detection/protos/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/object_detection/packages/tf1/setup.py | """Setup script for object_detection with TF1.0."""
import os
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['pillow', 'lxml', 'matplotlib', 'Cython',
'contextlib2', 'tf-slim', 'six', 'pycocotools', 'lvis',
'scipy', 'pandas']
setup(
name='object_detection',
version='0.1',
install_requires=REQUIRED_PACKAGES,
include_package_data=True,
packages=(
[p for p in find_packages() if p.startswith('object_detection')] +
find_packages(where=os.path.join('.', 'slim'))),
package_dir={
'datasets': os.path.join('slim', 'datasets'),
'nets': os.path.join('slim', 'nets'),
'preprocessing': os.path.join('slim', 'preprocessing'),
'deployment': os.path.join('slim', 'deployment'),
'scripts': os.path.join('slim', 'scripts'),
},
description='Tensorflow Object Detection Library with TF1.0',
python_requires='>3.6',
)
| 978 | 33.964286 | 76 | py |
models | models-master/research/object_detection/packages/tf2/setup.py | """Setup script for object_detection with TF2.0."""
import os
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = [
# Required for apache-beam with PY3
'avro-python3',
'apache-beam',
'pillow',
'lxml',
'matplotlib',
'Cython',
'contextlib2',
'tf-slim',
'six',
'pycocotools',
'lvis',
'scipy',
'pandas',
'tf-models-official>=2.5.1',
'tensorflow_io',
'keras',
'pyparsing==2.4.7', # TODO(b/204103388)
'sacrebleu<=2.2.0' # https://github.com/mjpost/sacrebleu/issues/209
]
setup(
name='object_detection',
version='0.1',
install_requires=REQUIRED_PACKAGES,
include_package_data=True,
packages=(
[p for p in find_packages() if p.startswith('object_detection')] +
find_packages(where=os.path.join('.', 'slim'))),
package_dir={
'datasets': os.path.join('slim', 'datasets'),
'nets': os.path.join('slim', 'nets'),
'preprocessing': os.path.join('slim', 'preprocessing'),
'deployment': os.path.join('slim', 'deployment'),
'scripts': os.path.join('slim', 'scripts'),
},
description='Tensorflow Object Detection Library',
python_requires='>3.6',
)
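# --- Illustrative usage (editorial note, not part of the original script).
# Given the package_dir entries above, this setup.py is expected to sit next to
# the object_detection/ and slim/ directories; a typical install from that
# directory would then be:
#
#   python -m pip install .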
| 1,236 | 25.891304 | 74 | py |
models | models-master/research/object_detection/box_coders/square_box_coder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Square box coder.
Square box coder follows the coding schema described below:
l = sqrt(h * w)
la = sqrt(ha * wa)
ty = (y - ya) / la
tx = (x - xa) / la
tl = log(l / la)
where x, y, w, h denote the box's center coordinates, width, and height,
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tl denote the anchor-encoded
center, and length, respectively. Because the encoded box is a square, only
one length is encoded.
This has been shown to provide performance improvements over the Faster RCNN box
coder when the objects being detected tend to be square (e.g. faces) and when
the input images are not distorted via resizing.
"""
import tensorflow.compat.v1 as tf
from object_detection.core import box_coder
from object_detection.core import box_list
EPSILON = 1e-8
class SquareBoxCoder(box_coder.BoxCoder):
"""Encodes a 3-scalar representation of a square box."""
def __init__(self, scale_factors=None):
"""Constructor for SquareBoxCoder.
Args:
scale_factors: List of 3 positive scalars to scale ty, tx, and tl.
If set to None, does not perform scaling. For Faster RCNN,
the open-source implementation recommends using [10.0, 10.0, 5.0].
Raises:
ValueError: If scale_factors is not length 3 or contains values less than
or equal to 0.
"""
if scale_factors:
if len(scale_factors) != 3:
raise ValueError('The argument scale_factors must be a list of length '
'3.')
if any(scalar <= 0 for scalar in scale_factors):
raise ValueError('The values in scale_factors must all be greater '
'than 0.')
self._scale_factors = scale_factors
@property
def code_size(self):
return 3
def _encode(self, boxes, anchors):
"""Encodes a box collection with respect to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, tl].
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
la = tf.sqrt(ha * wa)
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
l = tf.sqrt(h * w)
# Avoid NaN in division and log below.
la += EPSILON
l += EPSILON
tx = (xcenter - xcenter_a) / la
ty = (ycenter - ycenter_a) / la
tl = tf.log(l / la)
# Scales location targets for joint training.
if self._scale_factors:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
tl *= self._scale_factors[2]
return tf.transpose(tf.stack([ty, tx, tl]))
def _decode(self, rel_codes, anchors):
"""Decodes relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
la = tf.sqrt(ha * wa)
ty, tx, tl = tf.unstack(tf.transpose(rel_codes))
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
tl /= self._scale_factors[2]
l = tf.exp(tl) * la
ycenter = ty * la + ycenter_a
xcenter = tx * la + xcenter_a
ymin = ycenter - l / 2.
xmin = xcenter - l / 2.
ymax = ycenter + l / 2.
xmax = xcenter + l / 2.
return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
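# --- Illustrative usage sketch (editorial addition, not part of the original
# module). A minimal round trip through the [ty, tx, tl] encoding described in
# the module docstring; the box and anchor coordinates are arbitrary sample
# values, and concrete numbers are printed when run under TF2 eager execution.
if __name__ == '__main__':
  example_boxes = box_list.BoxList(
      tf.constant([[10.0, 10.0, 20.0, 15.0]], dtype=tf.float32))
  example_anchors = box_list.BoxList(
      tf.constant([[15.0, 12.0, 30.0, 18.0]], dtype=tf.float32))
  example_coder = SquareBoxCoder(scale_factors=[10.0, 10.0, 5.0])
  example_codes = example_coder.encode(example_boxes, example_anchors)
  example_decoded = example_coder.decode(example_codes, example_anchors)
  print('rel_codes:', example_codes)  # shape [1, 3]
  print('decoded:', example_decoded.get())  # square [ymin, xmin, ymax, xmax]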
| 4,286 | 32.755906 | 80 | py |
models | models-master/research/object_detection/box_coders/mean_stddev_box_coder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.box_coder.mean_stddev_boxcoder."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_list
from object_detection.utils import test_case
class MeanStddevBoxCoderTest(test_case.TestCase):
def testGetCorrectRelativeCodesAfterEncoding(self):
boxes = np.array([[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]], np.float32)
anchors = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]], np.float32)
expected_rel_codes = [[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]]
def graph_fn(boxes, anchors):
anchors = box_list.BoxList(anchors)
boxes = box_list.BoxList(boxes)
coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
rel_codes = coder.encode(boxes, anchors)
return rel_codes
rel_codes_out = self.execute(graph_fn, [boxes, anchors])
self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,
atol=1e-04)
def testGetCorrectBoxesAfterDecoding(self):
rel_codes = np.array([[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]],
np.float32)
expected_box_corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]]
anchors = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]], np.float32)
def graph_fn(rel_codes, anchors):
anchors = box_list.BoxList(anchors)
coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
decoded_boxes = coder.decode(rel_codes, anchors).get()
return decoded_boxes
decoded_boxes_out = self.execute(graph_fn, [rel_codes, anchors])
self.assertAllClose(decoded_boxes_out, expected_box_corners, rtol=1e-04,
atol=1e-04)
if __name__ == '__main__':
tf.test.main()
| 2,498 | 39.306452 | 80 | py |
models | models-master/research/object_detection/box_coders/keypoint_box_coder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keypoint box coder.
The keypoint box coder follows the coding schema described below (this is
similar to the FasterRcnnBoxCoder, except that it encodes keypoints in addition
to box coordinates):
ty = (y - ya) / ha
tx = (x - xa) / wa
th = log(h / ha)
tw = log(w / wa)
tky0 = (ky0 - ya) / ha
tkx0 = (kx0 - xa) / wa
tky1 = (ky1 - ya) / ha
tkx1 = (kx1 - xa) / wa
...
where x, y, w, h denote the box's center coordinates, width and height
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tw and th denote the anchor-encoded
center, width and height respectively. ky0, kx0, ky1, kx1, ... denote the
keypoints' coordinates, and tky0, tkx0, tky1, tkx1, ... denote the
anchor-encoded keypoint coordinates.
"""
import tensorflow.compat.v1 as tf
from object_detection.core import box_coder
from object_detection.core import box_list
from object_detection.core import standard_fields as fields
EPSILON = 1e-8
class KeypointBoxCoder(box_coder.BoxCoder):
"""Keypoint box coder."""
def __init__(self, num_keypoints, scale_factors=None):
"""Constructor for KeypointBoxCoder.
Args:
num_keypoints: Number of keypoints to encode/decode.
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
In addition to scaling ty and tx, the first 2 scalars are used to scale
the y and x coordinates of the keypoints as well. If set to None, does
not perform scaling.
"""
self._num_keypoints = num_keypoints
if scale_factors:
assert len(scale_factors) == 4
for scalar in scale_factors:
assert scalar > 0
self._scale_factors = scale_factors
self._keypoint_scale_factors = None
if scale_factors is not None:
self._keypoint_scale_factors = tf.expand_dims(
tf.tile([
tf.cast(scale_factors[0], dtype=tf.float32),
tf.cast(scale_factors[1], dtype=tf.float32)
], [num_keypoints]), 1)
@property
def code_size(self):
return 4 + self._num_keypoints * 2
def _encode(self, boxes, anchors):
"""Encode a box and keypoint collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes and keypoints to be encoded. Boxes are
tensors with the shape [N, 4], and keypoints are tensors with the shape
[N, num_keypoints, 2].
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, th, tw, tky0, tkx0, tky1, tkx1, ...] where tky0 and tkx0
represent the y and x coordinates of the first keypoint, tky1 and tkx1
represent the y and x coordinates of the second keypoint, and so on.
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
keypoints = boxes.get_field(fields.BoxListFields.keypoints)
keypoints = tf.transpose(tf.reshape(keypoints,
[-1, self._num_keypoints * 2]))
num_boxes = boxes.num_boxes()
# Avoid NaN in division and log below.
ha += EPSILON
wa += EPSILON
h += EPSILON
w += EPSILON
tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = tf.log(w / wa)
th = tf.log(h / ha)
tiled_anchor_centers = tf.tile(
tf.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1])
tiled_anchor_sizes = tf.tile(
tf.stack([ha, wa]), [self._num_keypoints, 1])
tkeypoints = (keypoints - tiled_anchor_centers) / tiled_anchor_sizes
# Scales location targets as used in paper for joint training.
if self._scale_factors:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
th *= self._scale_factors[2]
tw *= self._scale_factors[3]
tkeypoints *= tf.tile(self._keypoint_scale_factors, [1, num_boxes])
tboxes = tf.stack([ty, tx, th, tw])
return tf.transpose(tf.concat([tboxes, tkeypoints], 0))
def _decode(self, rel_codes, anchors):
"""Decode relative codes to boxes and keypoints.
Args:
rel_codes: a tensor with shape [N, 4 + 2 * num_keypoints] representing N
anchor-encoded boxes and keypoints
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes and keypoints.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
num_codes = tf.shape(rel_codes)[0]
result = tf.unstack(tf.transpose(rel_codes))
ty, tx, th, tw = result[:4]
tkeypoints = result[4:]
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
th /= self._scale_factors[2]
tw /= self._scale_factors[3]
tkeypoints /= tf.tile(self._keypoint_scale_factors, [1, num_codes])
w = tf.exp(tw) * wa
h = tf.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
decoded_boxes_keypoints = box_list.BoxList(
tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
tiled_anchor_centers = tf.tile(
tf.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1])
tiled_anchor_sizes = tf.tile(
tf.stack([ha, wa]), [self._num_keypoints, 1])
keypoints = tkeypoints * tiled_anchor_sizes + tiled_anchor_centers
keypoints = tf.reshape(tf.transpose(keypoints),
[-1, self._num_keypoints, 2])
decoded_boxes_keypoints.add_field(fields.BoxListFields.keypoints, keypoints)
return decoded_boxes_keypoints
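# --- Illustrative usage sketch (editorial addition, not part of the original
# module). Shows how a BoxList with a `keypoints` field of shape
# [N, num_keypoints, 2] is encoded into [ty, tx, th, tw, tky0, tkx0, ...];
# all coordinate values are arbitrary sample data.
if __name__ == '__main__':
  example_boxes = box_list.BoxList(
      tf.constant([[10., 10., 20., 15.]], dtype=tf.float32))
  example_boxes.add_field(
      fields.BoxListFields.keypoints,
      tf.constant([[[15., 12.], [10., 15.]]], dtype=tf.float32))
  example_anchors = box_list.BoxList(
      tf.constant([[15., 12., 30., 18.]], dtype=tf.float32))
  example_coder = KeypointBoxCoder(num_keypoints=2)
  example_codes = example_coder.encode(example_boxes, example_anchors)
  print(example_codes)  # shape [1, 4 + 2 * 2]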
| 6,414 | 35.867816 | 80 | py |
models | models-master/research/object_detection/box_coders/faster_rcnn_box_coder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Faster RCNN box coder.
Faster RCNN box coder follows the coding schema described below:
ty = (y - ya) / ha
tx = (x - xa) / wa
th = log(h / ha)
tw = log(w / wa)
where x, y, w, h denote the box's center coordinates, width and height
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tw and th denote the anchor-encoded
center, width and height respectively.
See http://arxiv.org/abs/1506.01497 for details.
"""
import tensorflow.compat.v1 as tf
from object_detection.core import box_coder
from object_detection.core import box_list
EPSILON = 1e-8
class FasterRcnnBoxCoder(box_coder.BoxCoder):
"""Faster RCNN box coder."""
def __init__(self, scale_factors=None):
"""Constructor for FasterRcnnBoxCoder.
Args:
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
If set to None, does not perform scaling. For Faster RCNN,
the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0].
"""
if scale_factors:
assert len(scale_factors) == 4
for scalar in scale_factors:
assert scalar > 0
self._scale_factors = scale_factors
@property
def code_size(self):
return 4
def _encode(self, boxes, anchors):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, th, tw].
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
# Avoid NaN in division and log below.
ha += EPSILON
wa += EPSILON
h += EPSILON
w += EPSILON
tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = tf.log(w / wa)
th = tf.log(h / ha)
# Scales location targets as used in paper for joint training.
if self._scale_factors:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
th *= self._scale_factors[2]
tw *= self._scale_factors[3]
return tf.transpose(tf.stack([ty, tx, th, tw]))
def _decode(self, rel_codes, anchors):
"""Decode relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes))
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
th /= self._scale_factors[2]
tw /= self._scale_factors[3]
w = tf.exp(tw) * wa
h = tf.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
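# --- Illustrative usage sketch (editorial addition, not part of the original
# module). Encodes one box against one anchor with the recommended
# [10.0, 10.0, 5.0, 5.0] scale factors and decodes it back; the coordinates are
# arbitrary sample values.
if __name__ == '__main__':
  example_boxes = box_list.BoxList(
      tf.constant([[10.0, 10.0, 20.0, 15.0]], dtype=tf.float32))
  example_anchors = box_list.BoxList(
      tf.constant([[15.0, 12.0, 30.0, 18.0]], dtype=tf.float32))
  example_coder = FasterRcnnBoxCoder(scale_factors=[10.0, 10.0, 5.0, 5.0])
  example_codes = example_coder.encode(example_boxes, example_anchors)
  example_decoded = example_coder.decode(example_codes, example_anchors)
  print(example_codes)  # [ty, tx, th, tw], shape [1, 4]
  print(example_decoded.get())  # recovers the original box (up to precision)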
| 3,847 | 31.336134 | 80 | py |
models | models-master/research/object_detection/box_coders/mean_stddev_box_coder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mean stddev box coder.
This box coder use the following coding schema to encode boxes:
rel_code = (box_corner - anchor_corner_mean) / anchor_corner_stddev.
"""
from object_detection.core import box_coder
from object_detection.core import box_list
class MeanStddevBoxCoder(box_coder.BoxCoder):
"""Mean stddev box coder."""
def __init__(self, stddev=0.01):
"""Constructor for MeanStddevBoxCoder.
Args:
stddev: The standard deviation used to encode and decode boxes.
"""
self._stddev = stddev
@property
def code_size(self):
return 4
def _encode(self, boxes, anchors):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of N anchors.
Returns:
a tensor representing N anchor-encoded boxes
Raises:
ValueError: if the anchors still have deprecated stddev field.
"""
box_corners = boxes.get()
if anchors.has_field('stddev'):
raise ValueError("'stddev' is a parameter of MeanStddevBoxCoder and "
"should not be specified in the box list.")
means = anchors.get()
return (box_corners - means) / self._stddev
def _decode(self, rel_codes, anchors):
"""Decode.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes
Raises:
ValueError: if the anchors still have deprecated stddev field and expects
the decode method to use stddev value from that field.
"""
means = anchors.get()
if anchors.has_field('stddev'):
raise ValueError("'stddev' is a parameter of MeanStddevBoxCoder and "
"should not be specified in the box list.")
box_corners = rel_codes * self._stddev + means
return box_list.BoxList(box_corners)
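# --- Illustrative usage sketch (editorial addition, not part of the original
# module). The coder simply rescales corner offsets:
# rel_code = (box_corner - anchor_corner) / stddev. TensorFlow is imported here
# only to build the sample tensors; all values are arbitrary.
if __name__ == '__main__':
  import tensorflow.compat.v1 as tf  # pylint: disable=g-import-not-at-top
  example_boxes = box_list.BoxList(
      tf.constant([[0.0, 0.0, 0.5, 0.5]], dtype=tf.float32))
  example_anchors = box_list.BoxList(
      tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32))
  example_coder = MeanStddevBoxCoder(stddev=0.1)
  example_codes = example_coder.encode(example_boxes, example_anchors)
  print(example_codes)  # [[-1., -1., 0., 0.]]
  print(example_coder.decode(example_codes, example_anchors).get())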
| 2,592 | 31.4125 | 80 | py |
models | models-master/research/object_detection/box_coders/keypoint_box_coder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.box_coder.keypoint_box_coder."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.box_coders import keypoint_box_coder
from object_detection.core import box_list
from object_detection.core import standard_fields as fields
from object_detection.utils import test_case
class KeypointBoxCoderTest(test_case.TestCase):
def test_get_correct_relative_codes_after_encoding(self):
boxes = np.array([[10., 10., 20., 15.],
[0.2, 0.1, 0.5, 0.4]], np.float32)
keypoints = np.array([[[15., 12.], [10., 15.]],
[[0.5, 0.3], [0.2, 0.4]]], np.float32)
num_keypoints = len(keypoints[0])
anchors = np.array([[15., 12., 30., 18.],
[0.1, 0.0, 0.7, 0.9]], np.float32)
expected_rel_codes = [
[-0.5, -0.416666, -0.405465, -0.182321,
-0.5, -0.5, -0.833333, 0.],
[-0.083333, -0.222222, -0.693147, -1.098612,
0.166667, -0.166667, -0.333333, -0.055556]
]
def graph_fn(boxes, keypoints, anchors):
boxes = box_list.BoxList(boxes)
boxes.add_field(fields.BoxListFields.keypoints, keypoints)
anchors = box_list.BoxList(anchors)
coder = keypoint_box_coder.KeypointBoxCoder(num_keypoints)
rel_codes = coder.encode(boxes, anchors)
return rel_codes
rel_codes_out = self.execute(graph_fn, [boxes, keypoints, anchors])
self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,
atol=1e-04)
def test_get_correct_relative_codes_after_encoding_with_scaling(self):
boxes = np.array([[10., 10., 20., 15.],
[0.2, 0.1, 0.5, 0.4]], np.float32)
keypoints = np.array([[[15., 12.], [10., 15.]],
[[0.5, 0.3], [0.2, 0.4]]], np.float32)
num_keypoints = len(keypoints[0])
anchors = np.array([[15., 12., 30., 18.],
[0.1, 0.0, 0.7, 0.9]], np.float32)
expected_rel_codes = [
[-1., -1.25, -1.62186, -0.911608,
-1.0, -1.5, -1.666667, 0.],
[-0.166667, -0.666667, -2.772588, -5.493062,
0.333333, -0.5, -0.666667, -0.166667]
]
def graph_fn(boxes, keypoints, anchors):
scale_factors = [2, 3, 4, 5]
boxes = box_list.BoxList(boxes)
boxes.add_field(fields.BoxListFields.keypoints, keypoints)
anchors = box_list.BoxList(anchors)
coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints, scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
return rel_codes
rel_codes_out = self.execute(graph_fn, [boxes, keypoints, anchors])
self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,
atol=1e-04)
def test_get_correct_boxes_after_decoding(self):
anchors = np.array([[15., 12., 30., 18.],
[0.1, 0.0, 0.7, 0.9]], np.float32)
rel_codes = np.array([
[-0.5, -0.416666, -0.405465, -0.182321,
-0.5, -0.5, -0.833333, 0.],
[-0.083333, -0.222222, -0.693147, -1.098612,
0.166667, -0.166667, -0.333333, -0.055556]
], np.float32)
expected_boxes = [[10., 10., 20., 15.],
[0.2, 0.1, 0.5, 0.4]]
expected_keypoints = [[[15., 12.], [10., 15.]],
[[0.5, 0.3], [0.2, 0.4]]]
num_keypoints = len(expected_keypoints[0])
def graph_fn(rel_codes, anchors):
anchors = box_list.BoxList(anchors)
coder = keypoint_box_coder.KeypointBoxCoder(num_keypoints)
boxes = coder.decode(rel_codes, anchors)
return boxes.get(), boxes.get_field(fields.BoxListFields.keypoints)
boxes_out, keypoints_out = self.execute(graph_fn, [rel_codes, anchors])
self.assertAllClose(keypoints_out, expected_keypoints, rtol=1e-04,
atol=1e-04)
self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04,
atol=1e-04)
def test_get_correct_boxes_after_decoding_with_scaling(self):
anchors = np.array([[15., 12., 30., 18.],
[0.1, 0.0, 0.7, 0.9]], np.float32)
rel_codes = np.array([
[-1., -1.25, -1.62186, -0.911608,
-1.0, -1.5, -1.666667, 0.],
[-0.166667, -0.666667, -2.772588, -5.493062,
0.333333, -0.5, -0.666667, -0.166667]
], np.float32)
expected_boxes = [[10., 10., 20., 15.],
[0.2, 0.1, 0.5, 0.4]]
expected_keypoints = [[[15., 12.], [10., 15.]],
[[0.5, 0.3], [0.2, 0.4]]]
num_keypoints = len(expected_keypoints[0])
def graph_fn(rel_codes, anchors):
scale_factors = [2, 3, 4, 5]
anchors = box_list.BoxList(anchors)
coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints, scale_factors=scale_factors)
boxes = coder.decode(rel_codes, anchors)
return boxes.get(), boxes.get_field(fields.BoxListFields.keypoints)
boxes_out, keypoints_out = self.execute(graph_fn, [rel_codes, anchors])
self.assertAllClose(keypoints_out, expected_keypoints, rtol=1e-04,
atol=1e-04)
self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04,
atol=1e-04)
def test_very_small_width_nan_after_encoding(self):
boxes = np.array([[10., 10., 10.0000001, 20.]], np.float32)
keypoints = np.array([[[10., 10.], [10.0000001, 20.]]], np.float32)
anchors = np.array([[15., 12., 30., 18.]], np.float32)
expected_rel_codes = [[-0.833333, 0., -21.128731, 0.510826,
-0.833333, -0.833333, -0.833333, 0.833333]]
def graph_fn(boxes, keypoints, anchors):
boxes = box_list.BoxList(boxes)
boxes.add_field(fields.BoxListFields.keypoints, keypoints)
anchors = box_list.BoxList(anchors)
coder = keypoint_box_coder.KeypointBoxCoder(2)
rel_codes = coder.encode(boxes, anchors)
return rel_codes
rel_codes_out = self.execute(graph_fn, [boxes, keypoints, anchors])
self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,
atol=1e-04)
if __name__ == '__main__':
tf.test.main()
| 6,799 | 43.736842 | 80 | py |
models | models-master/research/object_detection/box_coders/square_box_coder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.box_coder.square_box_coder."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.box_coders import square_box_coder
from object_detection.core import box_list
from object_detection.utils import test_case
class SquareBoxCoderTest(test_case.TestCase):
def test_correct_relative_codes_with_default_scale(self):
boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]],
np.float32)
anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],
np.float32)
expected_rel_codes = [[-0.790569, -0.263523, -0.293893],
[-0.068041, -0.272166, -0.89588]]
def graph_fn(boxes, anchors):
scale_factors = None
boxes = box_list.BoxList(boxes)
anchors = box_list.BoxList(anchors)
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
return rel_codes
rel_codes_out = self.execute(graph_fn, [boxes, anchors])
self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,
atol=1e-04)
def test_correct_relative_codes_with_non_default_scale(self):
boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]],
np.float32)
anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],
np.float32)
expected_rel_codes = [[-1.581139, -0.790569, -1.175573],
[-0.136083, -0.816497, -3.583519]]
def graph_fn(boxes, anchors):
scale_factors = [2, 3, 4]
boxes = box_list.BoxList(boxes)
anchors = box_list.BoxList(anchors)
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
return rel_codes
rel_codes_out = self.execute(graph_fn, [boxes, anchors])
self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-03,
atol=1e-03)
def test_correct_relative_codes_with_small_width(self):
boxes = np.array([[10.0, 10.0, 10.0000001, 20.0]], np.float32)
anchors = np.array([[15.0, 12.0, 30.0, 18.0]], np.float32)
expected_rel_codes = [[-1.317616, 0., -20.670586]]
def graph_fn(boxes, anchors):
scale_factors = None
boxes = box_list.BoxList(boxes)
anchors = box_list.BoxList(anchors)
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
return rel_codes
rel_codes_out = self.execute(graph_fn, [boxes, anchors])
self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,
atol=1e-04)
def test_correct_boxes_with_default_scale(self):
anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],
np.float32)
rel_codes = np.array([[-0.5, -0.416666, -0.405465],
[-0.083333, -0.222222, -0.693147]], np.float32)
expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432],
[0.155051, 0.102989, 0.522474, 0.470412]]
def graph_fn(rel_codes, anchors):
scale_factors = None
anchors = box_list.BoxList(anchors)
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
boxes = coder.decode(rel_codes, anchors).get()
return boxes
boxes_out = self.execute(graph_fn, [rel_codes, anchors])
self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04,
atol=1e-04)
def test_correct_boxes_with_non_default_scale(self):
anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],
np.float32)
rel_codes = np.array(
[[-1., -1.25, -1.62186], [-0.166667, -0.666667, -2.772588]], np.float32)
expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432],
[0.155051, 0.102989, 0.522474, 0.470412]]
def graph_fn(rel_codes, anchors):
scale_factors = [2, 3, 4]
anchors = box_list.BoxList(anchors)
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
boxes = coder.decode(rel_codes, anchors).get()
return boxes
boxes_out = self.execute(graph_fn, [rel_codes, anchors])
self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04,
atol=1e-04)
if __name__ == '__main__':
tf.test.main()
| 5,104 | 43.391304 | 80 | py |
models | models-master/research/object_detection/box_coders/faster_rcnn_box_coder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.box_coder.faster_rcnn_box_coder."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.core import box_list
from object_detection.utils import test_case
class FasterRcnnBoxCoderTest(test_case.TestCase):
def test_get_correct_relative_codes_after_encoding(self):
boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]],
np.float32)
anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],
np.float32)
expected_rel_codes = [[-0.5, -0.416666, -0.405465, -0.182321],
[-0.083333, -0.222222, -0.693147, -1.098612]]
def graph_fn(boxes, anchors):
boxes = box_list.BoxList(boxes)
anchors = box_list.BoxList(anchors)
coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
rel_codes = coder.encode(boxes, anchors)
return rel_codes
rel_codes_out = self.execute(graph_fn, [boxes, anchors])
self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,
atol=1e-04)
def test_get_correct_relative_codes_after_encoding_with_scaling(self):
boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]],
np.float32)
anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],
np.float32)
expected_rel_codes = [[-1., -1.25, -1.62186, -0.911608],
[-0.166667, -0.666667, -2.772588, -5.493062]]
def graph_fn(boxes, anchors):
scale_factors = [2, 3, 4, 5]
boxes = box_list.BoxList(boxes)
anchors = box_list.BoxList(anchors)
coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
return rel_codes
rel_codes_out = self.execute(graph_fn, [boxes, anchors])
self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,
atol=1e-04)
def test_get_correct_boxes_after_decoding(self):
anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],
np.float32)
rel_codes = np.array([[-0.5, -0.416666, -0.405465, -0.182321],
[-0.083333, -0.222222, -0.693147, -1.098612]],
np.float32)
expected_boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
def graph_fn(rel_codes, anchors):
anchors = box_list.BoxList(anchors)
coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
boxes = coder.decode(rel_codes, anchors)
return boxes.get()
boxes_out = self.execute(graph_fn, [rel_codes, anchors])
self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04,
atol=1e-04)
def test_get_correct_boxes_after_decoding_with_scaling(self):
anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],
np.float32)
rel_codes = np.array([[-1., -1.25, -1.62186, -0.911608],
[-0.166667, -0.666667, -2.772588, -5.493062]],
np.float32)
expected_boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
def graph_fn(rel_codes, anchors):
scale_factors = [2, 3, 4, 5]
anchors = box_list.BoxList(anchors)
coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=scale_factors)
boxes = coder.decode(rel_codes, anchors).get()
return boxes
boxes_out = self.execute(graph_fn, [rel_codes, anchors])
self.assertAllClose(expected_boxes, boxes_out, rtol=1e-04,
atol=1e-04)
def test_very_small_width_nan_after_encoding(self):
boxes = np.array([[10.0, 10.0, 10.0000001, 20.0]], np.float32)
anchors = np.array([[15.0, 12.0, 30.0, 18.0]], np.float32)
expected_rel_codes = [[-0.833333, 0., -21.128731, 0.510826]]
def graph_fn(boxes, anchors):
boxes = box_list.BoxList(boxes)
anchors = box_list.BoxList(anchors)
coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
rel_codes = coder.encode(boxes, anchors)
return rel_codes
rel_codes_out = self.execute(graph_fn, [boxes, anchors])
self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,
atol=1e-04)
if __name__ == '__main__':
tf.test.main()
| 5,071 | 43.491228 | 80 | py |
models | models-master/research/object_detection/box_coders/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/object_detection/utils/label_map_util.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import logging
import numpy as np
from six import string_types
from six.moves import range
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.protos import string_int_label_map_pb2
_LABEL_OFFSET = 1
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 0:
raise ValueError('Label map ids should be >= 0.')
if (item.id == 0 and item.name != 'background' and
item.display_name != 'background'):
raise ValueError('Label map id 0 is reserved for the background label')
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
def get_max_label_map_index(label_map):
"""Get maximum index in label map.
Args:
label_map: a StringIntLabelMapProto
Returns:
an integer
"""
return max([item.id for item in label_map.item])
def convert_label_map_to_categories(label_map,
max_num_classes,
use_display_name=True):
"""Given label map proto returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
'keypoints': (optional) a dictionary of keypoint string 'label' to integer
'id'.
We only allow a class into the list if its id-label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field as
category name. If False or if the display_name field does not exist, uses
'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': 'category_{}'.format(class_id + label_id_offset)
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info(
'Ignore item %d since it falls outside of requested '
'label range.', item.id)
continue
if use_display_name and item.HasField('display_name'):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
category = {'id': item.id, 'name': name}
if item.HasField('frequency'):
if item.frequency == string_int_label_map_pb2.LVISFrequency.Value(
'FREQUENT'):
category['frequency'] = 'f'
elif item.frequency == string_int_label_map_pb2.LVISFrequency.Value(
'COMMON'):
category['frequency'] = 'c'
elif item.frequency == string_int_label_map_pb2.LVISFrequency.Value(
'RARE'):
category['frequency'] = 'r'
if item.HasField('instance_count'):
category['instance_count'] = item.instance_count
if item.keypoints:
keypoints = {}
list_of_keypoint_ids = []
for kv in item.keypoints:
if kv.id in list_of_keypoint_ids:
raise ValueError('Duplicate keypoint ids are not allowed. '
'Found {} more than once'.format(kv.id))
keypoints[kv.label] = kv.id
list_of_keypoint_ids.append(kv.id)
category['keypoints'] = keypoints
categories.append(category)
return categories
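def _convert_label_map_to_categories_example():
  """Illustrative (editorial) sketch of `convert_label_map_to_categories`.
  Not part of the original module. Parses a tiny two-item label map from text
  and converts it; the expected result is
  [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}].
  """
  label_map = string_int_label_map_pb2.StringIntLabelMap()
  text_format.Merge(
      "item { id: 1 name: 'cat' } item { id: 2 name: 'dog' }", label_map)
  return convert_label_map_to_categories(label_map, max_num_classes=2)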
def load_labelmap(path, validator=None):
"""Loads label map proto.
Args:
path: path to StringIntLabelMap proto text file.
validator: Handle for a function that takes the loaded label map as input
and validates it. The validator is expected to raise ValueError for an
invalid label map. If None, uses the default validator.
Returns:
a StringIntLabelMapProto
"""
with tf.io.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(label_map_string)
if validator is None:
validator = _validate_label_map
validator(label_map)
return label_map
def get_label_map_dict(label_map_path_or_proto,
use_display_name=False,
fill_in_gaps_and_background=False,
validator=None):
"""Reads a label map and returns a dictionary of label names to id.
Args:
label_map_path_or_proto: path to StringIntLabelMap proto text file or the
proto itself.
use_display_name: whether to use the label map items' display names as keys.
fill_in_gaps_and_background: whether to fill in gaps and background with
respect to the id field in the proto. The id: 0 is reserved for the
'background' class and will be added if it is missing. All other missing
ids in range(1, max(id)) will be added with a dummy class name
("class_<id>") if they are missing.
validator: Handle for a function that takes the loaded label map as input
and validates it. The validator is expected to raise ValueError for an
invalid label map. If None, uses the default validator.
Returns:
A dictionary mapping label names to id.
Raises:
ValueError: if fill_in_gaps_and_background and label_map has non-integer or
negative values.
"""
if isinstance(label_map_path_or_proto, string_types):
label_map = load_labelmap(label_map_path_or_proto)
else:
if validator is None:
validator = _validate_label_map
validator(label_map_path_or_proto)
label_map = label_map_path_or_proto
label_map_dict = {}
for item in label_map.item:
if use_display_name:
label_map_dict[item.display_name] = item.id
else:
label_map_dict[item.name] = item.id
if fill_in_gaps_and_background:
values = set(label_map_dict.values())
if 0 not in values:
label_map_dict['background'] = 0
if not all(isinstance(value, int) for value in values):
raise ValueError('The values in label map must be integers in order to '
'fill_in_gaps_and_background.')
if not all(value >= 0 for value in values):
raise ValueError('The values in the label map must be non-negative.')
if len(values) != max(values) + 1:
# there are gaps in the labels, fill in gaps.
for value in range(1, max(values)):
if value not in values:
# TODO(rathodv): Add a prefix 'class_' here once the tool to generate
# teacher annotation adds this prefix in the data.
label_map_dict[str(value)] = value
return label_map_dict
def get_keypoint_label_map_dict(label_map_path_or_proto):
"""Reads a label map and returns a dictionary of keypoint names to ids.
Note that keypoints belonging to different classes will be merged into a
single dictionary. It is expected that there are no duplicated keypoint names
or ids from different classes.
Args:
label_map_path_or_proto: path to StringIntLabelMap proto text file or the
proto itself.
Returns:
A dictionary mapping keypoint names to the keypoint id (not the object id).
Raises:
ValueError: if there are duplicated keypoint names or ids.
"""
if isinstance(label_map_path_or_proto, string_types):
label_map = load_labelmap(label_map_path_or_proto)
else:
label_map = label_map_path_or_proto
label_map_dict = {}
for item in label_map.item:
for kpts in item.keypoints:
if kpts.label in label_map_dict.keys():
raise ValueError('Duplicated keypoint label: %s' % kpts.label)
if kpts.id in label_map_dict.values():
raise ValueError('Duplicated keypoint ID: %d' % kpts.id)
label_map_dict[kpts.label] = kpts.id
return label_map_dict
def get_label_map_hierarchy_lut(label_map_path_or_proto,
include_identity=False,
validator=None):
"""Reads a label map and returns ancestors and descendants in the hierarchy.
The function returns the ancestors and descendants as separate look up tables
(LUT) numpy arrays of shape [max_id, max_id] where lut[i,j] = 1 when there is
a hierarchical relationship between class i and j.
Args:
label_map_path_or_proto: path to StringIntLabelMap proto text file or the
proto itself.
include_identity: Boolean to indicate whether to include a class element
among its ancestors and descendants. Setting this will result in the lut
diagonal being set to 1.
validator: Handle for a function that takes the loaded label map as input
and validates it. The validator is expected to raise ValueError for an
invalid label map. If None, uses the default validator.
Returns:
ancestors_lut: Look up table with the ancestors.
descendants_lut: Look up table with the descendants.
"""
if isinstance(label_map_path_or_proto, string_types):
label_map = load_labelmap(label_map_path_or_proto)
else:
if validator is None:
validator = _validate_label_map
validator(label_map_path_or_proto)
label_map = label_map_path_or_proto
hierarchy_dict = {
'ancestors': collections.defaultdict(list),
'descendants': collections.defaultdict(list)
}
max_id = -1
for item in label_map.item:
max_id = max(max_id, item.id)
for ancestor in item.ancestor_ids:
hierarchy_dict['ancestors'][item.id].append(ancestor)
for descendant in item.descendant_ids:
hierarchy_dict['descendants'][item.id].append(descendant)
def get_graph_relations_tensor(graph_relations):
graph_relations_tensor = np.zeros([max_id, max_id])
for id_val, ids_related in graph_relations.items():
id_val = int(id_val) - _LABEL_OFFSET
for id_related in ids_related:
id_related -= _LABEL_OFFSET
graph_relations_tensor[id_val, id_related] = 1
if include_identity:
graph_relations_tensor += np.eye(max_id)
return graph_relations_tensor
ancestors_lut = get_graph_relations_tensor(hierarchy_dict['ancestors'])
descendants_lut = get_graph_relations_tensor(hierarchy_dict['descendants'])
return ancestors_lut, descendants_lut
def create_categories_from_labelmap(label_map_path, use_display_name=True):
"""Reads a label map and returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': an integer id uniquely identifying this category.
'name': string representing category name e.g., 'cat', 'dog'.
'keypoints': a dictionary of keypoint string label to integer id. It is only
returned when available in label map proto.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
label_map = load_labelmap(label_map_path)
max_num_classes = max(item.id for item in label_map.item)
return convert_label_map_to_categories(label_map, max_num_classes,
use_display_name)
def create_category_index_from_labelmap(label_map_path, use_display_name=True):
"""Reads a label map and returns a category index.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
A category index, which is a dictionary that maps integer ids to dicts
containing categories, e.g.
{1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
"""
categories = create_categories_from_labelmap(label_map_path, use_display_name)
return create_category_index(categories)
def create_class_agnostic_category_index():
"""Creates a category index with a single `object` class."""
return {1: {'id': 1, 'name': 'object'}}
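# --- Illustrative usage sketch (editorial addition, not part of the original
# module). Builds an in-memory label map and shows two of the lookups above;
# the names and ids are arbitrary sample data.
if __name__ == '__main__':
  example_label_map = string_int_label_map_pb2.StringIntLabelMap()
  text_format.Merge(
      "item { id: 1 name: 'cat' } item { id: 2 name: 'dog' }",
      example_label_map)
  print(get_label_map_dict(example_label_map))  # {'cat': 1, 'dog': 2}
  print(create_category_index(
      convert_label_map_to_categories(example_label_map, max_num_classes=2)))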
| 14,370 | 36.327273 | 80 | py |
models | models-master/research/object_detection/utils/np_box_list_ops.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List operations for Numpy BoxLists.
Example box operations that are supported:
* Areas: compute bounding box areas
* IOU: pairwise intersection-over-union scores
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from object_detection.utils import np_box_list
from object_detection.utils import np_box_ops
class SortOrder(object):
"""Enum class for sort order.
Attributes:
ascend: ascend order.
descend: descend order.
"""
ASCEND = 1
DESCEND = 2
def area(boxlist):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
Returns:
a numpy array with shape [N] representing box areas
"""
y_min, x_min, y_max, x_max = boxlist.get_coordinates()
return (y_max - y_min) * (x_max - x_min)
def intersection(boxlist1, boxlist2):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
Returns:
a numpy array with shape [N, M] representing pairwise intersection areas
"""
return np_box_ops.intersection(boxlist1.get(), boxlist2.get())
def iou(boxlist1, boxlist2):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
return np_box_ops.iou(boxlist1.get(), boxlist2.get())
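def _pairwise_iou_example():
  """Illustrative (editorial) sketch of `iou`; not part of the original module.
  Compares one query box against two boxes; the expected result is
  approximately [[1.0, 0.3333]] (identical box, then half-overlapping box).
  """
  boxlist1 = np_box_list.BoxList(
      np.array([[0., 0., 1., 1.]], dtype=np.float32))
  boxlist2 = np_box_list.BoxList(
      np.array([[0., 0., 1., 1.],
                [0., 0.5, 1., 1.5]], dtype=np.float32))
  return iou(boxlist1, boxlist2)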
def ioa(boxlist1, boxlist2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, IOA(box1, box2) != IOA(box2, box1).
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
"""
return np_box_ops.ioa(boxlist1.get(), boxlist2.get())
def gather(boxlist, indices, fields=None):
"""Gather boxes from BoxList according to indices and return new BoxList.
By default, gather returns boxes corresponding to the input index list, as
well as all additional fields stored in the boxlist (indexing into the
first dimension). However one can optionally only gather from a
subset of fields.
Args:
boxlist: BoxList holding N boxes
indices: a 1-d numpy array of type int_
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indices
Raises:
ValueError: if specified field is not contained in boxlist or if the
indices are not of type int_
"""
if indices.size:
if np.amax(indices) >= boxlist.num_boxes() or np.amin(indices) < 0:
raise ValueError('indices are out of valid range.')
subboxlist = np_box_list.BoxList(boxlist.get()[indices, :])
if fields is None:
fields = boxlist.get_extra_fields()
for field in fields:
extra_field_data = boxlist.get_field(field)
subboxlist.add_field(field, extra_field_data[indices, ...])
return subboxlist
def sort_by_field(boxlist, field, order=SortOrder.DESCEND):
"""Sort boxes and associated fields according to a scalar field.
A common use case is reordering the boxes according to descending scores.
Args:
boxlist: BoxList holding N boxes.
field: A BoxList field for sorting and reordering the BoxList.
order: (Optional) 'descend' or 'ascend'. Default is descend.
Returns:
sorted_boxlist: A sorted BoxList with the field in the specified order.
Raises:
ValueError: if specified field does not exist or is not of single dimension.
ValueError: if the order is not either descend or ascend.
"""
if not boxlist.has_field(field):
raise ValueError('Field ' + field + ' does not exist')
if len(boxlist.get_field(field).shape) != 1:
raise ValueError('Field ' + field + ' should be single dimension.')
if order != SortOrder.DESCEND and order != SortOrder.ASCEND:
raise ValueError('Invalid sort order')
field_to_sort = boxlist.get_field(field)
sorted_indices = np.argsort(field_to_sort)
if order == SortOrder.DESCEND:
sorted_indices = sorted_indices[::-1]
return gather(boxlist, sorted_indices)
def non_max_suppression(boxlist,
max_output_size=10000,
iou_threshold=1.0,
score_threshold=-10.0):
"""Non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. In each iteration, the detected bounding box with
highest score in the available pool is selected.
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores. All scores belong to the same class.
max_output_size: maximum number of retained boxes
iou_threshold: intersection over union threshold.
score_threshold: minimum score threshold. Remove the boxes with scores
less than this value. Default value is set to -10. A very
low threshold to pass pretty much all the boxes, unless
the user sets a different score threshold.
Returns:
a BoxList holding M boxes where M <= max_output_size
Raises:
ValueError: if 'scores' field does not exist
ValueError: if threshold is not in [0, 1]
ValueError: if max_output_size < 0
"""
if not boxlist.has_field('scores'):
raise ValueError('Field scores does not exist')
if iou_threshold < 0. or iou_threshold > 1.0:
raise ValueError('IOU threshold must be in [0, 1]')
if max_output_size < 0:
    raise ValueError('max_output_size must be non-negative.')
boxlist = filter_scores_greater_than(boxlist, score_threshold)
if boxlist.num_boxes() == 0:
return boxlist
boxlist = sort_by_field(boxlist, 'scores')
# Prevent further computation if NMS is disabled.
if iou_threshold == 1.0:
if boxlist.num_boxes() > max_output_size:
selected_indices = np.arange(max_output_size)
return gather(boxlist, selected_indices)
else:
return boxlist
boxes = boxlist.get()
num_boxes = boxlist.num_boxes()
  # is_index_valid is True only for boxes that are still valid candidates.
is_index_valid = np.full(num_boxes, 1, dtype=bool)
selected_indices = []
num_output = 0
for i in range(num_boxes):
if num_output < max_output_size:
if is_index_valid[i]:
num_output += 1
selected_indices.append(i)
is_index_valid[i] = False
valid_indices = np.where(is_index_valid)[0]
if valid_indices.size == 0:
break
intersect_over_union = np_box_ops.iou(
np.expand_dims(boxes[i, :], axis=0), boxes[valid_indices, :])
intersect_over_union = np.squeeze(intersect_over_union, axis=0)
is_index_valid[valid_indices] = np.logical_and(
is_index_valid[valid_indices],
intersect_over_union <= iou_threshold)
return gather(boxlist, np.array(selected_indices))
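# Illustrative sketch (added for exposition; not part of the original library):
# greedy NMS on a tiny BoxList. Assumes the module-level numpy / np_box_list
# imports; the helper name is hypothetical.
def _example_non_max_suppression():
  """Suppresses the lower-scoring of two heavily overlapping boxes."""
  boxlist = np_box_list.BoxList(
      np.array([[0.0, 0.0, 1.0, 1.0],
                [0.0, 0.0, 0.9, 0.9],
                [2.0, 2.0, 3.0, 3.0]], dtype=np.float32))
  boxlist.add_field('scores', np.array([0.9, 0.8, 0.7], dtype=np.float32))
  # With a 0.5 IOU threshold the second box is suppressed by the first, so
  # the returned BoxList holds the first and third boxes only.
  return non_max_suppression(
      boxlist, max_output_size=10, iou_threshold=0.5, score_threshold=0.0)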
def multi_class_non_max_suppression(boxlist, score_thresh, iou_thresh,
max_output_size):
"""Multi-class version of non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. It operates independently for each class for
which scores are provided (via the scores field of the input box_list),
pruning boxes with score less than a provided threshold prior to
applying NMS.
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores. This scores field is a tensor that can
      be 1 dimensional (in the case of a single class) or 2-dimensional, in
      which case we assume that it takes the shape [num_boxes, num_classes].
We further assume that this rank is known statically and that
scores.shape[1] is also known (i.e., the number of classes is fixed
and known at graph construction time).
score_thresh: scalar threshold for score (low scoring boxes are removed).
    iou_thresh: scalar threshold for IOU (boxes that have high IOU overlap
with previously selected boxes are removed).
max_output_size: maximum number of retained boxes per class.
Returns:
a BoxList holding M boxes with a rank-1 scores field representing
corresponding scores for each box with scores sorted in decreasing order
and a rank-1 classes field representing a class label for each box.
Raises:
ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have
a valid scores field.
"""
if not 0 <= iou_thresh <= 1.0:
raise ValueError('thresh must be between 0 and 1')
if not isinstance(boxlist, np_box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
scores = boxlist.get_field('scores')
if len(scores.shape) == 1:
scores = np.reshape(scores, [-1, 1])
elif len(scores.shape) == 2:
if scores.shape[1] is None:
raise ValueError('scores field must have statically defined second '
'dimension')
else:
raise ValueError('scores field must be of rank 1 or 2')
num_boxes = boxlist.num_boxes()
num_scores = scores.shape[0]
num_classes = scores.shape[1]
if num_boxes != num_scores:
raise ValueError('Incorrect scores field length: actual vs expected.')
selected_boxes_list = []
for class_idx in range(num_classes):
boxlist_and_class_scores = np_box_list.BoxList(boxlist.get())
class_scores = np.reshape(scores[0:num_scores, class_idx], [-1])
boxlist_and_class_scores.add_field('scores', class_scores)
boxlist_filt = filter_scores_greater_than(boxlist_and_class_scores,
score_thresh)
nms_result = non_max_suppression(boxlist_filt,
max_output_size=max_output_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh)
nms_result.add_field(
'classes', np.zeros_like(nms_result.get_field('scores')) + class_idx)
selected_boxes_list.append(nms_result)
selected_boxes = concatenate(selected_boxes_list)
sorted_boxes = sort_by_field(selected_boxes, 'scores')
return sorted_boxes
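# Illustrative sketch (added for exposition; not part of the original library):
# per-class NMS on a BoxList whose 'scores' field has one column per class.
# The helper name is hypothetical.
def _example_multi_class_non_max_suppression():
  """Runs per-class NMS over two boxes scored against two classes."""
  boxlist = np_box_list.BoxList(
      np.array([[0.0, 0.0, 1.0, 1.0],
                [0.0, 0.0, 0.9, 0.9]], dtype=np.float32))
  boxlist.add_field(
      'scores', np.array([[0.9, 0.1], [0.2, 0.8]], dtype=np.float32))
  # Each class is processed independently; the result carries rank-1 'scores'
  # and 'classes' fields sorted by descending score.
  return multi_class_non_max_suppression(
      boxlist, score_thresh=0.5, iou_thresh=0.5, max_output_size=10)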
def scale(boxlist, y_scale, x_scale):
"""Scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: float
x_scale: float
Returns:
boxlist: BoxList holding N boxes
"""
y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = np_box_list.BoxList(np.hstack([y_min, x_min, y_max, x_max]))
fields = boxlist.get_extra_fields()
for field in fields:
extra_field_data = boxlist.get_field(field)
scaled_boxlist.add_field(field, extra_field_data)
return scaled_boxlist
def clip_to_window(boxlist, window, filter_nonoverlapping=True):
"""Clip bounding boxes to a window.
This op clips input bounding boxes (represented by bounding box
corners) to a window, optionally filtering out boxes that do not
overlap at all with the window.
Args:
boxlist: BoxList holding M_in boxes
window: a numpy array of shape [4] representing the
[y_min, x_min, y_max, x_max] window to which the op
should clip boxes.
filter_nonoverlapping: whether to filter out boxes that do not overlap at
all with the window.
Returns:
a BoxList holding M_out boxes where M_out <= M_in
"""
y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
win_y_min = window[0]
win_x_min = window[1]
win_y_max = window[2]
win_x_max = window[3]
y_min_clipped = np.fmax(np.fmin(y_min, win_y_max), win_y_min)
y_max_clipped = np.fmax(np.fmin(y_max, win_y_max), win_y_min)
x_min_clipped = np.fmax(np.fmin(x_min, win_x_max), win_x_min)
x_max_clipped = np.fmax(np.fmin(x_max, win_x_max), win_x_min)
clipped = np_box_list.BoxList(
np.hstack([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped]))
clipped = _copy_extra_fields(clipped, boxlist)
if filter_nonoverlapping:
areas = area(clipped)
nonzero_area_indices = np.reshape(
np.nonzero(np.greater(areas, 0.0)), [-1]).astype(np.int32)
clipped = gather(clipped, nonzero_area_indices)
return clipped
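# Illustrative sketch (added for exposition; not part of the original library):
# clipping boxes to the unit window removes a box left with zero area.
def _example_clip_to_window():
  """Clips one straddling box and filters out one fully-outside box."""
  boxlist = np_box_list.BoxList(
      np.array([[-0.5, -0.5, 0.5, 0.5],
                [2.0, 2.0, 3.0, 3.0]], dtype=np.float32))
  window = np.array([0.0, 0.0, 1.0, 1.0], dtype=np.float32)
  # The first box is clipped to [0, 0, 0.5, 0.5]; the second has zero area
  # after clipping and is removed because filter_nonoverlapping is True.
  return clip_to_window(boxlist, window, filter_nonoverlapping=True)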
def prune_non_overlapping_boxes(boxlist1, boxlist2, minoverlap=0.0):
"""Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.
  For each box in boxlist1, we want its IOA to be more than minoverlap with
  at least one of the boxes in boxlist2. If no box in boxlist2 meets this
  requirement, the box is removed.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
minoverlap: Minimum required overlap between boxes, to count them as
overlapping.
Returns:
A pruned boxlist with size [N', 4].
"""
intersection_over_area = ioa(boxlist2, boxlist1) # [M, N] tensor
intersection_over_area = np.amax(intersection_over_area, axis=0) # [N] tensor
keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap))
keep_inds = np.nonzero(keep_bool)[0]
new_boxlist1 = gather(boxlist1, keep_inds)
return new_boxlist1
def prune_outside_window(boxlist, window):
"""Prunes bounding boxes that fall outside a given window.
This function prunes bounding boxes that even partially fall outside the given
window. See also ClipToWindow which only prunes bounding boxes that fall
completely outside the window, and clips any bounding boxes that partially
overflow.
Args:
boxlist: a BoxList holding M_in boxes.
window: a numpy array of size 4, representing [ymin, xmin, ymax, xmax]
of the window.
Returns:
pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in.
valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
in the input tensor.
"""
y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
win_y_min = window[0]
win_x_min = window[1]
win_y_max = window[2]
win_x_max = window[3]
coordinate_violations = np.hstack([np.less(y_min, win_y_min),
np.less(x_min, win_x_min),
np.greater(y_max, win_y_max),
np.greater(x_max, win_x_max)])
valid_indices = np.reshape(
np.where(np.logical_not(np.max(coordinate_violations, axis=1))), [-1])
return gather(boxlist, valid_indices), valid_indices
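# Illustrative sketch (added for exposition; not part of the original library):
# pruning boxes that stick out of the unit window, even partially.
def _example_prune_outside_window():
  """Keeps only the box that lies fully inside [0, 0, 1, 1]."""
  boxlist = np_box_list.BoxList(
      np.array([[0.1, 0.1, 0.9, 0.9],
                [-0.1, 0.2, 0.5, 0.5]], dtype=np.float32))
  window = np.array([0.0, 0.0, 1.0, 1.0], dtype=np.float32)
  # Only index 0 survives; the second box extends above the window.
  return prune_outside_window(boxlist, window)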
def concatenate(boxlists, fields=None):
"""Concatenate list of BoxLists.
This op concatenates a list of input BoxLists into a larger BoxList. It also
handles concatenation of BoxList fields as long as the field tensor shapes
are equal except for the first dimension.
Args:
boxlists: list of BoxList objects
fields: optional list of fields to also concatenate. By default, all
fields from the first BoxList in the list are included in the
concatenation.
Returns:
a BoxList with number of boxes equal to
      sum([boxlist.num_boxes() for boxlist in boxlists])
Raises:
ValueError: if boxlists is invalid (i.e., is not a list, is empty, or
contains non BoxList objects), or if requested fields are not contained in
all boxlists
"""
if not isinstance(boxlists, list):
raise ValueError('boxlists should be a list')
if not boxlists:
raise ValueError('boxlists should have nonzero length')
for boxlist in boxlists:
if not isinstance(boxlist, np_box_list.BoxList):
raise ValueError('all elements of boxlists should be BoxList objects')
concatenated = np_box_list.BoxList(
np.vstack([boxlist.get() for boxlist in boxlists]))
if fields is None:
fields = boxlists[0].get_extra_fields()
for field in fields:
first_field_shape = boxlists[0].get_field(field).shape
first_field_shape = first_field_shape[1:]
for boxlist in boxlists:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all requested fields')
field_shape = boxlist.get_field(field).shape
field_shape = field_shape[1:]
if field_shape != first_field_shape:
raise ValueError('field %s must have same shape for all boxlists '
'except for the 0th dimension.' % field)
concatenated_field = np.concatenate(
[boxlist.get_field(field) for boxlist in boxlists], axis=0)
concatenated.add_field(field, concatenated_field)
return concatenated
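# Illustrative sketch (added for exposition; not part of the original library):
# concatenating two BoxLists whose 'scores' fields have compatible shapes.
def _example_concatenate():
  """Concatenates two BoxLists into a single BoxList of three boxes."""
  boxlist_a = np_box_list.BoxList(
      np.array([[0.0, 0.0, 1.0, 1.0]], dtype=np.float32))
  boxlist_a.add_field('scores', np.array([0.5], dtype=np.float32))
  boxlist_b = np_box_list.BoxList(
      np.array([[0.1, 0.1, 0.6, 0.6],
                [0.2, 0.2, 0.7, 0.7]], dtype=np.float32))
  boxlist_b.add_field('scores', np.array([0.8, 0.9], dtype=np.float32))
  # The result holds all three boxes and a concatenated 'scores' field.
  return concatenate([boxlist_a, boxlist_b])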
def filter_scores_greater_than(boxlist, thresh):
"""Filter to keep only boxes with score exceeding a given threshold.
This op keeps the collection of boxes whose corresponding scores are
greater than the input threshold.
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores.
thresh: scalar threshold
Returns:
a BoxList holding M boxes where M <= N
Raises:
ValueError: if boxlist not a BoxList object or if it does not
have a scores field
"""
if not isinstance(boxlist, np_box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
scores = boxlist.get_field('scores')
if len(scores.shape) > 2:
raise ValueError('Scores should have rank 1 or 2')
if len(scores.shape) == 2 and scores.shape[1] != 1:
raise ValueError('Scores should have rank 1 or have shape '
'consistent with [None, 1]')
high_score_indices = np.reshape(np.where(np.greater(scores, thresh)),
[-1]).astype(np.int32)
return gather(boxlist, high_score_indices)
def change_coordinate_frame(boxlist, window):
"""Change coordinate frame of the boxlist to be relative to window's frame.
Given a window of the form [ymin, xmin, ymax, xmax],
changes bounding box coordinates from boxlist to be relative to this window
(e.g., the min corner maps to (0,0) and the max corner maps to (1,1)).
An example use case is data augmentation: where we are given groundtruth
boxes (boxlist) and would like to randomly crop the image to some
window (window). In this case we need to change the coordinate frame of
each groundtruth box to be relative to this new window.
Args:
boxlist: A BoxList object holding N boxes.
window: a size 4 1-D numpy array.
Returns:
Returns a BoxList object with N boxes.
"""
win_height = window[2] - window[0]
win_width = window[3] - window[1]
boxlist_new = scale(
np_box_list.BoxList(boxlist.get() -
[window[0], window[1], window[0], window[1]]),
1.0 / win_height, 1.0 / win_width)
_copy_extra_fields(boxlist_new, boxlist)
return boxlist_new
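# Illustrative sketch (added for exposition; not part of the original library):
# re-expressing a groundtruth box relative to a crop window, as done for
# random-crop data augmentation.
def _example_change_coordinate_frame():
  """Maps a box into the frame of a [0.25, 0.25, 0.75, 0.75] crop window."""
  boxlist = np_box_list.BoxList(
      np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32))
  window = np.array([0.25, 0.25, 0.75, 0.75], dtype=np.float32)
  # The box coincides with the window, so it maps to [0, 0, 1, 1] in the
  # window's coordinate frame.
  return change_coordinate_frame(boxlist, window)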
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
def _update_valid_indices_by_removing_high_iou_boxes(
selected_indices, is_index_valid, intersect_over_union, threshold):
max_iou = np.max(intersect_over_union[:, selected_indices], axis=1)
return np.logical_and(is_index_valid, max_iou <= threshold)
| 20,918 | 36.090426 | 80 | py |
models | models-master/research/object_detection/utils/context_manager_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.object_detection.utils.context_manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.utils import context_manager
class ContextManagerTest(tf.test.TestCase):
def test_identity_context_manager(self):
with context_manager.IdentityContextManager() as identity_context:
self.assertIsNone(identity_context)
if __name__ == '__main__':
tf.test.main()
| 1,208 | 34.558824 | 80 | py |
models | models-master/research/object_detection/utils/static_shape.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to access TensorShape values.
The rank 4 tensor_shape must be of the form [batch_size, height, width, depth].
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def get_dim_as_int(dim):
"""Utility to get v1 or v2 TensorShape dim as an int.
Args:
dim: The TensorShape dimension to get as an int
Returns:
None or an int.
"""
try:
return dim.value
except AttributeError:
return dim
def get_batch_size(tensor_shape):
"""Returns batch size from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the batch size of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[0])
def get_height(tensor_shape):
"""Returns height from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the height of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[1])
def get_width(tensor_shape):
"""Returns width from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the width of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[2])
def get_depth(tensor_shape):
"""Returns depth from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the depth of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[3])
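# Illustrative sketch (added for exposition; not part of the original module):
# reading all four dimensions off a rank-4 TensorShape. TensorFlow is imported
# locally here because this module itself does not depend on it; the helper
# name is hypothetical.
def _example_static_shape_access():
  """Returns (batch, height, width, depth) for a [2, 640, 480, 3] shape."""
  import tensorflow.compat.v1 as tf # pylint: disable=g-import-not-at-top
  tensor_shape = tf.TensorShape([2, 640, 480, 3])
  return (get_batch_size(tensor_shape), get_height(tensor_shape),
          get_width(tensor_shape), get_depth(tensor_shape))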
| 2,295 | 24.230769 | 80 | py |
models | models-master/research/object_detection/utils/test_case_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.third_party.tensorflow_models.object_detection.utils.test_case."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import test_case
class TestCaseTest(test_case.TestCase):
def test_simple(self):
def graph_fn(tensora, tensorb):
return tf.tensordot(tensora, tensorb, axes=1)
tensora_np = np.ones(20)
tensorb_np = tensora_np * 2
output = self.execute(graph_fn, [tensora_np, tensorb_np])
self.assertAllClose(output, 40.0)
def test_two_outputs(self):
def graph_fn(tensora, tensorb):
return tensora + tensorb, tensora - tensorb
tensora_np = np.ones(20)
tensorb_np = tensora_np * 2
output = self.execute(graph_fn, [tensora_np, tensorb_np])
self.assertAllClose(output[0], tensora_np + tensorb_np)
self.assertAllClose(output[1], tensora_np - tensorb_np)
def test_function_with_tf_assert(self):
def compute_fn(image):
return tf.image.pad_to_bounding_box(image, 0, 0, 40, 40)
image_np = np.random.rand(2, 20, 30, 3)
output = self.execute(compute_fn, [image_np])
self.assertAllEqual(output.shape, [2, 40, 40, 3])
def test_tf2_only_test(self):
"""Set up tests only to run with TF2."""
if self.is_tf2():
def graph_fn(tensora, tensorb):
return tensora + tensorb, tensora - tensorb
tensora_np = np.ones(20)
tensorb_np = tensora_np * 2
output = self.execute_tf2(graph_fn, [tensora_np, tensorb_np])
self.assertAllClose(output[0], tensora_np + tensorb_np)
self.assertAllClose(output[1], tensora_np - tensorb_np)
def test_tpu_only_test(self):
"""Set up tests only to run with TPU."""
if self.has_tpu():
def graph_fn(tensora, tensorb):
return tensora + tensorb, tensora - tensorb
tensora_np = np.ones(20)
tensorb_np = tensora_np * 2
output = self.execute_tpu(graph_fn, [tensora_np, tensorb_np])
self.assertAllClose(output[0], tensora_np + tensorb_np)
self.assertAllClose(output[1], tensora_np - tensorb_np)
if __name__ == '__main__':
tf.test.main()
| 2,776 | 36.527027 | 87 | py |
models | models-master/research/object_detection/utils/category_util.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for importing/exporting Object Detection categories."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import tensorflow.compat.v1 as tf
def load_categories_from_csv_file(csv_path):
"""Loads categories from a csv file.
The CSV file should have one comma delimited numeric category id and string
category name pair per line. For example:
0,"cat"
1,"dog"
2,"bird"
...
Args:
csv_path: Path to the csv file to be parsed into categories.
Returns:
categories: A list of dictionaries representing all possible categories.
The categories will contain an integer 'id' field and a string
'name' field.
Raises:
ValueError: If the csv file is incorrectly formatted.
"""
categories = []
with tf.gfile.Open(csv_path, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
if not row:
continue
if len(row) != 2:
raise ValueError('Expected 2 fields per row in csv: %s' % ','.join(row))
category_id = int(row[0])
category_name = row[1]
categories.append({'id': category_id, 'name': category_name})
return categories
def save_categories_to_csv_file(categories, csv_path):
"""Saves categories to a csv file.
Args:
categories: A list of dictionaries representing categories to save to file.
Each category must contain an 'id' and 'name' field.
csv_path: Path to the csv file to be parsed into categories.
"""
categories.sort(key=lambda x: x['id'])
with tf.gfile.Open(csv_path, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"')
for category in categories:
writer.writerow([category['id'], category['name']])
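# Illustrative sketch (added for exposition; not part of the original module):
# a save/load round trip. The csv path below is hypothetical.
def _example_category_csv_round_trip():
  """Writes two categories to a csv file and reads them back sorted by id."""
  categories = [{'id': 1, 'name': 'dog'}, {'id': 0, 'name': 'cat'}]
  csv_path = '/tmp/categories.csv' # Hypothetical path used for illustration.
  save_categories_to_csv_file(categories, csv_path)
  # Returns the same id/name dictionaries; 'cat' comes first because the
  # categories are sorted by id before being written.
  return load_categories_from_csv_file(csv_path)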
| 2,518 | 31.294872 | 80 | py |
models | models-master/research/object_detection/utils/np_box_mask_list_ops_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_mask_list_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_box_mask_list
from object_detection.utils import np_box_mask_list_ops
class AreaRelatedTest(tf.test.TestCase):
def setUp(self):
boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=float)
masks1_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
dtype=np.uint8)
masks1_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
masks1 = np.stack([masks1_0, masks1_1])
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
masks2_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
dtype=np.uint8)
masks2_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
masks2_2 = np.array([[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0]],
dtype=np.uint8)
masks2 = np.stack([masks2_0, masks2_1, masks2_2])
self.box_mask_list1 = np_box_mask_list.BoxMaskList(
box_data=boxes1, mask_data=masks1)
self.box_mask_list2 = np_box_mask_list.BoxMaskList(
box_data=boxes2, mask_data=masks2)
def test_area(self):
areas = np_box_mask_list_ops.area(self.box_mask_list1)
expected_areas = np.array([8.0, 10.0], dtype=float)
self.assertAllClose(expected_areas, areas)
def test_intersection(self):
intersection = np_box_mask_list_ops.intersection(self.box_mask_list1,
self.box_mask_list2)
expected_intersection = np.array([[8.0, 0.0, 8.0], [0.0, 9.0, 7.0]],
dtype=float)
self.assertAllClose(intersection, expected_intersection)
def test_iou(self):
iou = np_box_mask_list_ops.iou(self.box_mask_list1, self.box_mask_list2)
expected_iou = np.array(
[[1.0, 0.0, 8.0 / 25.0], [0.0, 9.0 / 16.0, 7.0 / 28.0]], dtype=float)
self.assertAllClose(iou, expected_iou)
def test_ioa(self):
ioa21 = np_box_mask_list_ops.ioa(self.box_mask_list1, self.box_mask_list2)
expected_ioa21 = np.array([[1.0, 0.0, 8.0/25.0],
[0.0, 9.0/15.0, 7.0/25.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
class NonMaximumSuppressionTest(tf.test.TestCase):
def setUp(self):
boxes1 = np.array(
[[4.0, 3.0, 7.0, 6.0], [5.0, 6.0, 10.0, 10.0]], dtype=float)
boxes2 = np.array(
[[3.0, 4.0, 6.0, 8.0], [5.0, 6.0, 10.0, 10.0], [1.0, 1.0, 10.0, 10.0]],
dtype=float)
masks1 = np.array(
[[[0, 1, 0], [1, 1, 0], [0, 0, 0]], [[0, 1, 1], [0, 1, 1], [0, 1, 1]]],
dtype=np.uint8)
masks2 = np.array(
[[[0, 1, 0], [1, 1, 1], [0, 0, 0]], [[0, 1, 0], [0, 0, 1], [0, 1, 1]],
[[0, 1, 1], [0, 1, 1], [0, 1, 1]]],
dtype=np.uint8)
self.boxes1 = boxes1
self.boxes2 = boxes2
self.masks1 = masks1
self.masks2 = masks2
def test_with_no_scores_field(self):
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=self.boxes1, mask_data=self.masks1)
max_output_size = 3
iou_threshold = 0.5
with self.assertRaises(ValueError):
np_box_mask_list_ops.non_max_suppression(
box_mask_list, max_output_size, iou_threshold)
def test_nms_disabled_max_output_size_equals_one(self):
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=self.boxes2, mask_data=self.masks2)
box_mask_list.add_field('scores',
np.array([.9, .75, .6], dtype=float))
max_output_size = 1
iou_threshold = 1. # No NMS
expected_boxes = np.array([[3.0, 4.0, 6.0, 8.0]], dtype=float)
expected_masks = np.array(
[[[0, 1, 0], [1, 1, 1], [0, 0, 0]]], dtype=np.uint8)
nms_box_mask_list = np_box_mask_list_ops.non_max_suppression(
box_mask_list, max_output_size, iou_threshold)
self.assertAllClose(nms_box_mask_list.get(), expected_boxes)
self.assertAllClose(nms_box_mask_list.get_masks(), expected_masks)
def test_multiclass_nms(self):
boxes = np.array(
[[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],
dtype=np.float32)
mask0 = np.array([[0, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
mask1 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
mask2 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]],
dtype=np.uint8)
masks = np.stack([mask0, mask1, mask2])
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=masks)
scores = np.array([[-0.2, 0.1, 0.5, -0.4, 0.3],
[0.7, -0.7, 0.6, 0.2, -0.9],
[0.4, 0.34, -0.9, 0.2, 0.31]],
dtype=np.float32)
box_mask_list.add_field('scores', scores)
box_mask_list_clean = np_box_mask_list_ops.multi_class_non_max_suppression(
box_mask_list, score_thresh=0.25, iou_thresh=0.1, max_output_size=3)
scores_clean = box_mask_list_clean.get_field('scores')
classes_clean = box_mask_list_clean.get_field('classes')
boxes = box_mask_list_clean.get()
masks = box_mask_list_clean.get_masks()
expected_scores = np.array([0.7, 0.6, 0.34, 0.31])
expected_classes = np.array([0, 2, 1, 4])
expected_boxes = np.array([[0.4, 0.2, 0.8, 0.8],
[0.4, 0.2, 0.8, 0.8],
[0.6, 0.0, 1.0, 1.0],
[0.6, 0.0, 1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(scores_clean, expected_scores)
self.assertAllClose(classes_clean, expected_classes)
self.assertAllClose(boxes, expected_boxes)
if __name__ == '__main__':
tf.test.main()
| 8,156 | 40.617347 | 80 | py |
models | models-master/research/object_detection/utils/json_utils_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.image.understanding.object_detection.utils.json_utils."""
import os
import tensorflow.compat.v1 as tf
from object_detection.utils import json_utils
class JsonUtilsTest(tf.test.TestCase):
def testDumpReasonablePrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump(1.0, f, float_digits=2)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '1.00')
def testDumpPassExtraParams(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump([1.12345], f, float_digits=2, indent=3)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '[\n 1.12\n]')
def testDumpZeroPrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump(1.0, f, float_digits=0, indent=3)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '1')
def testDumpUnspecifiedPrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump(1.012345, f)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '1.012345')
def testDumpsReasonablePrecision(self):
s = json_utils.Dumps(1.12545, float_digits=2)
self.assertEqual(s, '1.13')
def testDumpsPassExtraParams(self):
s = json_utils.Dumps([1.0], float_digits=2, indent=3)
self.assertEqual(s, '[\n 1.00\n]')
def testDumpsZeroPrecision(self):
s = json_utils.Dumps(1.0, float_digits=0)
self.assertEqual(s, '1')
def testDumpsUnspecifiedPrecision(self):
s = json_utils.Dumps(1.012345)
self.assertEqual(s, '1.012345')
def testPrettyParams(self):
s = json_utils.Dumps({'v': 1.012345, 'n': 2}, **json_utils.PrettyParams())
self.assertEqual(s, '{\n "n": 2,\n "v": 1.0123\n}')
def testPrettyParamsExtraParamsInside(self):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, **json_utils.PrettyParams(allow_nan=True))
self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}')
with self.assertRaises(ValueError):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, **json_utils.PrettyParams(allow_nan=False))
def testPrettyParamsExtraParamsOutside(self):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, allow_nan=True, **json_utils.PrettyParams())
self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}')
with self.assertRaises(ValueError):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, allow_nan=False, **json_utils.PrettyParams())
if __name__ == '__main__':
tf.test.main()
| 3,598 | 35.72449 | 80 | py |
models | models-master/research/object_detection/utils/visualization_utils.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions often receive an image and perform some visualization on it.
The functions do not return a value; instead they modify the image in place.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
# Set headless-friendly backend.
import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.utils import shape_utils
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def _get_multiplier_for_color_randomness():
"""Returns a multiplier to get semi-random colors from successive indices.
This function computes a prime number, p, in the range [2, 17] that:
- is closest to len(STANDARD_COLORS) / 10
- does not divide len(STANDARD_COLORS)
If no prime numbers in that range satisfy the constraints, p is returned as 1.
Once p is established, it can be used as a multiplier to select
non-consecutive colors from STANDARD_COLORS:
colors = [(p * i) % len(STANDARD_COLORS) for i in range(20)]
"""
num_colors = len(STANDARD_COLORS)
prime_candidates = [5, 7, 11, 13, 17]
# Remove all prime candidates that divide the number of colors.
prime_candidates = [p for p in prime_candidates if num_colors % p]
if not prime_candidates:
return 1
# Return the closest prime number to num_colors / 10.
abs_distance = [np.abs(num_colors / 10. - p) for p in prime_candidates]
num_candidates = len(abs_distance)
inds = [i for _, i in sorted(zip(abs_distance, range(num_candidates)))]
return prime_candidates[inds[0]]
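# Illustrative sketch (added for exposition; not part of the original module):
# using the multiplier to pick visually distinct, non-consecutive colors, e.g.
# for successive track ids. The helper name is hypothetical.
def _example_semi_random_colors(num_ids=5):
  """Returns one color name per id, spread out across STANDARD_COLORS."""
  multiplier = _get_multiplier_for_color_randomness()
  return [STANDARD_COLORS[(multiplier * i) % len(STANDARD_COLORS)]
          for i in range(num_ids)]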
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
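# Illustrative sketch (added for exposition; not part of the original module):
# drawing a single normalized box with a label onto a blank image array.
def _example_draw_single_box():
  """Draws one labeled box on a 100x100 black image and returns the array."""
  image = np.zeros((100, 100, 3), dtype=np.uint8)
  draw_bounding_box_on_image_array(
      image, ymin=0.1, xmin=0.1, ymax=0.9, xmax=0.9,
      color='LimeGreen', thickness=2, display_str_list=['example: 87%'],
      use_normalized_coordinates=True)
  # The input array is modified in place; it is returned here for convenience.
  return image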
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
if thickness > 0:
draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
(left, top)],
width=thickness,
fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getbbox(ds)[3] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
    # font.getsize is unavailable in newer Pillow releases, so getbbox is used
    # here, consistent with the height computation above.
    text_bbox = font.getbbox(display_str)
    text_width = text_bbox[2] - text_bbox[0]
    text_height = text_bbox[3] - text_bbox[1]
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def create_visualization_fn(category_index,
include_masks=False,
include_keypoints=False,
include_keypoint_scores=False,
include_track_ids=False,
**kwargs):
"""Constructs a visualization function that can be wrapped in a py_func.
py_funcs only accept positional arguments. This function returns a suitable
function with the correct positional argument mapping. The positional
arguments in order are:
0: image
1: boxes
2: classes
3: scores
[4]: masks (optional)
[4-5]: keypoints (optional)
[4-6]: keypoint_scores (optional)
[4-7]: track_ids (optional)
-- Example 1 --
vis_only_masks_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=False,
**kwargs)
image = tf.py_func(vis_only_masks_fn,
inp=[image, boxes, classes, scores, masks],
Tout=tf.uint8)
-- Example 2 --
vis_masks_and_track_ids_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=True,
**kwargs)
image = tf.py_func(vis_masks_and_track_ids_fn,
inp=[image, boxes, classes, scores, masks, track_ids],
Tout=tf.uint8)
Args:
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
include_masks: Whether masks should be expected as a positional argument in
the returned function.
include_keypoints: Whether keypoints should be expected as a positional
argument in the returned function.
include_keypoint_scores: Whether keypoint scores should be expected as a
positional argument in the returned function.
include_track_ids: Whether track ids should be expected as a positional
argument in the returned function.
**kwargs: Additional kwargs that will be passed to
visualize_boxes_and_labels_on_image_array.
Returns:
Returns a function that only takes tensors as positional arguments.
"""
def visualization_py_func_fn(*args):
"""Visualization function that can be wrapped in a tf.py_func.
Args:
*args: First 4 positional arguments must be:
image - uint8 numpy array with shape (img_height, img_width, 3).
boxes - a numpy array of shape [N, 4].
classes - a numpy array of shape [N].
scores - a numpy array of shape [N] or None.
-- Optional positional arguments --
instance_masks - a numpy array of shape [N, image_height, image_width].
keypoints - a numpy array of shape [N, num_keypoints, 2].
keypoint_scores - a numpy array of shape [N, num_keypoints].
track_ids - a numpy array of shape [N] with unique track ids.
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid
boxes.
"""
image = args[0]
boxes = args[1]
classes = args[2]
scores = args[3]
masks = keypoints = keypoint_scores = track_ids = None
pos_arg_ptr = 4 # Positional argument for first optional tensor (masks).
if include_masks:
masks = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoints:
keypoints = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoint_scores:
keypoint_scores = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_track_ids:
track_ids = args[pos_arg_ptr]
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
keypoint_scores=keypoint_scores,
track_ids=track_ids,
**kwargs)
return visualization_py_func_fn
def draw_heatmaps_on_image(image, heatmaps):
"""Draws heatmaps on an image.
The heatmaps are handled channel by channel and different colors are used to
paint different heatmap channels.
Args:
image: a PIL.Image object.
heatmaps: a numpy array with shape [image_height, image_width, channel].
Note that the image_height and image_width should match the size of input
image.
"""
draw = ImageDraw.Draw(image)
channel = heatmaps.shape[2]
for c in range(channel):
heatmap = heatmaps[:, :, c] * 255
heatmap = heatmap.astype('uint8')
bitmap = Image.fromarray(heatmap, 'L')
bitmap.convert('1')
draw.bitmap(
xy=[(0, 0)],
bitmap=bitmap,
fill=STANDARD_COLORS[c])
def draw_heatmaps_on_image_array(image, heatmaps):
"""Overlays heatmaps to an image (numpy array).
  The function overlays the heatmaps on top of the image. The heatmap values
  will be painted with different colors depending on the channels. Similar to
  the "draw_heatmaps_on_image" function except the inputs are numpy arrays.
Args:
image: a numpy array with shape [height, width, 3].
heatmaps: a numpy array with shape [height, width, channel].
Returns:
An uint8 numpy array representing the input image painted with heatmap
colors.
"""
if not isinstance(image, np.ndarray):
image = image.numpy()
if not isinstance(heatmaps, np.ndarray):
heatmaps = heatmaps.numpy()
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_heatmaps_on_image(image_pil, heatmaps)
return np.array(image_pil)
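# Illustrative sketch (added for exposition; not part of the original module):
# overlaying a single synthetic heatmap channel on a blank image.
def _example_draw_heatmap_overlay():
  """Paints one 50x50 heatmap channel over a 50x50 black image."""
  image = np.zeros((50, 50, 3), dtype=np.uint8)
  heatmaps = np.zeros((50, 50, 1), dtype=np.float32)
  heatmaps[10:20, 10:20, 0] = 1.0 # A square blob of maximal confidence.
  # Returns a uint8 array with the blob painted in the first standard color.
  return draw_heatmaps_on_image_array(image, heatmaps)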
def draw_heatmaps_on_image_tensors(images,
heatmaps,
apply_sigmoid=False):
"""Draws heatmaps on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
heatmaps: [N, h, w, channel] float32 tensor of heatmaps. Note that the
heatmaps will be resized to match the input image size before overlaying
the heatmaps with input images. Theoretically the heatmap height width
should have the same aspect ratio as the input image to avoid potential
misalignment introduced by the image resize.
apply_sigmoid: Whether to apply a sigmoid layer on top of the heatmaps. If
the heatmaps come directly from the prediction logits, then we should
apply the sigmoid layer to make sure the values are in between [0.0, 1.0].
Returns:
4D image tensor of type uint8, with heatmaps overlaid on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
_, height, width, _ = shape_utils.combined_static_and_dynamic_shape(images)
if apply_sigmoid:
heatmaps = tf.math.sigmoid(heatmaps)
resized_heatmaps = tf.image.resize(heatmaps, size=[height, width])
elems = [images, resized_heatmaps]
def draw_heatmaps(image_and_heatmaps):
"""Draws heatmaps on image."""
image_with_heatmaps = tf.py_function(
draw_heatmaps_on_image_array,
image_and_heatmaps,
tf.uint8)
return image_with_heatmaps
images = tf.map_fn(draw_heatmaps, elems, dtype=tf.uint8, back_prop=False)
return images
def _resize_original_image(image, image_shape):
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(
image,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(image, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=None,
true_image_shape=None,
instance_masks=None,
keypoints=None,
keypoint_scores=None,
keypoint_edges=None,
track_ids=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
original_image_spatial_shape: [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: [N, 3] tensor containing the spatial size of unpadded
original_image.
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
keypoint_scores: A 3D float32 tensor of shape [N, max_detection,
num_keypoints] with keypoint scores.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: [N, max_detections] int32 tensor of unique tracks ids (i.e.
instance ids for each object). If provided, the color-coding of boxes is
dictated by these ids, and not classes.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates).
Default is True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4,
'keypoint_edges': keypoint_edges
}
if true_image_shape is None:
true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
else:
true_shapes = true_image_shape
if original_image_spatial_shape is None:
original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
else:
original_shapes = original_image_spatial_shape
visualize_boxes_fn = create_visualization_fn(
category_index,
include_masks=instance_masks is not None,
include_keypoints=keypoints is not None,
include_keypoint_scores=keypoint_scores is not None,
include_track_ids=track_ids is not None,
**visualization_keyword_args)
elems = [true_shapes, original_shapes, images, boxes, classes, scores]
if instance_masks is not None:
elems.append(instance_masks)
if keypoints is not None:
elems.append(keypoints)
if keypoint_scores is not None:
elems.append(keypoint_scores)
if track_ids is not None:
elems.append(track_ids)
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
true_shape = image_and_detections[0]
original_shape = image_and_detections[1]
if true_image_shape is not None:
image = shape_utils.pad_or_clip_nd(image_and_detections[2],
[true_shape[0], true_shape[1], 3])
if original_image_spatial_shape is not None:
image_and_detections[2] = _resize_original_image(image, original_shape)
image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],
tf.uint8)
return image_with_boxes
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
keypoint_edges=None):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example() or
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
Returns:
    A list of [1, H, 2 * W, C] uint8 tensors. The subimage on the left
corresponds to detections, while the subimage on the right corresponds to
groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
images_with_detections_list = []
# Add the batch dimension if the eval_dict is for single example.
if len(eval_dict[detection_fields.detection_classes].shape) == 1:
for key in eval_dict:
if (key != input_data_fields.original_image and
key != input_data_fields.image_additional_channels):
eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
num_gt_boxes = [-1] * eval_dict[input_data_fields.original_image].shape[0]
if input_data_fields.num_groundtruth_boxes in eval_dict:
num_gt_boxes = tf.cast(eval_dict[input_data_fields.num_groundtruth_boxes],
tf.int32)
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = tf.cast(
tf.expand_dims(
eval_dict[detection_fields.detection_masks][indx], axis=0),
tf.uint8)
keypoints = None
keypoint_scores = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = tf.expand_dims(
eval_dict[detection_fields.detection_keypoints][indx], axis=0)
if detection_fields.detection_keypoint_scores in eval_dict:
keypoint_scores = tf.expand_dims(
eval_dict[detection_fields.detection_keypoint_scores][indx], axis=0)
else:
keypoint_scores = tf.expand_dims(tf.cast(
keypoint_ops.set_keypoint_visibilities(
eval_dict[detection_fields.detection_keypoints][indx]),
dtype=tf.float32), axis=0)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = tf.cast(
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks][indx],
axis=0), tf.uint8)
groundtruth_keypoints = None
groundtruth_keypoint_scores = None
gt_kpt_vis_fld = input_data_fields.groundtruth_keypoint_visibilities
if input_data_fields.groundtruth_keypoints in eval_dict:
groundtruth_keypoints = tf.expand_dims(
eval_dict[input_data_fields.groundtruth_keypoints][indx], axis=0)
if gt_kpt_vis_fld in eval_dict:
groundtruth_keypoint_scores = tf.expand_dims(
tf.cast(eval_dict[gt_kpt_vis_fld][indx], dtype=tf.float32), axis=0)
else:
groundtruth_keypoint_scores = tf.expand_dims(tf.cast(
keypoint_ops.set_keypoint_visibilities(
eval_dict[input_data_fields.groundtruth_keypoints][indx]),
dtype=tf.float32), axis=0)
images_with_detections = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_classes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_scores][indx], axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=instance_masks,
keypoints=keypoints,
keypoint_scores=keypoint_scores,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
num_gt_boxes_i = num_gt_boxes[indx]
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx]
[:num_gt_boxes_i],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx]
[:num_gt_boxes_i],
axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx]
[:num_gt_boxes_i],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=groundtruth_keypoints,
keypoint_scores=groundtruth_keypoint_scores,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
images_to_visualize = tf.concat([images_with_detections,
images_with_groundtruth], axis=2)
if input_data_fields.image_additional_channels in eval_dict:
images_with_additional_channels_groundtruth = (
draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.image_additional_channels][indx],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx]
[:num_gt_boxes_i],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx]
[:num_gt_boxes_i],
axis=0),
tf.expand_dims(
tf.ones_like(
                          eval_dict[input_data_fields.groundtruth_classes][indx]
                          [:num_gt_boxes_i],
                          dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape]
[indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates))
images_to_visualize = tf.concat(
[images_to_visualize, images_with_additional_channels_groundtruth],
axis=2)
images_with_detections_list.append(images_to_visualize)
return images_with_detections_list
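# Example usage sketch (hypothetical names): `eval_dict` is assumed to come
# from eval_util.result_dict_for_batched_example() and `category_index` from a
# label map; the resulting tensors can be written out as image summaries.
#
#   category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
#   vis_images = draw_side_by_side_evaluation_image(
#       eval_dict, category_index, max_boxes_to_draw=20, min_score_thresh=0.2)
#   for i, image in enumerate(vis_images):
#     tf.summary.image('Detections_Left_Groundtruth_Right/' + str(i), image)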
def draw_densepose_visualizations(eval_dict,
max_boxes_to_draw=20,
min_score_thresh=0.2,
num_parts=24,
dp_coord_to_visualize=0):
"""Draws DensePose visualizations.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example().
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
num_parts: The number of different densepose parts.
    dp_coord_to_visualize: Whether to visualize v-coordinates (0) or
      u-coordinates (1) overlaid on the person masks.
Returns:
A list of [1, H, W, C] uint8 tensor, each element corresponding to an image
in the batch.
Raises:
ValueError: If `dp_coord_to_visualize` is not 0 or 1.
"""
if dp_coord_to_visualize not in (0, 1):
    raise ValueError('`dp_coord_to_visualize` must be either 0 (for v '
                     'coordinates) or 1 (for u coordinates), but instead got '
                     '{}'.format(dp_coord_to_visualize))
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
if detection_fields.detection_masks not in eval_dict:
raise ValueError('Expected `detection_masks` in `eval_dict`.')
if detection_fields.detection_surface_coords not in eval_dict:
raise ValueError('Expected `detection_surface_coords` in `eval_dict`.')
images_with_detections_list = []
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
# Note that detection masks have already been resized to the original image
# shapes, but `original_image` has not.
# TODO(ronnyvotel): Consider resizing `original_image` in
# eval_util.result_dict_for_batched_example().
true_shape = eval_dict[input_data_fields.true_image_shape][indx]
original_shape = eval_dict[
input_data_fields.original_image_spatial_shape][indx]
image = eval_dict[input_data_fields.original_image][indx]
image = shape_utils.pad_or_clip_nd(image, [true_shape[0], true_shape[1], 3])
image = _resize_original_image(image, original_shape)
scores = eval_dict[detection_fields.detection_scores][indx]
detection_masks = eval_dict[detection_fields.detection_masks][indx]
surface_coords = eval_dict[detection_fields.detection_surface_coords][indx]
def draw_densepose_py_func(image, detection_masks, surface_coords, scores):
"""Overlays part masks and surface coords on original images."""
surface_coord_image = np.copy(image)
for i, (score, surface_coord, mask) in enumerate(
zip(scores, surface_coords, detection_masks)):
if i == max_boxes_to_draw:
break
if score > min_score_thresh:
draw_part_mask_on_image_array(image, mask, num_parts=num_parts)
draw_float_channel_on_image_array(
surface_coord_image, surface_coord[:, :, dp_coord_to_visualize],
mask)
return np.concatenate([image, surface_coord_image], axis=1)
image_with_densepose = tf.py_func(
draw_densepose_py_func,
[image, detection_masks, surface_coords, scores],
tf.uint8)
images_with_detections_list.append(
image_with_densepose[tf.newaxis, :, :, :])
return images_with_detections_list
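# Example usage sketch (hypothetical `eval_dict`): assumes the dictionary
# already contains `detection_masks` and `detection_surface_coords`, as
# required above.
#
#   dp_images = draw_densepose_visualizations(
#       eval_dict, max_boxes_to_draw=10, min_score_thresh=0.5,
#       dp_coord_to_visualize=1)  # overlay u-coordinates
#   for i, image in enumerate(dp_images):
#     tf.summary.image('densepose/' + str(i), image)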
def draw_keypoints_on_image_array(image,
keypoints,
keypoint_scores=None,
min_score_thresh=0.5,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
    keypoint_scores: a numpy array with shape [num_keypoints]. If provided,
      only those keypoints with a score above min_score_thresh will be
      visualized.
min_score_thresh: A scalar indicating the minimum keypoint score required
for a keypoint to be visualized. Note that keypoint_scores must be
provided for this threshold to take effect.
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
    keypoint_edge_color: color to draw the keypoint edges with. Default is
      green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil,
keypoints,
keypoint_scores=keypoint_scores,
min_score_thresh=min_score_thresh,
color=color,
radius=radius,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=keypoint_edge_color,
keypoint_edge_width=keypoint_edge_width)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
keypoint_scores=None,
min_score_thresh=0.5,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
keypoint_scores: a numpy array with shape [num_keypoints].
min_score_thresh: a score threshold for visualizing keypoints. Only used if
keypoint_scores is provided.
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
    keypoint_edge_color: color to draw the keypoint edges with. Default is
      green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints = np.array(keypoints)
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
if keypoint_scores is not None:
keypoint_scores = np.array(keypoint_scores)
valid_kpt = np.greater(keypoint_scores, min_score_thresh)
else:
valid_kpt = np.where(np.any(np.isnan(keypoints), axis=1),
np.zeros_like(keypoints[:, 0]),
np.ones_like(keypoints[:, 0]))
valid_kpt = [v for v in valid_kpt]
for keypoint_x, keypoint_y, valid in zip(keypoints_x, keypoints_y, valid_kpt):
if valid:
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color, fill=color)
if keypoint_edges is not None:
for keypoint_start, keypoint_end in keypoint_edges:
if (keypoint_start < 0 or keypoint_start >= len(keypoints) or
keypoint_end < 0 or keypoint_end >= len(keypoints)):
continue
if not (valid_kpt[keypoint_start] and valid_kpt[keypoint_end]):
continue
edge_coordinates = [
keypoints_x[keypoint_start], keypoints_y[keypoint_start],
keypoints_x[keypoint_end], keypoints_y[keypoint_end]
]
draw.line(
edge_coordinates, fill=keypoint_edge_color, width=keypoint_edge_width)
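# Example usage sketch (synthetic values): drawing two keypoints joined by an
# edge on a blank image; coordinates are normalized [y, x] pairs.
#
#   image = np.zeros((100, 100, 3), dtype=np.uint8)
#   keypoints = np.array([[0.25, 0.25], [0.75, 0.75]])
#   draw_keypoints_on_image_array(
#       image, keypoints, keypoint_scores=np.array([0.9, 0.8]),
#       min_score_thresh=0.5, color='yellow', radius=3,
#       use_normalized_coordinates=True, keypoint_edges=[(0, 1)])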
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with
      values of either 0 or 1.
    color: color to draw the mask with. Default is red.
    alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0*alpha*(mask > 0))).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
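# Example usage sketch (synthetic values): blending a binary mask into an
# image; `image` is modified in place.
#
#   image = np.zeros((10, 10, 3), dtype=np.uint8)
#   mask = np.zeros((10, 10), dtype=np.uint8)
#   mask[2:8, 2:8] = 1
#   draw_mask_on_image_array(image, mask, color='blue', alpha=0.4)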
def draw_part_mask_on_image_array(image, mask, alpha=0.4, num_parts=24):
"""Draws part mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with
1-indexed parts (0 for background).
alpha: transparency value between 0 and 1 (default: 0.4)
num_parts: the maximum number of parts that may exist in the image (default
24 for DensePose).
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
pil_image = Image.fromarray(image)
part_colors = np.zeros_like(image)
mask_1_channel = mask[:, :, np.newaxis]
for i, color in enumerate(STANDARD_COLORS[:num_parts]):
rgb = np.array(ImageColor.getrgb(color), dtype=np.uint8)
part_colors += (mask_1_channel == i + 1) * rgb[np.newaxis, np.newaxis, :]
pil_part_colors = Image.fromarray(np.uint8(part_colors)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0 * alpha * (mask > 0))).convert('L')
pil_image = Image.composite(pil_part_colors, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
def draw_float_channel_on_image_array(image, channel, mask, alpha=0.9,
cmap='YlGn'):
"""Draws a floating point channel on an image array.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    channel: float32 numpy array with shape (img_height, img_width). The
      values should be in the range [0, 1], and will be mapped to colors using
      the provided colormap `cmap` argument.
    mask: a uint8 numpy array of shape (img_height, img_width) with
1-indexed parts (0 for background).
alpha: transparency value between 0 and 1 (default: 0.9)
cmap: string with the colormap to use.
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if channel.dtype != np.float32:
raise ValueError('`channel` not of type np.float32')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if image.shape[:2] != channel.shape:
raise ValueError('The image has spatial dimensions %s but the channel has '
'dimensions %s' % (image.shape[:2], channel.shape))
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
cm = plt.get_cmap(cmap)
pil_image = Image.fromarray(image)
colored_channel = cm(channel)[:, :, :3]
pil_colored_channel = Image.fromarray(
np.uint8(colored_channel * 255)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0 * alpha * (mask > 0))).convert('L')
pil_image = Image.composite(pil_colored_channel, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
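# Example usage sketch (synthetic values): overlaying a part mask and a float
# channel, mirroring how draw_densepose_py_func combines the two helpers above.
#
#   image = np.zeros((10, 10, 3), dtype=np.uint8)
#   part_mask = np.zeros((10, 10), dtype=np.uint8)
#   part_mask[2:8, 2:8] = 3  # pixels belonging to part id 3
#   v_coords = np.linspace(0.0, 1.0, 100).reshape(10, 10).astype(np.float32)
#   draw_part_mask_on_image_array(image, part_mask, num_parts=24)
#   draw_float_channel_on_image_array(image, v_coords, part_mask, cmap='YlGn')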
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
keypoint_scores=None,
keypoint_edges=None,
track_ids=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
mask_alpha=.4,
groundtruth_box_visualization_color='black',
skip_boxes=False,
skip_scores=False,
skip_labels=False,
skip_track_ids=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
  This function groups boxes that correspond to the same location, creates a
  display string for each detection, and overlays these on the image. Note
  that this function modifies the image in place and returns the same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then
this function assumes that the boxes to be plotted are groundtruth
boxes and plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a uint8 numpy array of shape [N, image_height, image_width],
can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can
be None.
keypoint_scores: a numpy array of shape [N, num_keypoints], can be None.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: a numpy array of shape [N] with unique track ids. If provided,
color-coding of boxes will be determined by these ids, and not the class
indices.
use_normalized_coordinates: whether boxes is to be interpreted as
normalized coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
all boxes.
min_score_thresh: minimum score threshold for a box or keypoint to be
visualized.
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
mask_alpha: transparency value between 0 and 1 (default: 0.4).
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_boxes: whether to skip the drawing of bounding boxes.
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
skip_track_ids: whether to skip track id when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
box_to_keypoint_scores_map = collections.defaultdict(list)
box_to_track_ids_map = {}
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(boxes.shape[0]):
if max_boxes_to_draw == len(box_to_color_map):
break
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if keypoint_scores is not None:
box_to_keypoint_scores_map[box].extend(keypoint_scores[i])
if track_ids is not None:
box_to_track_ids_map[box] = track_ids[i]
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in six.viewkeys(category_index):
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(round(100*scores[i]))
else:
display_str = '{}: {}%'.format(display_str, round(100*scores[i]))
if not skip_track_ids and track_ids is not None:
if not display_str:
display_str = 'ID {}'.format(track_ids[i])
else:
display_str = '{}: ID {}'.format(display_str, track_ids[i])
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
elif track_ids is not None:
          prime_multiplier = _get_multiplier_for_color_randomness()
          box_to_color_map[box] = STANDARD_COLORS[
              (prime_multiplier * track_ids[i]) % len(STANDARD_COLORS)]
else:
box_to_color_map[box] = STANDARD_COLORS[
classes[i] % len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image,
box_to_instance_masks_map[box],
color=color,
alpha=mask_alpha
)
if instance_boundaries is not None:
draw_mask_on_image_array(
image,
box_to_instance_boundaries_map[box],
color='red',
alpha=1.0
)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=0 if skip_boxes else line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
keypoint_scores_for_box = None
if box_to_keypoint_scores_map:
keypoint_scores_for_box = box_to_keypoint_scores_map[box]
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
keypoint_scores_for_box,
min_score_thresh=min_score_thresh,
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=color,
keypoint_edge_width=line_thickness // 2)
return image
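# Example usage sketch (synthetic values): overlaying a single detection;
# `category_index` below is a made-up two-class label map and boxes are
# normalized [ymin, xmin, ymax, xmax].
#
#   image = np.zeros((200, 200, 3), dtype=np.uint8)
#   boxes = np.array([[0.1, 0.1, 0.6, 0.6]])
#   classes = np.array([1])
#   scores = np.array([0.87])
#   category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
#   visualize_boxes_and_labels_on_image_array(
#       image, boxes, classes, scores, category_index,
#       use_normalized_coordinates=True, min_score_thresh=0.5)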
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
/ cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot(1, 1, 1)
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot(1, 1, 1)
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(
fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
tf.summary.image(name, hist_plot)
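# Example usage sketch (hypothetical tensor): `box_areas` stands for any 1-D
# float32 tensor computed elsewhere in the graph.
#
#   add_cdf_image_summary(box_areas, 'BoxAreaCDF')
#   add_hist_image_summary(box_areas, bins=np.linspace(0.0, 1.0, 11),
#                          name='BoxAreaHistogram')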
class EvalMetricOpsVisualization(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract base class responsible for visualizations during evaluation.
Currently, summary images are not run during evaluation. One way to produce
evaluation images in Tensorboard is to provide tf.summary.image strings as
`value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. This class is
responsible for accruing images (with overlaid detections and groundtruth)
and returning a dictionary that can be passed to `eval_metric_ops`.
"""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='evaluation_image',
keypoint_edges=None):
"""Creates an EvalMetricOpsVisualization.
Args:
category_index: A category index (dictionary) produced from a labelmap.
max_examples_to_draw: The maximum number of example summaries to produce.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
summary_name_prefix: A string prefix for each image summary.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
"""
self._category_index = category_index
self._max_examples_to_draw = max_examples_to_draw
self._max_boxes_to_draw = max_boxes_to_draw
self._min_score_thresh = min_score_thresh
self._use_normalized_coordinates = use_normalized_coordinates
self._summary_name_prefix = summary_name_prefix
self._keypoint_edges = keypoint_edges
self._images = []
def clear(self):
self._images = []
def add_images(self, images):
"""Store a list of images, each with shape [1, H, W, C]."""
if len(self._images) >= self._max_examples_to_draw:
return
# Store images and clip list if necessary.
self._images.extend(images)
if len(self._images) > self._max_examples_to_draw:
self._images[self._max_examples_to_draw:] = []
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns metric ops for use in tf.estimator.EstimatorSpec.
Args:
eval_dict: A dictionary that holds an image, groundtruth, and detections
for a batched example. Note that, we use only the first example for
visualization. See eval_util.result_dict_for_batched_example() for a
convenient method for constructing such a dictionary. The dictionary
contains
fields.InputDataFields.original_image: [batch_size, H, W, 3] image.
fields.InputDataFields.original_image_spatial_shape: [batch_size, 2]
tensor containing the size of the original image.
fields.InputDataFields.true_image_shape: [batch_size, 3]
          tensor containing the spatial size of the unpadded original image.
fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4]
float32 tensor with groundtruth boxes in range [0.0, 1.0].
fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes]
int64 tensor with 1-indexed groundtruth classes.
fields.InputDataFields.groundtruth_instance_masks - (optional)
[batch_size, num_boxes, H, W] int64 tensor with instance masks.
fields.InputDataFields.groundtruth_keypoints - (optional)
[batch_size, num_boxes, num_keypoints, 2] float32 tensor with
keypoint coordinates in format [y, x].
fields.InputDataFields.groundtruth_keypoint_visibilities - (optional)
[batch_size, num_boxes, num_keypoints] bool tensor with
keypoint visibilities.
fields.DetectionResultFields.detection_boxes - [batch_size,
max_num_boxes, 4] float32 tensor with detection boxes in range [0.0,
1.0].
fields.DetectionResultFields.detection_classes - [batch_size,
max_num_boxes] int64 tensor with 1-indexed detection classes.
fields.DetectionResultFields.detection_scores - [batch_size,
max_num_boxes] float32 tensor with detection scores.
fields.DetectionResultFields.detection_masks - (optional) [batch_size,
max_num_boxes, H, W] float32 tensor of binarized masks.
fields.DetectionResultFields.detection_keypoints - (optional)
[batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with
keypoints.
fields.DetectionResultFields.detection_keypoint_scores - (optional)
[batch_size, max_num_boxes, num_keypoints] float32 tensor with
keypoints scores.
Returns:
A dictionary of image summary names to tuple of (value_op, update_op). The
`update_op` is the same for all items in the dictionary, and is
responsible for saving a single side-by-side image with detections and
groundtruth. Each `value_op` holds the tf.summary.image string for a given
image.
"""
if self._max_examples_to_draw == 0:
return {}
images = self.images_from_evaluation_dict(eval_dict)
def get_images():
"""Returns a list of images, padded to self._max_images_to_draw."""
images = self._images
while len(images) < self._max_examples_to_draw:
images.append(np.array(0, dtype=np.uint8))
self.clear()
return images
def image_summary_or_default_string(summary_name, image):
"""Returns image summaries for non-padded elements."""
return tf.cond(
tf.equal(tf.size(tf.shape(image)), 4),
lambda: tf.summary.image(summary_name, image),
lambda: tf.constant(''))
if tf.executing_eagerly():
update_op = self.add_images([[images[0]]])
image_tensors = get_images()
else:
update_op = tf.py_func(self.add_images, [[images[0]]], [])
image_tensors = tf.py_func(
get_images, [], [tf.uint8] * self._max_examples_to_draw)
eval_metric_ops = {}
for i, image in enumerate(image_tensors):
summary_name = self._summary_name_prefix + '/' + str(i)
value_op = image_summary_or_default_string(summary_name, image)
eval_metric_ops[summary_name] = (value_op, update_op)
return eval_metric_ops
@abc.abstractmethod
def images_from_evaluation_dict(self, eval_dict):
"""Converts evaluation dictionary into a list of image tensors.
To be overridden by implementations.
Args:
eval_dict: A dictionary with all the necessary information for producing
visualizations.
Returns:
A list of [1, H, W, C] uint8 tensors.
"""
raise NotImplementedError
class VisualizeSingleFrameDetections(EvalMetricOpsVisualization):
"""Class responsible for single-frame object detection visualizations."""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='Detections_Left_Groundtruth_Right',
keypoint_edges=None):
super(VisualizeSingleFrameDetections, self).__init__(
category_index=category_index,
max_examples_to_draw=max_examples_to_draw,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates,
summary_name_prefix=summary_name_prefix,
keypoint_edges=keypoint_edges)
def images_from_evaluation_dict(self, eval_dict):
return draw_side_by_side_evaluation_image(eval_dict, self._category_index,
self._max_boxes_to_draw,
self._min_score_thresh,
self._use_normalized_coordinates,
self._keypoint_edges)
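# Example usage sketch (hypothetical names): `category_index` and `eval_dict`
# are assumed to come from a label map and
# eval_util.result_dict_for_batched_example(), respectively.
#
#   vis_metric = VisualizeSingleFrameDetections(
#       category_index, max_examples_to_draw=5, max_boxes_to_draw=20,
#       min_score_thresh=0.2)
#   eval_metric_ops = vis_metric.get_estimator_eval_metric_ops(eval_dict)
#   # eval_metric_ops maps summary names to (value_op, update_op) pairs and can
#   # be merged into the dict passed to tf.estimator.EstimatorSpec.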
| 64,217 | 41.276498 | 80 | py |
models | models-master/research/object_detection/utils/dataset_util_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.dataset_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
from object_detection.utils import dataset_util
class DatasetUtilTest(tf.test.TestCase):
def test_read_examples_list(self):
example_list_data = """example1 1\nexample2 2"""
example_list_path = os.path.join(self.get_temp_dir(), 'examples.txt')
with tf.gfile.Open(example_list_path, 'wb') as f:
f.write(example_list_data)
examples = dataset_util.read_examples_list(example_list_path)
self.assertListEqual(['example1', 'example2'], examples)
if __name__ == '__main__':
tf.test.main()
| 1,416 | 32.738095 | 80 | py |
models | models-master/research/object_detection/utils/per_image_evaluation.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluate Object Detection result on a single image.
Annotates each detected result as a true positive or false positive according
to a predefined IOU threshold. Non Maximum Suppression is used by default.
Multi-class detection is supported by default.
Based on the settings, per image evaluation is either performed on boxes or
on object masks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
from object_detection.utils import np_box_mask_list
from object_detection.utils import np_box_mask_list_ops
class PerImageEvaluation(object):
"""Evaluate detection result of a single image."""
def __init__(self,
num_groundtruth_classes,
matching_iou_threshold=0.5,
nms_iou_threshold=0.3,
nms_max_output_boxes=50,
group_of_weight=0.0):
"""Initialized PerImageEvaluation by evaluation parameters.
Args:
num_groundtruth_classes: Number of ground truth object classes
      matching_iou_threshold: A ratio of area intersection to union, which is
        the threshold used to decide whether a detection is a true positive.
nms_iou_threshold: IOU threshold used in Non Maximum Suppression.
nms_max_output_boxes: Number of maximum output boxes in NMS.
group_of_weight: Weight of the group-of boxes.
"""
self.matching_iou_threshold = matching_iou_threshold
self.nms_iou_threshold = nms_iou_threshold
self.nms_max_output_boxes = nms_max_output_boxes
self.num_groundtruth_classes = num_groundtruth_classes
self.group_of_weight = group_of_weight
def compute_object_detection_metrics(self,
detected_boxes,
detected_scores,
detected_class_labels,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list,
detected_masks=None,
groundtruth_masks=None):
"""Evaluates detections as being tp, fp or weighted from a single image.
The evaluation is done in two stages:
1. All detections are matched to non group-of boxes; true positives are
determined and detections matched to difficult boxes are ignored.
2. Detections that are determined as false positives are matched against
group-of boxes and weighted if matched.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions. Each row is of the format [y_min,
x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing the
confidence scores of the detected N object instances.
      detected_class_labels: An integer numpy array of shape [N, 1],
        representing the class labels of the detected N object instances.
groundtruth_boxes: A float numpy array of shape [M, 4], representing M
regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag
detected_masks: (optional) A uint8 numpy array of shape [N, height,
width]. If not None, the metrics will be computed based on masks.
groundtruth_masks: (optional) A uint8 numpy array of shape [M, height,
width]. Can have empty masks, i.e. where all values are 0.
Returns:
scores: A list of C float numpy arrays. Each numpy array is of
shape [K, 1], representing K scores detected with object class
label c
tp_fp_labels: A list of C boolean numpy arrays. Each numpy array
is of shape [K, 1], representing K True/False positive label of
object instances detected with class label c
is_class_correctly_detected_in_image: a numpy integer array of
        shape [C, 1], indicating whether the corresponding class has at least
one instance being correctly detected in the image
"""
detected_boxes, detected_scores, detected_class_labels, detected_masks = (
self._remove_invalid_boxes(detected_boxes, detected_scores,
detected_class_labels, detected_masks))
scores, tp_fp_labels = self._compute_tp_fp(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
groundtruth_is_difficult_list=groundtruth_is_difficult_list,
groundtruth_is_group_of_list=groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks)
is_class_correctly_detected_in_image = self._compute_cor_loc(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks)
return scores, tp_fp_labels, is_class_correctly_detected_in_image
def _compute_cor_loc(self,
detected_boxes,
detected_scores,
detected_class_labels,
groundtruth_boxes,
groundtruth_class_labels,
detected_masks=None,
groundtruth_masks=None):
"""Compute CorLoc score for object detection result.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions. Each row is of the format [y_min,
x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing the
confidence scores of the detected N object instances.
      detected_class_labels: An integer numpy array of shape [N, 1],
        representing the class labels of the detected N object instances.
groundtruth_boxes: A float numpy array of shape [M, 4], representing M
regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
detected_masks: (optional) A uint8 numpy array of shape [N, height,
width]. If not None, the scores will be computed based on masks.
groundtruth_masks: (optional) A uint8 numpy array of shape [M, height,
width].
Returns:
is_class_correctly_detected_in_image: a numpy integer array of
        shape [C, 1], indicating whether the corresponding class has at least
one instance being correctly detected in the image
Raises:
ValueError: If detected masks is not None but groundtruth masks are None,
or the other way around.
"""
if (detected_masks is not None and
groundtruth_masks is None) or (detected_masks is None and
groundtruth_masks is not None):
raise ValueError(
'If `detected_masks` is provided, then `groundtruth_masks` should '
'also be provided.')
is_class_correctly_detected_in_image = np.zeros(
self.num_groundtruth_classes, dtype=int)
for i in range(self.num_groundtruth_classes):
(gt_boxes_at_ith_class, gt_masks_at_ith_class,
detected_boxes_at_ith_class, detected_scores_at_ith_class,
detected_masks_at_ith_class) = self._get_ith_class_arrays(
detected_boxes, detected_scores, detected_masks,
detected_class_labels, groundtruth_boxes, groundtruth_masks,
groundtruth_class_labels, i)
is_class_correctly_detected_in_image[i] = (
self._compute_is_class_correctly_detected_in_image(
detected_boxes=detected_boxes_at_ith_class,
detected_scores=detected_scores_at_ith_class,
groundtruth_boxes=gt_boxes_at_ith_class,
detected_masks=detected_masks_at_ith_class,
groundtruth_masks=gt_masks_at_ith_class))
return is_class_correctly_detected_in_image
def _compute_is_class_correctly_detected_in_image(self,
detected_boxes,
detected_scores,
groundtruth_boxes,
detected_masks=None,
groundtruth_masks=None):
"""Compute CorLoc score for a single class.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
detected_masks: (optional) A np.uint8 numpy array of shape [N, height,
width]. If not None, the scores will be computed based on masks.
groundtruth_masks: (optional) A np.uint8 numpy array of shape [M, height,
width].
Returns:
is_class_correctly_detected_in_image: An integer 1 or 0 denoting whether a
class is correctly detected in the image or not
"""
if detected_boxes.size > 0:
if groundtruth_boxes.size > 0:
max_score_id = np.argmax(detected_scores)
mask_mode = False
if detected_masks is not None and groundtruth_masks is not None:
mask_mode = True
if mask_mode:
detected_boxlist = np_box_mask_list.BoxMaskList(
box_data=np.expand_dims(detected_boxes[max_score_id], axis=0),
mask_data=np.expand_dims(detected_masks[max_score_id], axis=0))
gt_boxlist = np_box_mask_list.BoxMaskList(
box_data=groundtruth_boxes, mask_data=groundtruth_masks)
iou = np_box_mask_list_ops.iou(detected_boxlist, gt_boxlist)
else:
detected_boxlist = np_box_list.BoxList(
np.expand_dims(detected_boxes[max_score_id, :], axis=0))
gt_boxlist = np_box_list.BoxList(groundtruth_boxes)
iou = np_box_list_ops.iou(detected_boxlist, gt_boxlist)
if np.max(iou) >= self.matching_iou_threshold:
return 1
return 0
def _compute_tp_fp(self,
detected_boxes,
detected_scores,
detected_class_labels,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list,
detected_masks=None,
groundtruth_masks=None):
"""Labels true/false positives of detections of an image across all classes.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions. Each row is of the format [y_min,
x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing the
confidence scores of the detected N object instances.
      detected_class_labels: An integer numpy array of shape [N, 1],
        representing the class labels of the detected N object instances.
groundtruth_boxes: A float numpy array of shape [M, 4], representing M
regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag
detected_masks: (optional) A np.uint8 numpy array of shape [N, height,
width]. If not None, the scores will be computed based on masks.
groundtruth_masks: (optional) A np.uint8 numpy array of shape [M, height,
width].
Returns:
result_scores: A list of float numpy arrays. Each numpy array is of
shape [K, 1], representing K scores detected with object class
label c
result_tp_fp_labels: A list of boolean numpy array. Each numpy array is of
shape [K, 1], representing K True/False positive label of object
instances detected with class label c
Raises:
ValueError: If detected masks is not None but groundtruth masks are None,
or the other way around.
"""
if detected_masks is not None and groundtruth_masks is None:
raise ValueError(
'Detected masks is available but groundtruth masks is not.')
if detected_masks is None and groundtruth_masks is not None:
raise ValueError(
'Groundtruth masks is available but detected masks is not.')
result_scores = []
result_tp_fp_labels = []
for i in range(self.num_groundtruth_classes):
groundtruth_is_difficult_list_at_ith_class = (
groundtruth_is_difficult_list[groundtruth_class_labels == i])
groundtruth_is_group_of_list_at_ith_class = (
groundtruth_is_group_of_list[groundtruth_class_labels == i])
(gt_boxes_at_ith_class, gt_masks_at_ith_class,
detected_boxes_at_ith_class, detected_scores_at_ith_class,
detected_masks_at_ith_class) = self._get_ith_class_arrays(
detected_boxes, detected_scores, detected_masks,
detected_class_labels, groundtruth_boxes, groundtruth_masks,
groundtruth_class_labels, i)
scores, tp_fp_labels = self._compute_tp_fp_for_single_class(
detected_boxes=detected_boxes_at_ith_class,
detected_scores=detected_scores_at_ith_class,
groundtruth_boxes=gt_boxes_at_ith_class,
groundtruth_is_difficult_list=groundtruth_is_difficult_list_at_ith_class,
groundtruth_is_group_of_list=groundtruth_is_group_of_list_at_ith_class,
detected_masks=detected_masks_at_ith_class,
groundtruth_masks=gt_masks_at_ith_class)
result_scores.append(scores)
result_tp_fp_labels.append(tp_fp_labels)
return result_scores, result_tp_fp_labels
def _get_overlaps_and_scores_mask_mode(self, detected_boxes, detected_scores,
detected_masks, groundtruth_boxes,
groundtruth_masks,
groundtruth_is_group_of_list):
"""Computes overlaps and scores between detected and groudntruth masks.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
detected_masks: A uint8 numpy array of shape [N, height, width]. If not
None, the scores will be computed based on masks.
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
groundtruth_masks: A uint8 numpy array of shape [M, height, width].
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag. If a groundtruth box is
group-of box, every detection matching this box is ignored.
Returns:
iou: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If
gt_non_group_of_boxlist.num_boxes() == 0 it will be None.
ioa: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If
gt_group_of_boxlist.num_boxes() == 0 it will be None.
scores: The score of the detected boxlist.
num_boxes: Number of non-maximum suppressed detected boxes.
"""
detected_boxlist = np_box_mask_list.BoxMaskList(
box_data=detected_boxes, mask_data=detected_masks)
detected_boxlist.add_field('scores', detected_scores)
detected_boxlist = np_box_mask_list_ops.non_max_suppression(
detected_boxlist, self.nms_max_output_boxes, self.nms_iou_threshold)
gt_non_group_of_boxlist = np_box_mask_list.BoxMaskList(
box_data=groundtruth_boxes[~groundtruth_is_group_of_list],
mask_data=groundtruth_masks[~groundtruth_is_group_of_list])
gt_group_of_boxlist = np_box_mask_list.BoxMaskList(
box_data=groundtruth_boxes[groundtruth_is_group_of_list],
mask_data=groundtruth_masks[groundtruth_is_group_of_list])
iou = np_box_mask_list_ops.iou(detected_boxlist, gt_non_group_of_boxlist)
ioa = np.transpose(
np_box_mask_list_ops.ioa(gt_group_of_boxlist, detected_boxlist))
scores = detected_boxlist.get_field('scores')
num_boxes = detected_boxlist.num_boxes()
return iou, ioa, scores, num_boxes
def _get_overlaps_and_scores_box_mode(self, detected_boxes, detected_scores,
groundtruth_boxes,
groundtruth_is_group_of_list):
"""Computes overlaps and scores between detected and groudntruth boxes.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag. If a groundtruth box is
group-of box, every detection matching this box is ignored.
Returns:
iou: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If
gt_non_group_of_boxlist.num_boxes() == 0 it will be None.
ioa: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If
gt_group_of_boxlist.num_boxes() == 0 it will be None.
scores: The score of the detected boxlist.
num_boxes: Number of non-maximum suppressed detected boxes.
"""
detected_boxlist = np_box_list.BoxList(detected_boxes)
detected_boxlist.add_field('scores', detected_scores)
detected_boxlist = np_box_list_ops.non_max_suppression(
detected_boxlist, self.nms_max_output_boxes, self.nms_iou_threshold)
gt_non_group_of_boxlist = np_box_list.BoxList(
groundtruth_boxes[~groundtruth_is_group_of_list])
gt_group_of_boxlist = np_box_list.BoxList(
groundtruth_boxes[groundtruth_is_group_of_list])
iou = np_box_list_ops.iou(detected_boxlist, gt_non_group_of_boxlist)
ioa = np.transpose(
np_box_list_ops.ioa(gt_group_of_boxlist, detected_boxlist))
scores = detected_boxlist.get_field('scores')
num_boxes = detected_boxlist.num_boxes()
return iou, ioa, scores, num_boxes
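  # Worked example (illustrative numbers): for a detected box [0, 0, 1, 1] and
  # a group-of groundtruth box [0, 0, 2, 2], IOU = 1 / 4 = 0.25, while the IOA
  # used for group-of matching is the intersection over the detection's own
  # area, i.e. 1 / 1 = 1.0 (assuming np_box_list_ops.ioa normalizes by the area
  # of its second argument, which is then transposed above). A small detection
  # falling inside a large group-of region can therefore still be matched.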
def _compute_tp_fp_for_single_class(self,
detected_boxes,
detected_scores,
groundtruth_boxes,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list,
detected_masks=None,
groundtruth_masks=None):
"""Labels boxes detected with the same class from the same image as tp/fp.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not. If a
groundtruth box is difficult, every detection matching this box is
ignored.
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag. If a groundtruth box is
group-of box, every detection matching this box is ignored.
detected_masks: (optional) A uint8 numpy array of shape [N, height,
width]. If not None, the scores will be computed based on masks.
groundtruth_masks: (optional) A uint8 numpy array of shape [M, height,
width].
Returns:
Two arrays of the same size, containing all boxes that were evaluated as
      being true positives or false positives; if a box is matched to a difficult
box or to a group-of box, it is ignored.
scores: A numpy array representing the detection scores.
tp_fp_labels: a boolean numpy array indicating whether a detection is a
true positive.
"""
if detected_boxes.size == 0:
return np.array([], dtype=float), np.array([], dtype=bool)
mask_mode = False
if detected_masks is not None and groundtruth_masks is not None:
mask_mode = True
iou = np.ndarray([0, 0])
ioa = np.ndarray([0, 0])
iou_mask = np.ndarray([0, 0])
ioa_mask = np.ndarray([0, 0])
if mask_mode:
# For Instance Segmentation Evaluation on Open Images V5, not all boxed
# instances have corresponding segmentation annotations. Those boxes that
      # don't have segmentation annotations are represented as empty masks in
      # the groundtruth_masks ndarray.
mask_presence_indicator = (np.sum(groundtruth_masks, axis=(1, 2)) > 0)
(iou_mask, ioa_mask, scores,
num_detected_boxes) = self._get_overlaps_and_scores_mask_mode(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_masks=detected_masks,
groundtruth_boxes=groundtruth_boxes[mask_presence_indicator, :],
groundtruth_masks=groundtruth_masks[mask_presence_indicator, :],
groundtruth_is_group_of_list=groundtruth_is_group_of_list[
mask_presence_indicator])
if sum(mask_presence_indicator) < len(mask_presence_indicator):
# Not all masks are present - some masks are empty
(iou, ioa, _,
num_detected_boxes) = self._get_overlaps_and_scores_box_mode(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
groundtruth_boxes=groundtruth_boxes[~mask_presence_indicator, :],
groundtruth_is_group_of_list=groundtruth_is_group_of_list[
~mask_presence_indicator])
num_detected_boxes = detected_boxes.shape[0]
else:
mask_presence_indicator = np.zeros(
groundtruth_is_group_of_list.shape, dtype=bool)
(iou, ioa, scores,
num_detected_boxes) = self._get_overlaps_and_scores_box_mode(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
groundtruth_boxes=groundtruth_boxes,
groundtruth_is_group_of_list=groundtruth_is_group_of_list)
if groundtruth_boxes.size == 0:
return scores, np.zeros(num_detected_boxes, dtype=bool)
tp_fp_labels = np.zeros(num_detected_boxes, dtype=bool)
is_matched_to_box = np.zeros(num_detected_boxes, dtype=bool)
is_matched_to_difficult = np.zeros(num_detected_boxes, dtype=bool)
is_matched_to_group_of = np.zeros(num_detected_boxes, dtype=bool)
def compute_match_iou(iou, groundtruth_nongroup_of_is_difficult_list,
is_box):
"""Computes TP/FP for non group-of box matching.
The function updates the following local variables:
        tp_fp_labels - detections matched to a not-yet-detected, non-difficult
          groundtruth box are marked as true positives.
        is_matched_to_difficult - detections that are matched to a difficult
          groundtruth box are flagged so they can be ignored.
is_matched_to_box - the detections that were processed at this stage are
marked as is_box.
Args:
iou: intersection-over-union matrix [num_gt_boxes]x[num_det_boxes].
groundtruth_nongroup_of_is_difficult_list: boolean that specifies if gt
box is difficult.
is_box: boolean that specifies if currently boxes or masks are
processed.
"""
max_overlap_gt_ids = np.argmax(iou, axis=1)
is_gt_detected = np.zeros(iou.shape[1], dtype=bool)
for i in range(num_detected_boxes):
gt_id = max_overlap_gt_ids[i]
is_evaluatable = (not tp_fp_labels[i] and
not is_matched_to_difficult[i] and
iou[i, gt_id] >= self.matching_iou_threshold and
not is_matched_to_group_of[i])
if is_evaluatable:
if not groundtruth_nongroup_of_is_difficult_list[gt_id]:
if not is_gt_detected[gt_id]:
tp_fp_labels[i] = True
is_gt_detected[gt_id] = True
is_matched_to_box[i] = is_box
else:
is_matched_to_difficult[i] = True
def compute_match_ioa(ioa, is_box):
"""Computes TP/FP for group-of box matching.
The function updates the following local variables:
        is_matched_to_group_of - detections matched to a group-of groundtruth
          box are flagged.
is_matched_to_box - the detections that were processed at this stage are
marked as is_box.
Args:
ioa: intersection-over-area matrix [num_gt_boxes]x[num_det_boxes].
is_box: boolean that specifies if currently boxes or masks are
processed.
Returns:
        scores_group_of: scores of detections matched to group-of boxes, with
          shape [num_groupof_matched].
        tp_fp_labels_group_of: float array of shape [num_groupof_matched], with
          every entry set to self.group_of_weight.
"""
scores_group_of = np.zeros(ioa.shape[1], dtype=float)
tp_fp_labels_group_of = self.group_of_weight * np.ones(
ioa.shape[1], dtype=float)
max_overlap_group_of_gt_ids = np.argmax(ioa, axis=1)
for i in range(num_detected_boxes):
gt_id = max_overlap_group_of_gt_ids[i]
is_evaluatable = (not tp_fp_labels[i] and
not is_matched_to_difficult[i] and
ioa[i, gt_id] >= self.matching_iou_threshold and
not is_matched_to_group_of[i])
if is_evaluatable:
is_matched_to_group_of[i] = True
is_matched_to_box[i] = is_box
scores_group_of[gt_id] = max(scores_group_of[gt_id], scores[i])
selector = np.where((scores_group_of > 0) & (tp_fp_labels_group_of > 0))
scores_group_of = scores_group_of[selector]
tp_fp_labels_group_of = tp_fp_labels_group_of[selector]
return scores_group_of, tp_fp_labels_group_of
# The evaluation is done in two stages:
# 1. Evaluate all objects that actually have instance level masks.
    # 2. Evaluate all remaining objects (those without instance masks) as boxes.
if iou_mask.shape[1] > 0:
groundtruth_is_difficult_mask_list = groundtruth_is_difficult_list[
mask_presence_indicator]
groundtruth_is_group_of_mask_list = groundtruth_is_group_of_list[
mask_presence_indicator]
compute_match_iou(
iou_mask,
groundtruth_is_difficult_mask_list[
~groundtruth_is_group_of_mask_list],
is_box=False)
scores_mask_group_of = np.ndarray([0], dtype=float)
tp_fp_labels_mask_group_of = np.ndarray([0], dtype=float)
if ioa_mask.shape[1] > 0:
scores_mask_group_of, tp_fp_labels_mask_group_of = compute_match_ioa(
ioa_mask, is_box=False)
# Tp-fp evaluation for non-group of boxes (if any).
if iou.shape[1] > 0:
groundtruth_is_difficult_box_list = groundtruth_is_difficult_list[
~mask_presence_indicator]
groundtruth_is_group_of_box_list = groundtruth_is_group_of_list[
~mask_presence_indicator]
compute_match_iou(
iou,
groundtruth_is_difficult_box_list[~groundtruth_is_group_of_box_list],
is_box=True)
scores_box_group_of = np.ndarray([0], dtype=float)
tp_fp_labels_box_group_of = np.ndarray([0], dtype=float)
if ioa.shape[1] > 0:
scores_box_group_of, tp_fp_labels_box_group_of = compute_match_ioa(
ioa, is_box=True)
if mask_mode:
# Note: here crowds are treated as ignore regions.
valid_entries = (~is_matched_to_difficult & ~is_matched_to_group_of
& ~is_matched_to_box)
return np.concatenate(
(scores[valid_entries], scores_mask_group_of)), np.concatenate(
(tp_fp_labels[valid_entries].astype(float),
tp_fp_labels_mask_group_of))
else:
valid_entries = (~is_matched_to_difficult & ~is_matched_to_group_of)
return np.concatenate(
(scores[valid_entries], scores_box_group_of)), np.concatenate(
(tp_fp_labels[valid_entries].astype(float),
tp_fp_labels_box_group_of))
def _get_ith_class_arrays(self, detected_boxes, detected_scores,
detected_masks, detected_class_labels,
groundtruth_boxes, groundtruth_masks,
groundtruth_class_labels, class_index):
"""Returns numpy arrays belonging to class with index `class_index`.
Args:
detected_boxes: A numpy array containing detected boxes.
detected_scores: A numpy array containing detected scores.
detected_masks: A numpy array containing detected masks.
detected_class_labels: A numpy array containing detected class labels.
groundtruth_boxes: A numpy array containing groundtruth boxes.
groundtruth_masks: A numpy array containing groundtruth masks.
groundtruth_class_labels: A numpy array containing groundtruth class
labels.
class_index: An integer index.
Returns:
gt_boxes_at_ith_class: A numpy array containing groundtruth boxes labeled
as ith class.
gt_masks_at_ith_class: A numpy array containing groundtruth masks labeled
as ith class.
detected_boxes_at_ith_class: A numpy array containing detected boxes
corresponding to the ith class.
detected_scores_at_ith_class: A numpy array containing detected scores
corresponding to the ith class.
detected_masks_at_ith_class: A numpy array containing detected masks
corresponding to the ith class.
"""
selected_groundtruth = (groundtruth_class_labels == class_index)
gt_boxes_at_ith_class = groundtruth_boxes[selected_groundtruth]
if groundtruth_masks is not None:
gt_masks_at_ith_class = groundtruth_masks[selected_groundtruth]
else:
gt_masks_at_ith_class = None
selected_detections = (detected_class_labels == class_index)
detected_boxes_at_ith_class = detected_boxes[selected_detections]
detected_scores_at_ith_class = detected_scores[selected_detections]
if detected_masks is not None:
detected_masks_at_ith_class = detected_masks[selected_detections]
else:
detected_masks_at_ith_class = None
return (gt_boxes_at_ith_class, gt_masks_at_ith_class,
detected_boxes_at_ith_class, detected_scores_at_ith_class,
detected_masks_at_ith_class)
def _remove_invalid_boxes(self,
detected_boxes,
detected_scores,
detected_class_labels,
detected_masks=None):
"""Removes entries with invalid boxes.
A box is invalid if either its xmax is smaller than its xmin, or its ymax
is smaller than its ymin.
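    Example (illustrative): a box [0.5, 0.2, 0.1, 0.4] would be removed since
    its ymax (0.1) is smaller than its ymin (0.5), while a box
    [0.1, 0.2, 0.3, 0.4] would be kept.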
Args:
detected_boxes: A float numpy array of size [num_boxes, 4] containing box
coordinates in [ymin, xmin, ymax, xmax] format.
detected_scores: A float numpy array of size [num_boxes].
detected_class_labels: A int32 numpy array of size [num_boxes].
detected_masks: A uint8 numpy array of size [num_boxes, height, width].
Returns:
valid_detected_boxes: A float numpy array of size [num_valid_boxes, 4]
containing box coordinates in [ymin, xmin, ymax, xmax] format.
valid_detected_scores: A float numpy array of size [num_valid_boxes].
valid_detected_class_labels: A int32 numpy array of size
[num_valid_boxes].
valid_detected_masks: A uint8 numpy array of size
[num_valid_boxes, height, width].
"""
valid_indices = np.logical_and(detected_boxes[:, 0] < detected_boxes[:, 2],
detected_boxes[:, 1] < detected_boxes[:, 3])
detected_boxes = detected_boxes[valid_indices]
detected_scores = detected_scores[valid_indices]
detected_class_labels = detected_class_labels[valid_indices]
if detected_masks is not None:
detected_masks = detected_masks[valid_indices]
return [
detected_boxes, detected_scores, detected_class_labels, detected_masks
]
| 34,228 | 47.414427 | 83 | py |
models | models-master/research/object_detection/utils/json_utils.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for dealing with writing json strings.
json_utils wraps json.dump and json.dumps so that they can be used to safely
control the precision of floats when writing to json strings or files.
"""
import json
import re
def FormatFloat(json_str, float_digits):
pattern = re.compile(r'\d+\.\d+')
float_repr = '{:.' + '{}'.format(float_digits) + 'f}'
def MRound(match):
return float_repr.format(float(match.group()))
return re.sub(pattern, MRound, json_str)
def Dump(obj, fid, float_digits=-1, **params):
"""Wrapper of json.dump that allows specifying the float precision used.
Args:
obj: The object to dump.
fid: The file id to write to.
float_digits: The number of digits of precision when writing floats out.
**params: Additional parameters to pass to json.dumps.
"""
json_str = Dumps(obj, float_digits, **params)
fid.write(json_str)
def Dumps(obj, float_digits=-1, **params):
"""Wrapper of json.dumps that allows specifying the float precision used.
Args:
obj: The object to dump.
float_digits: The number of digits of precision when writing floats out.
**params: Additional parameters to pass to json.dumps.
Returns:
output: JSON string representation of obj.
"""
json_str = json.dumps(obj, **params)
if float_digits > -1:
json_str = FormatFloat(json_str, float_digits)
return json_str
def PrettyParams(**params):
"""Returns parameters for use with Dump and Dumps to output pretty json.
Example usage:
```json_str = json_utils.Dumps(obj, **json_utils.PrettyParams())```
```json_str = json_utils.Dumps(
obj, **json_utils.PrettyParams(allow_nans=False))```
Args:
**params: Additional params to pass to json.dump or json.dumps.
Returns:
params: Parameters that are compatible with json_utils.Dump and
json_utils.Dumps.
"""
params['float_digits'] = 4
params['sort_keys'] = True
params['indent'] = 2
params['separators'] = (',', ': ')
return params
| 2,694 | 31.46988 | 80 | py |
models | models-master/research/object_detection/utils/patch_ops.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for image patches."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def get_patch_mask(y, x, patch_size, image_shape):
"""Creates a 2D mask array for a square patch of a given size and location.
The mask is created with its center at the y and x coordinates, which must be
within the image. While the mask center must be within the image, the mask
itself can be partially outside of it. If patch_size is an even number, then
the mask is created with lower-valued coordinates first (top and left).
Args:
y: An integer or scalar int32 tensor. The vertical coordinate of the
patch mask center. Must be within the range [0, image_height).
x: An integer or scalar int32 tensor. The horizontal coordinate of the
patch mask center. Must be within the range [0, image_width).
patch_size: An integer or scalar int32 tensor. The square size of the
patch mask. Must be at least 1.
image_shape: A list or 1D int32 tensor representing the shape of the image
to which the mask will correspond, with the first two values being image
height and width. For example, [image_height, image_width] or
[image_height, image_width, image_channels].
Returns:
Boolean mask tensor of shape [image_height, image_width] with True values
for the patch.
Raises:
tf.errors.InvalidArgumentError: if x is not in the range [0, image_width), y
is not in the range [0, image_height), or patch_size is not at least 1.
"""
image_hw = image_shape[:2]
mask_center_yx = tf.stack([y, x])
with tf.control_dependencies([
tf.debugging.assert_greater_equal(
patch_size, 1,
message='Patch size must be >= 1'),
tf.debugging.assert_greater_equal(
mask_center_yx, 0,
message='Patch center (y, x) must be >= (0, 0)'),
tf.debugging.assert_less(
mask_center_yx, image_hw,
message='Patch center (y, x) must be < image (h, w)')
]):
mask_center_yx = tf.identity(mask_center_yx)
half_patch_size = tf.cast(patch_size, dtype=tf.float32) / 2
start_yx = mask_center_yx - tf.cast(tf.floor(half_patch_size), dtype=tf.int32)
end_yx = mask_center_yx + tf.cast(tf.ceil(half_patch_size), dtype=tf.int32)
start_yx = tf.maximum(start_yx, 0)
end_yx = tf.minimum(end_yx, image_hw)
start_y = start_yx[0]
start_x = start_yx[1]
end_y = end_yx[0]
end_x = end_yx[1]
lower_pad = image_hw[0] - end_y
upper_pad = start_y
left_pad = start_x
right_pad = image_hw[1] - end_x
mask = tf.ones([end_y - start_y, end_x - start_x], dtype=tf.bool)
return tf.pad(mask, [[upper_pad, lower_pad], [left_pad, right_pad]])
| 3,448 | 39.104651 | 80 | py |
models | models-master/research/object_detection/utils/visualization_utils_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.visualization_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import numpy as np
import PIL.Image as Image
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.utils import test_case
from object_detection.utils import visualization_utils
_TESTDATA_PATH = 'object_detection/test_images'
def get_iterator_next_for_testing(dataset, is_tf2):
# In TF2, lookup tables are not supported in one shot iterators, but
# initialization is implicit.
if is_tf2:
return dataset.make_initializable_iterator().get_next()
# In TF1, we use one shot iterator because it does not require running
# a separate init op.
else:
return dataset.make_one_shot_iterator().get_next()
class VisualizationUtilsTest(test_case.TestCase):
def test_get_prime_multiplier_for_color_randomness(self):
    # Show that the default multiplier is not 1 and does not divide the total
    # number of standard colors.
multiplier = visualization_utils._get_multiplier_for_color_randomness()
self.assertNotEqual(
0, multiplier % len(visualization_utils.STANDARD_COLORS))
self.assertNotEqual(1, multiplier)
# Show that with 34 colors, the closest prime number to 34/10 that
# satisfies the constraints is 5.
default_standard_colors = visualization_utils.STANDARD_COLORS
visualization_utils.STANDARD_COLORS = [
'color_{}'.format(str(i)) for i in range(34)
]
multiplier = visualization_utils._get_multiplier_for_color_randomness()
self.assertEqual(5, multiplier)
# Show that with 110 colors, the closest prime number to 110/10 that
# satisfies the constraints is 13 (since 11 equally divides 110).
visualization_utils.STANDARD_COLORS = [
'color_{}'.format(str(i)) for i in range(110)
]
multiplier = visualization_utils._get_multiplier_for_color_randomness()
self.assertEqual(13, multiplier)
visualization_utils.STANDARD_COLORS = default_standard_colors
def create_colorful_test_image(self):
"""This function creates an image that can be used to test vis functions.
It makes an image composed of four colored rectangles.
Returns:
colorful test numpy array image.
"""
ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)
ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)
ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)
imr = np.concatenate((ch255, ch128, ch128), axis=2)
img = np.concatenate((ch255, ch255, ch0), axis=2)
imb = np.concatenate((ch255, ch0, ch255), axis=2)
imw = np.concatenate((ch128, ch128, ch128), axis=2)
imu = np.concatenate((imr, img), axis=1)
imd = np.concatenate((imb, imw), axis=1)
image = np.concatenate((imu, imd), axis=0)
return image
def create_test_image_with_five_channels(self):
return np.full([100, 200, 5], 255, dtype=np.uint8)
def create_test_grayscale_image(self):
return np.full([100, 200, 1], 255, dtype=np.uint8)
def test_draw_bounding_box_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
ymin = 0.25
ymax = 0.75
xmin = 0.4
xmax = 0.6
visualization_utils.draw_bounding_box_on_image(test_image, ymin, xmin, ymax,
xmax)
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_box_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
ymin = 0.25
ymax = 0.75
xmin = 0.4
xmax = 0.6
visualization_utils.draw_bounding_box_on_image_array(
test_image, ymin, xmin, ymax, xmax)
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
boxes = np.array([[0.25, 0.75, 0.4, 0.6],
[0.1, 0.1, 0.9, 0.9]])
visualization_utils.draw_bounding_boxes_on_image(test_image, boxes)
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
boxes = np.array([[0.25, 0.75, 0.4, 0.6],
[0.1, 0.1, 0.9, 0.9]])
visualization_utils.draw_bounding_boxes_on_image_array(test_image, boxes)
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image_tensors(self):
"""Tests that bounding box utility produces reasonable results."""
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
fname = os.path.join(_TESTDATA_PATH, 'image1.jpg')
image_np = np.array(Image.open(fname))
images_np = np.stack((image_np, image_np), axis=0)
original_image_shape = [[636, 512], [636, 512]]
def graph_fn():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant(original_image_shape, dtype=tf.int32)
boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75], [0.5, 0.3, 0.6, 0.9]],
[[0.25, 0.25, 0.75, 0.75], [0.1, 0.3, 0.6, 1.0]]])
classes = tf.constant([[1, 1], [1, 2]], dtype=tf.int64)
scores = tf.constant([[0.8, 0.1], [0.6, 0.5]])
keypoints = tf.random.uniform((2, 2, 4, 2), maxval=1.0, dtype=tf.float32)
keypoint_edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
images_with_boxes = (
visualization_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=image_shape,
true_image_shape=image_shape,
keypoints=keypoints,
min_score_thresh=0.2,
keypoint_edges=keypoint_edges))
return images_with_boxes
# Write output images for visualization.
images_with_boxes_np = self.execute(graph_fn, [])
self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
self.assertEqual(
tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
logging.info('Writing output image %d to %s', i, output_file)
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
def test_draw_bounding_boxes_on_image_tensors_with_track_ids(self):
"""Tests that bounding box utility produces reasonable results."""
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
fname = os.path.join(_TESTDATA_PATH, 'image1.jpg')
image_np = np.array(Image.open(fname))
images_np = np.stack((image_np, image_np), axis=0)
original_image_shape = [[636, 512], [636, 512]]
def graph_fn():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant(original_image_shape, dtype=tf.int32)
boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75],
[0.5, 0.3, 0.7, 0.9],
[0.7, 0.5, 0.8, 0.9]],
[[0.41, 0.25, 0.75, 0.75],
[0.51, 0.3, 0.7, 0.9],
[0.75, 0.5, 0.8, 0.9]]])
classes = tf.constant([[1, 1, 2], [1, 1, 2]], dtype=tf.int64)
scores = tf.constant([[0.8, 0.5, 0.7], [0.6, 0.5, 0.8]])
track_ids = tf.constant([[3, 9, 7], [3, 9, 144]], dtype=tf.int32)
images_with_boxes = (
visualization_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=image_shape,
true_image_shape=image_shape,
track_ids=track_ids,
min_score_thresh=0.2))
return images_with_boxes
# Write output images for visualization.
images_with_boxes_np = self.execute(graph_fn, [])
self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
self.assertEqual(
tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_with_track_ids_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
logging.info('Writing output image %d to %s', i, output_file)
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
def test_draw_bounding_boxes_on_image_tensors_with_additional_channels(self):
"""Tests the case where input image tensor has more than 3 channels."""
category_index = {1: {'id': 1, 'name': 'dog'}}
image_np = self.create_test_image_with_five_channels()
images_np = np.stack((image_np, image_np), axis=0)
def graph_fn():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
boxes = tf.constant(0, dtype=tf.float32, shape=[2, 0, 4])
classes = tf.constant(0, dtype=tf.int64, shape=[2, 0])
scores = tf.constant(0, dtype=tf.float32, shape=[2, 0])
images_with_boxes = (
visualization_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
min_score_thresh=0.2))
return images_with_boxes
final_images_np = self.execute(graph_fn, [])
self.assertEqual((2, 100, 200, 3), final_images_np.shape)
def test_draw_bounding_boxes_on_image_tensors_grayscale(self):
"""Tests the case where input image tensor has one channel."""
category_index = {1: {'id': 1, 'name': 'dog'}}
image_np = self.create_test_grayscale_image()
images_np = np.stack((image_np, image_np), axis=0)
def graph_fn():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant([[100, 200], [100, 200]], dtype=tf.int32)
boxes = tf.constant(0, dtype=tf.float32, shape=[2, 0, 4])
classes = tf.constant(0, dtype=tf.int64, shape=[2, 0])
scores = tf.constant(0, dtype=tf.float32, shape=[2, 0])
images_with_boxes = (
visualization_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=image_shape,
true_image_shape=image_shape,
min_score_thresh=0.2))
return images_with_boxes
final_images_np = self.execute(graph_fn, [])
self.assertEqual((2, 100, 200, 3), final_images_np.shape)
def test_draw_keypoints_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]
keypoint_scores = [0.8, 0.2, 0.2, 0.7]
keypoint_edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
visualization_utils.draw_keypoints_on_image(
test_image,
keypoints,
keypoint_scores,
keypoint_edges=keypoint_edges,
keypoint_edge_width=1,
keypoint_edge_color='green')
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_keypoints_on_image_with_default_keypoint_scores(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
keypoints = [[0.25, np.nan], [0.4, 0.6], [np.nan, np.nan], [0.9, 0.9]]
keypoint_edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
visualization_utils.draw_keypoints_on_image(
test_image,
keypoints,
keypoint_edges=keypoint_edges,
keypoint_edge_width=1,
keypoint_edge_color='green')
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_keypoints_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]
keypoint_edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
visualization_utils.draw_keypoints_on_image_array(
test_image,
keypoints,
keypoint_edges=keypoint_edges,
keypoint_edge_width=1,
keypoint_edge_color='green')
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_mask_on_image_array(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
mask = np.asarray([[0, 1],
[1, 1]], dtype=np.uint8)
expected_result = np.asarray([[[0, 0, 0], [0, 0, 127]],
[[0, 0, 127], [0, 0, 127]]], dtype=np.uint8)
visualization_utils.draw_mask_on_image_array(test_image, mask,
color='Blue', alpha=.5)
self.assertAllEqual(test_image, expected_result)
def test_draw_part_mask_on_image_array(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
mask = np.asarray([[0, 1],
[1, 6]], dtype=np.uint8)
visualization_utils.draw_part_mask_on_image_array(test_image, mask,
alpha=.5)
self.assertAllEqual([0, 0, 0], test_image[0, 0])
self.assertAllGreater(test_image[0, 1], 0)
self.assertAllGreater(test_image[1, 0], 0)
self.assertAllGreater(test_image[1, 1], 0)
self.assertAllEqual(test_image[0, 1], test_image[1, 0])
def test_draw_float_channel_on_image_array(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
channel = np.asarray([[0., 0.5],
[0., 1.]], dtype=np.float32)
mask = np.asarray([[0, 1],
[1, 1]], dtype=np.uint8)
# The colormap ('bwr') maps the values as follows:
# 0.0 -> Blue
# 0.5 -> White
# 1.0 -> Red
visualization_utils.draw_float_channel_on_image_array(
test_image, channel, mask, alpha=1.0, cmap='bwr')
expected_result = np.asarray([[[0, 0, 0], [255, 254, 254]],
[[0, 0, 255], [255, 0, 0]]], dtype=np.uint8)
self.assertAllEqual(test_image, expected_result)
def test_draw_heatmaps_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
heatmaps = np.ones(shape=[10, 20, 1], dtype=float)
visualization_utils.draw_heatmaps_on_image(test_image, heatmaps)
width_final, height_final = test_image.size
pixels = list(test_image.getdata())
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
    # The pixel should be painted as AliceBlue with RGB (240, 248, 255).
self.assertAllEqual((240, 248, 255), pixels[10])
def test_draw_heatmaps_on_image_array(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
heatmap1 = np.asarray([[1, 0],
[0, 1]], dtype=float)
heatmap2 = np.asarray([[0, 1],
[1, 0]], dtype=float)
heatmaps = np.stack([heatmap1, heatmap2], axis=0)
output_image = visualization_utils.draw_heatmaps_on_image_array(
test_image, heatmaps)
# Output image should be painted as "AliceBlue" at (0, 0), (1, 1)
# and "Chartreuse" at (0, 1), (1, 0).
self.assertAllEqual(
output_image,
np.array([[[240, 248, 255], [127, 255, 0]],
[[127, 255, 0], [240, 248, 255]]]))
def test_draw_heatmaps_on_image_tensors(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
heatmap1 = np.asarray([[1, 0],
[0, 1]], dtype=float)
heatmap2 = np.asarray([[0, 1],
[1, 0]], dtype=float)
heatmaps = np.stack([heatmap1, heatmap2], axis=0)
def graph_fn():
image_tensor = tf.constant(test_image, dtype=tf.uint8)
image_tensor = tf.expand_dims(image_tensor, axis=0)
heatmaps_tensor = tf.expand_dims(
tf.constant(heatmaps, dtype=tf.float32), axis=0)
output_image = visualization_utils.draw_heatmaps_on_image_tensors(
images=image_tensor,
heatmaps=heatmaps_tensor,
apply_sigmoid=False)
return output_image
output_image_np = self.execute(graph_fn, [])
self.assertAllEqual(
output_image_np,
np.expand_dims(
np.array([[[240, 248, 255], [127, 255, 0]],
[[127, 255, 0], [240, 248, 255]]]),
axis=0))
def test_add_cdf_image_summary(self):
def graph_fn():
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
visualization_utils.add_cdf_image_summary(values, 'PositiveAnchorLoss')
cdf_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
return cdf_image_summary
self.execute(graph_fn, [])
def test_add_hist_image_summary(self):
def graph_fn():
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
bins = [0.01 * i for i in range(101)]
visualization_utils.add_hist_image_summary(values, bins,
'ScoresDistribution')
hist_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
return hist_image_summary
self.execute(graph_fn, [])
def test_eval_metric_ops(self):
if self.is_tf2():
self.skipTest('This test is only compatible with Tensorflow 1.X, '
'estimator eval ops are not supported in Tensorflow 2.')
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
max_examples_to_draw = 4
metric_op_base = 'Detections_Left_Groundtruth_Right'
eval_metric_ops = visualization_utils.VisualizeSingleFrameDetections(
category_index,
max_examples_to_draw=max_examples_to_draw,
summary_name_prefix=metric_op_base)
original_image = tf.placeholder(tf.uint8, [4, None, None, 3])
original_image_spatial_shape = tf.placeholder(tf.int32, [4, 2])
true_image_shape = tf.placeholder(tf.int32, [4, 3])
detection_boxes = tf.random_uniform([4, 20, 4],
minval=0.0,
maxval=1.0,
dtype=tf.float32)
detection_classes = tf.random_uniform([4, 20],
minval=1,
maxval=3,
dtype=tf.int64)
detection_scores = tf.random_uniform([4, 20],
minval=0.,
maxval=1.,
dtype=tf.float32)
groundtruth_boxes = tf.random_uniform([4, 8, 4],
minval=0.0,
maxval=1.0,
dtype=tf.float32)
num_groundtruth_boxes = tf.constant([3, 8, 0, 2], tf.int32)
groundtruth_classes = tf.random_uniform([4, 8],
minval=1,
maxval=3,
dtype=tf.int64)
eval_dict = {
fields.DetectionResultFields.detection_boxes:
detection_boxes,
fields.DetectionResultFields.detection_classes:
detection_classes,
fields.DetectionResultFields.detection_scores:
detection_scores,
fields.InputDataFields.original_image:
original_image,
fields.InputDataFields.original_image_spatial_shape: (
original_image_spatial_shape),
fields.InputDataFields.true_image_shape: (true_image_shape),
fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
fields.InputDataFields.groundtruth_classes:
groundtruth_classes,
fields.InputDataFields.num_groundtruth_boxes:
num_groundtruth_boxes
}
metric_ops = eval_metric_ops.get_estimator_eval_metric_ops(eval_dict)
_, update_op = metric_ops[next(six.iterkeys(metric_ops))]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
value_ops = {}
for key, (value_op, _) in six.iteritems(metric_ops):
value_ops[key] = value_op
# First run enough update steps to surpass `max_examples_to_draw`.
for i in range(max_examples_to_draw):
# Use a unique image shape on each eval image.
sess.run(
update_op,
feed_dict={
original_image:
np.random.randint(
low=0,
high=256,
size=(4, 6 + i, 7 + i, 3),
dtype=np.uint8),
original_image_spatial_shape: [[6 + i, 7 + i], [6 + i, 7 + i],
[6 + i, 7 + i], [6 + i, 7 + i]],
true_image_shape: [[6 + i, 7 + i, 3], [6 + i, 7 + i, 3],
[6 + i, 7 + i, 3], [6 + i, 7 + i, 3]]
})
value_ops_out = sess.run(value_ops)
for key, value_op in six.iteritems(value_ops_out):
self.assertNotEqual('', value_op)
# Now run fewer update steps than `max_examples_to_draw`. A single value
# op will be the empty string, since not enough image summaries can be
# produced.
for i in range(max_examples_to_draw - 1):
# Use a unique image shape on each eval image.
sess.run(
update_op,
feed_dict={
original_image:
np.random.randint(
low=0,
high=256,
size=(4, 6 + i, 7 + i, 3),
dtype=np.uint8),
original_image_spatial_shape: [[6 + i, 7 + i], [6 + i, 7 + i],
[6 + i, 7 + i], [6 + i, 7 + i]],
true_image_shape: [[6 + i, 7 + i, 3], [6 + i, 7 + i, 3],
[6 + i, 7 + i, 3], [6 + i, 7 + i, 3]]
})
value_ops_out = sess.run(value_ops)
self.assertEqual(
six.b(''),
value_ops_out[metric_op_base + '/' + str(max_examples_to_draw - 1)])
def test_visualize_boxes_and_labels_on_image_array(self):
ori_image = np.ones([360, 480, 3], dtype=np.int32) * 255
test_image = np.ones([360, 480, 3], dtype=np.int32) * 255
detections = np.array([[0.8, 0.1, 0.9, 0.1, 1., 0.1],
[0.1, 0.3, 0.8, 0.7, 1., 0.6]])
keypoints = np.array(np.random.rand(2, 5, 2), dtype=np.float32)
labelmap = {1: {'id': 1, 'name': 'cat'}, 2: {'id': 2, 'name': 'dog'}}
visualization_utils.visualize_boxes_and_labels_on_image_array(
test_image,
detections[:, :4],
detections[:, 4].astype(np.int32),
detections[:, 5],
labelmap,
keypoints=keypoints,
track_ids=None,
use_normalized_coordinates=True,
max_boxes_to_draw=1,
min_score_thresh=0.2,
agnostic_mode=False,
line_thickness=8)
self.assertGreater(np.abs(np.sum(test_image - ori_image)), 0)
if __name__ == '__main__':
tf.test.main()
| 25,752 | 40.6042 | 80 | py |
models | models-master/research/object_detection/utils/shape_utils.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.utils import static_shape
get_dim_as_int = static_shape.get_dim_as_int
def _is_tensor(t):
"""Returns a boolean indicating whether the input is a tensor.
Args:
t: the input to be tested.
Returns:
a boolean that indicates whether t is a tensor.
"""
return isinstance(t, (tf.Tensor, tf.SparseTensor, tf.Variable))
def _set_dim_0(t, d0):
"""Sets the 0-th dimension of the input tensor.
Args:
t: the input tensor, assuming the rank is at least 1.
d0: an integer indicating the 0-th dimension of the input tensor.
Returns:
the tensor t with the 0-th dimension set.
"""
t_shape = t.get_shape().as_list()
t_shape[0] = d0
t.set_shape(t_shape)
return t
def pad_tensor(t, length):
"""Pads the input tensor with 0s along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
      of the input tensor t after padding, assuming length >= t.shape[0].
Returns:
padded_t: the padded tensor, whose first dimension is length. If the length
is an integer, the first dimension of padded_t is set to length
statically.
"""
# Computing the padding statically makes the operation work with XLA.
rank = len(t.get_shape())
paddings = [[0 for _ in range(2)] for _ in range(rank)]
t_d0 = tf.shape(t)[0]
if isinstance(length, int) or len(length.get_shape()) == 0: # pylint:disable=g-explicit-length-test
paddings[0][1] = length - t_d0
else:
paddings[0][1] = length[0] - t_d0
return tf.pad(t, paddings)
def clip_tensor(t, length):
"""Clips the input tensor along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after clipping, assuming length <= t.shape[0].
Returns:
clipped_t: the clipped tensor, whose first dimension is length. If the
length is an integer, the first dimension of clipped_t is set to length
statically.
"""
clipped_t = tf.gather(t, tf.range(length))
if not _is_tensor(length):
clipped_t = _set_dim_0(clipped_t, length)
return clipped_t
def pad_or_clip_tensor(t, length):
"""Pad or clip the input tensor along the first dimension.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after processing.
Returns:
processed_t: the processed tensor, whose first dimension is length. If the
length is an integer, the first dimension of the processed tensor is set
to length statically.
"""
return pad_or_clip_nd(t, [length] + t.shape.as_list()[1:])
def pad_or_clip_nd(tensor, output_shape):
"""Pad or Clip given tensor to the output shape.
Args:
tensor: Input tensor to pad or clip.
output_shape: A list of integers / scalar tensors (or None for dynamic dim)
representing the size to pad or clip each dimension of the input tensor.
Returns:
Input tensor padded and clipped to the output shape.
"""
tensor_shape = tf.shape(tensor)
clip_size = [
tf.where(tensor_shape[i] - shape > 0, shape, -1)
if shape is not None else -1 for i, shape in enumerate(output_shape)
]
clipped_tensor = tf.slice(
tensor,
begin=tf.zeros(len(clip_size), dtype=tf.int32),
size=clip_size)
# Pad tensor if the shape of clipped tensor is smaller than the expected
# shape.
clipped_tensor_shape = tf.shape(clipped_tensor)
trailing_paddings = [
shape - clipped_tensor_shape[i] if shape is not None else 0
for i, shape in enumerate(output_shape)
]
paddings = tf.stack(
[
tf.zeros(len(trailing_paddings), dtype=tf.int32),
trailing_paddings
],
axis=1)
padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
output_static_shape = [
dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
]
padded_tensor.set_shape(output_static_shape)
return padded_tensor
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
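  Example (illustrative):
  `tensor` has static shape [None, 128].
  combined_static_and_dynamic_shape(tensor) -> [<scalar int32 tensor>, 128],
  i.e. statically known dimensions are returned as Python ints and unknown
  dimensions as scalar tensors.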
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
def static_or_dynamic_map_fn(fn, elems, dtype=None,
parallel_iterations=32, back_prop=True):
"""Runs map_fn as a (static) for loop when possible.
This function rewrites the map_fn as an explicit unstack input -> for loop
over function calls -> stack result combination. This allows our graphs to
be acyclic when the batch size is static.
For comparison, see https://www.tensorflow.org/api_docs/python/tf/map_fn.
Note that `static_or_dynamic_map_fn` currently is not *fully* interchangeable
with the default tf.map_fn function as it does not accept nested inputs (only
Tensors or lists of Tensors). Likewise, the output of `fn` can only be a
Tensor or list of Tensors.
TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.
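  Example (illustrative):
    elems = tf.constant([[1.0, 2.0], [3.0, 4.0]])  # static batch size of 2
    doubled = static_or_dynamic_map_fn(lambda x: 2 * x, elems)
    # Because the first dimension is statically known, this unrolls into two
    # calls of the lambda followed by tf.stack (no tf.map_fn while loop), and
    # doubled evaluates to [[2.0, 4.0], [6.0, 8.0]].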
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same structure as elems. Its output must have the
same structure as elems.
elems: A tensor or list of tensors, each of which will
be unpacked along their first dimension. The sequence of the
resulting slices will be applied to fn.
dtype: (optional) The output type(s) of fn. If fn returns a structure of
Tensors differing from the structure of elems, then dtype is not optional
and must have the same structure as the output of fn.
parallel_iterations: (optional) number of batch items to process in
parallel. This flag is only used if the native tf.map_fn is used
and defaults to 32 instead of 10 (unlike the standard tf.map_fn default).
back_prop: (optional) True enables support for back propagation.
This flag is only used if the native tf.map_fn is used.
Returns:
A tensor or sequence of tensors. Each tensor packs the
results of applying fn to tensors unpacked from elems along the first
dimension, from first to last.
Raises:
    ValueError: if `elems` is not a Tensor or a list of Tensors.
    ValueError: if `fn` does not return a Tensor or a list of Tensors.
"""
if isinstance(elems, list):
for elem in elems:
if not isinstance(elem, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elem_shapes = [elem.shape.as_list() for elem in elems]
# Fall back on tf.map_fn if shapes of each entry of `elems` are None or fail
# to all be the same size along the batch dimension.
for elem_shape in elem_shapes:
if (not elem_shape or not elem_shape[0]
or elem_shape[0] != elem_shapes[0][0]):
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
arg_tuples = zip(*[tf.unstack(elem) for elem in elems])
outputs = [fn(arg_tuple) for arg_tuple in arg_tuples]
else:
if not isinstance(elems, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elems_shape = elems.shape.as_list()
if not elems_shape or not elems_shape[0]:
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
outputs = [fn(arg) for arg in tf.unstack(elems)]
# Stack `outputs`, which is a list of Tensors or list of lists of Tensors
if all([isinstance(output, tf.Tensor) for output in outputs]):
return tf.stack(outputs)
else:
if all([isinstance(output, list) for output in outputs]):
if all([all(
[isinstance(entry, tf.Tensor) for entry in output_list])
for output_list in outputs]):
return [tf.stack(output_tuple) for output_tuple in zip(*outputs)]
raise ValueError('`fn` should return a Tensor or a list of Tensors.')
def check_min_image_dim(min_dim, image_tensor):
"""Checks that the image width/height are greater than some number.
This function is used to check that the width and height of an image are above
a certain value. If the image shape is static, this function will perform the
check at graph construction time. Otherwise, if the image shape varies, an
Assertion control dependency will be added to the graph.
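  Example (illustrative):
  `image` has static shape [1, 32, 64, 3].
  check_min_image_dim(33, image) raises a ValueError at graph construction
  time because the height (32) is below the minimum.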
Args:
min_dim: The minimum number of pixels along the width and height of the
image.
image_tensor: The image tensor to check size for.
Returns:
If `image_tensor` has dynamic size, return `image_tensor` with a Assert
control dependency. Otherwise returns image_tensor.
Raises:
ValueError: if `image_tensor`'s' width or height is smaller than `min_dim`.
"""
image_shape = image_tensor.get_shape()
image_height = static_shape.get_height(image_shape)
image_width = static_shape.get_width(image_shape)
if image_height is None or image_width is None:
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
['image size must be >= {} in both height and width.'.format(min_dim)])
with tf.control_dependencies([shape_assert]):
return tf.identity(image_tensor)
if image_height < min_dim or image_width < min_dim:
raise ValueError(
'image size must be >= %d in both height and width; image dim = %d,%d' %
(min_dim, image_height, image_width))
return image_tensor
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
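  Example (illustrative):
  assert_shape_equal([2, 3], [2, 3]) -> tf.no_op()
  assert_shape_equal([2, 3], [2, 4]) raises a ValueError immediately, since
  both shapes are fully static.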
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
all(isinstance(dim, int) for dim in shape_b)):
if shape_a != shape_b:
raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
    else:
      return tf.no_op()
else:
return tf.assert_equal(shape_a, shape_b)
def assert_shape_equal_along_first_dimension(shape_a, shape_b):
"""Asserts that shape_a and shape_b are the same along the 0th-dimension.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if isinstance(shape_a[0], int) and isinstance(shape_b[0], int):
if shape_a[0] != shape_b[0]:
raise ValueError('Unequal first dimension {}, {}'.format(
shape_a[0], shape_b[0]))
    else:
      return tf.no_op()
else:
return tf.assert_equal(shape_a[0], shape_b[0])
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
"""Asserts the input box tensor is normalized.
Args:
boxes: a tensor of shape [N, 4] where N is the number of boxes.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1.
Returns:
a tf.Assert op which fails when the input box tensor is not normalized.
Raises:
ValueError: When the input box tensor is not normalized.
"""
box_minimum = tf.reduce_min(boxes)
box_maximum = tf.reduce_max(boxes)
return tf.Assert(
tf.logical_and(
tf.less_equal(box_maximum, maximum_normalized_coordinate),
tf.greater_equal(box_minimum, 0)),
[boxes])
def flatten_dimensions(inputs, first, last):
"""Flattens `K-d` tensor along [first, last) dimensions.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[D0, D1, ..., D(first) * D(first+1) * ... * D(last-1), D(last), ..., D(K-1)].
Example:
`inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
new_tensor = flatten_dimensions(inputs, first=1, last=3)
new_tensor.shape -> [10, 100, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
first: first value for the range of dimensions to flatten.
last: last value for the range of dimensions to flatten. Note that the last
dimension itself is excluded.
Returns:
a tensor with shape
[D0, D1, ..., D(first) * D(first + 1) * ... * D(last - 1), D(last), ...,
D(K-1)].
Raises:
ValueError: if first and last arguments are incorrect.
"""
if first >= inputs.shape.ndims or last > inputs.shape.ndims:
    raise ValueError('`first` must be less than and `last` at most '
                     'inputs.shape.ndims. Found {} and {} respectively while '
                     'ndims is {}'.format(first, last, inputs.shape.ndims))
shape = combined_static_and_dynamic_shape(inputs)
flattened_dim_prod = tf.reduce_prod(shape[first:last],
keepdims=True)
new_shape = tf.concat([shape[:first], flattened_dim_prod,
shape[last:]], axis=0)
return tf.reshape(inputs, new_shape)
def flatten_first_n_dimensions(inputs, n):
"""Flattens `K-d` tensor along first n dimension to be a `(K-n+1)-d` tensor.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
Example:
`inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
new_tensor = flatten_first_n_dimensions(inputs, 2)
new_tensor.shape -> [50, 20, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
n: The number of dimensions to flatten.
Returns:
a tensor with shape [D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
"""
return flatten_dimensions(inputs, first=0, last=n)
def expand_first_dimension(inputs, dims):
"""Expands `K-d` tensor along first dimension to be a `(K+n-1)-d` tensor.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
Example:
`inputs` is a tensor with shape [50, 20, 20, 3].
new_tensor = expand_first_dimension(inputs, [10, 5]).
new_tensor.shape -> [10, 5, 20, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
dims: List with new dimensions to expand first axis into. The length of
`dims` is typically 2 or larger.
Returns:
a tensor with shape [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
"""
inputs_shape = combined_static_and_dynamic_shape(inputs)
expanded_shape = tf.stack(dims + inputs_shape[1:])
# Verify that it is possible to expand the first axis of inputs.
assert_op = tf.assert_equal(
inputs_shape[0], tf.reduce_prod(tf.stack(dims)),
message=('First dimension of `inputs` cannot be expanded into provided '
'`dims`'))
with tf.control_dependencies([assert_op]):
inputs_reshaped = tf.reshape(inputs, expanded_shape)
return inputs_reshaped
def resize_images_and_return_shapes(inputs, image_resizer_fn):
"""Resizes images using the given function and returns their true shapes.
Args:
inputs: a float32 Tensor representing a batch of inputs of shape
[batch_size, height, width, channels].
image_resizer_fn: a function which takes in a single image and outputs
a resized image and its original shape.
Returns:
resized_inputs: The inputs resized according to image_resizer_fn.
true_image_shapes: A integer tensor of shape [batch_size, 3]
representing the height, width and number of channels in inputs.
"""
if inputs.dtype is not tf.float32:
raise ValueError('`resize_images_and_return_shapes` expects a'
' tf.float32 tensor')
# TODO(jonathanhuang): revisit whether to always use batch size as
# the number of parallel iterations vs allow for dynamic batching.
outputs = static_or_dynamic_map_fn(
image_resizer_fn,
elems=inputs,
dtype=[tf.float32, tf.int32])
resized_inputs = outputs[0]
true_image_shapes = outputs[1]
return resized_inputs, true_image_shapes
| 17,950 | 34.902 | 102 | py |
models | models-master/research/object_detection/utils/ops_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import standard_fields as fields
from object_detection.utils import ops
from object_detection.utils import test_case
class NormalizedToImageCoordinatesTest(test_case.TestCase):
def test_normalized_to_image_coordinates(self):
normalized_boxes_np = np.array([[[0.0, 0.0, 1.0, 1.0]],
[[0.5, 0.5, 1.0, 1.0]]])
def graph_fn(normalized_boxes):
image_shape = tf.convert_to_tensor([1, 4, 4, 3], dtype=tf.int32)
absolute_boxes = ops.normalized_to_image_coordinates(
normalized_boxes, image_shape, parallel_iterations=2)
return absolute_boxes
expected_boxes = np.array([[[0, 0, 4, 4]],
[[2, 2, 4, 4]]])
absolute_boxes = self.execute(graph_fn, [normalized_boxes_np])
self.assertAllEqual(absolute_boxes, expected_boxes)
class ReduceSumTrailingDimensions(test_case.TestCase):
def test_reduce_sum_trailing_dimensions(self):
def graph_fn(input_tensor):
reduced_tensor = ops.reduce_sum_trailing_dimensions(input_tensor, ndims=2)
return reduced_tensor
reduced_np = self.execute(graph_fn, [np.ones((2, 2, 2), np.float32)])
self.assertAllClose(reduced_np, 2 * np.ones((2, 2), np.float32))
class MeshgridTest(test_case.TestCase):
def test_meshgrid_numpy_comparison(self):
"""Tests meshgrid op with vectors, for which it should match numpy."""
x = np.arange(4)
y = np.arange(6)
def graph_fn():
xgrid, ygrid = ops.meshgrid(x, y)
return xgrid, ygrid
exp_xgrid, exp_ygrid = np.meshgrid(x, y)
xgrid_output, ygrid_output = self.execute(graph_fn, [])
self.assertAllEqual(xgrid_output, exp_xgrid)
self.assertAllEqual(ygrid_output, exp_ygrid)
def test_meshgrid_multidimensional(self):
np.random.seed(18)
x = np.random.rand(4, 1, 2).astype(np.float32)
y = np.random.rand(2, 3).astype(np.float32)
grid_shape = list(y.shape) + list(x.shape)
def graph_fn():
xgrid, ygrid = ops.meshgrid(x, y)
self.assertEqual(xgrid.get_shape().as_list(), grid_shape)
self.assertEqual(ygrid.get_shape().as_list(), grid_shape)
return xgrid, ygrid
xgrid_output, ygrid_output = self.execute(graph_fn, [])
# Check the shape of the output grids
self.assertEqual(xgrid_output.shape, tuple(grid_shape))
self.assertEqual(ygrid_output.shape, tuple(grid_shape))
# Check a few elements
test_elements = [((3, 0, 0), (1, 2)),
((2, 0, 1), (0, 0)),
((0, 0, 0), (1, 1))]
for xind, yind in test_elements:
# These are float equality tests, but the meshgrid op should not introduce
# rounding.
self.assertEqual(xgrid_output[yind + xind], x[xind])
self.assertEqual(ygrid_output[yind + xind], y[yind])
class OpsTestFixedPadding(test_case.TestCase):
def test_3x3_kernel(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
padded_tensor = ops.fixed_padding(tensor, 3)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 4, 4, 1), padded_tensor_out.shape)
def test_5x5_kernel(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
padded_tensor = ops.fixed_padding(tensor, 5)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 6, 6, 1), padded_tensor_out.shape)
def test_3x3_atrous_kernel(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
padded_tensor = ops.fixed_padding(tensor, 3, 2)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 6, 6, 1), padded_tensor_out.shape)
class OpsTestPadToMultiple(test_case.TestCase):
def test_zero_padding(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
padded_tensor = ops.pad_to_multiple(tensor, 1)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape)
def test_no_padding(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
padded_tensor = ops.pad_to_multiple(tensor, 2)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape)
def test_non_square_padding(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]]]])
padded_tensor = ops.pad_to_multiple(tensor, 2)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape)
def test_padding(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
padded_tensor = ops.pad_to_multiple(tensor, 4)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 4, 4, 1), padded_tensor_out.shape)
class OpsTestPaddedOneHotEncoding(test_case.TestCase):
def test_correct_one_hot_tensor_with_no_pad(self):
def graph_fn():
indices = tf.constant([1, 2, 3, 5])
one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=6, left_pad=0)
return one_hot_tensor
expected_tensor = np.array([[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1]], np.float32)
out_one_hot_tensor = self.execute(graph_fn, [])
self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10,
atol=1e-10)
def test_correct_one_hot_tensor_with_pad_one(self):
def graph_fn():
indices = tf.constant([1, 2, 3, 5])
one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=6, left_pad=1)
return one_hot_tensor
expected_tensor = np.array([[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1]], np.float32)
out_one_hot_tensor = self.execute(graph_fn, [])
self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10,
atol=1e-10)
def test_correct_one_hot_tensor_with_pad_three(self):
def graph_fn():
indices = tf.constant([1, 2, 3, 5])
one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=6, left_pad=3)
return one_hot_tensor
expected_tensor = np.array([[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1]], np.float32)
out_one_hot_tensor = self.execute(graph_fn, [])
self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10,
atol=1e-10)
def test_correct_padded_one_hot_tensor_with_empty_indices(self):
depth = 6
pad = 2
def graph_fn():
indices = tf.constant([])
one_hot_tensor = ops.padded_one_hot_encoding(
indices, depth=depth, left_pad=pad)
return one_hot_tensor
expected_tensor = np.zeros((0, depth + pad))
out_one_hot_tensor = self.execute(graph_fn, [])
self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10,
atol=1e-10)
def test_return_none_on_zero_depth(self):
indices = tf.constant([1, 2, 3, 4, 5])
one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=0, left_pad=2)
self.assertEqual(one_hot_tensor, None)
def test_raise_value_error_on_rank_two_input(self):
indices = tf.constant(1.0, shape=(2, 3))
with self.assertRaises(ValueError):
ops.padded_one_hot_encoding(indices, depth=6, left_pad=2)
def test_raise_value_error_on_negative_pad(self):
indices = tf.constant(1.0, shape=(2, 3))
with self.assertRaises(ValueError):
ops.padded_one_hot_encoding(indices, depth=6, left_pad=-1)
def test_raise_value_error_on_float_pad(self):
indices = tf.constant(1.0, shape=(2, 3))
with self.assertRaises(ValueError):
ops.padded_one_hot_encoding(indices, depth=6, left_pad=0.1)
def test_raise_value_error_on_float_depth(self):
indices = tf.constant(1.0, shape=(2, 3))
with self.assertRaises(ValueError):
ops.padded_one_hot_encoding(indices, depth=0.1, left_pad=2)
class OpsDenseToSparseBoxesTest(test_case.TestCase):
def test_return_all_boxes_when_all_input_boxes_are_valid(self):
num_classes = 4
num_valid_boxes = 3
code_size = 4
def graph_fn(dense_location, dense_num_boxes):
box_locations, box_classes = ops.dense_to_sparse_boxes(
dense_location, dense_num_boxes, num_classes)
return box_locations, box_classes
dense_location_np = np.random.uniform(size=[num_valid_boxes, code_size])
dense_num_boxes_np = np.array([1, 0, 0, 2], dtype=np.int32)
expected_box_locations = dense_location_np
    expected_box_classes = np.array([0, 3, 3])
# Executing on CPU only since output shape is not constant.
box_locations, box_classes = self.execute_cpu(
graph_fn, [dense_location_np, dense_num_boxes_np])
self.assertAllClose(box_locations, expected_box_locations, rtol=1e-6,
atol=1e-6)
    self.assertAllEqual(box_classes, expected_box_classes)
def test_return_only_valid_boxes_when_input_contains_invalid_boxes(self):
num_classes = 4
num_valid_boxes = 3
num_boxes = 10
code_size = 4
def graph_fn(dense_location, dense_num_boxes):
box_locations, box_classes = ops.dense_to_sparse_boxes(
dense_location, dense_num_boxes, num_classes)
return box_locations, box_classes
dense_location_np = np.random.uniform(size=[num_boxes, code_size])
dense_num_boxes_np = np.array([1, 0, 0, 2], dtype=np.int32)
expected_box_locations = dense_location_np[:num_valid_boxes]
    expected_box_classes = np.array([0, 3, 3])
# Executing on CPU only since output shape is not constant.
box_locations, box_classes = self.execute_cpu(
graph_fn, [dense_location_np, dense_num_boxes_np])
self.assertAllClose(box_locations, expected_box_locations, rtol=1e-6,
atol=1e-6)
    self.assertAllEqual(box_classes, expected_box_classes)
class OpsTestIndicesToDenseVector(test_case.TestCase):
def test_indices_to_dense_vector(self):
size = 10000
num_indices = np.random.randint(size)
rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
expected_output = np.zeros(size, dtype=np.float32)
expected_output[rand_indices] = 1.
def graph_fn():
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(tf_rand_indices, size)
return indicator
output = self.execute(graph_fn, [])
self.assertAllEqual(output, expected_output)
self.assertEqual(output.dtype, expected_output.dtype)
def test_indices_to_dense_vector_size_at_inference(self):
size = 5000
num_indices = 250
all_indices = np.arange(size)
rand_indices = np.random.permutation(all_indices)[0:num_indices]
expected_output = np.zeros(size, dtype=np.float32)
expected_output[rand_indices] = 1.
def graph_fn(tf_all_indices):
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(tf_rand_indices,
tf.shape(tf_all_indices)[0])
return indicator
output = self.execute(graph_fn, [all_indices])
self.assertAllEqual(output, expected_output)
self.assertEqual(output.dtype, expected_output.dtype)
def test_indices_to_dense_vector_int(self):
size = 500
num_indices = 25
rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
expected_output = np.zeros(size, dtype=np.int64)
expected_output[rand_indices] = 1
def graph_fn():
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(
tf_rand_indices, size, 1, dtype=tf.int64)
return indicator
output = self.execute(graph_fn, [])
self.assertAllEqual(output, expected_output)
self.assertEqual(output.dtype, expected_output.dtype)
def test_indices_to_dense_vector_custom_values(self):
size = 100
num_indices = 10
rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
indices_value = np.random.rand(1)
default_value = np.random.rand(1)
expected_output = np.float32(np.ones(size) * default_value)
expected_output[rand_indices] = indices_value
def graph_fn():
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(
tf_rand_indices,
size,
indices_value=indices_value,
default_value=default_value)
return indicator
output = self.execute(graph_fn, [])
self.assertAllClose(output, expected_output)
self.assertEqual(output.dtype, expected_output.dtype)
def test_indices_to_dense_vector_all_indices_as_input(self):
size = 500
num_indices = 500
rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
expected_output = np.ones(size, dtype=np.float32)
def graph_fn():
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(tf_rand_indices, size)
return indicator
output = self.execute(graph_fn, [])
self.assertAllEqual(output, expected_output)
self.assertEqual(output.dtype, expected_output.dtype)
def test_indices_to_dense_vector_empty_indices_as_input(self):
size = 500
rand_indices = []
expected_output = np.zeros(size, dtype=np.float32)
def graph_fn():
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(tf_rand_indices, size)
return indicator
output = self.execute(graph_fn, [])
self.assertAllEqual(output, expected_output)
self.assertEqual(output.dtype, expected_output.dtype)
class GroundtruthFilterTest(test_case.TestCase):
def test_filter_groundtruth(self):
def graph_fn(input_image, input_boxes, input_classes, input_is_crowd,
input_area, input_difficult, input_label_types,
input_confidences, valid_indices):
input_tensors = {
fields.InputDataFields.image: input_image,
fields.InputDataFields.groundtruth_boxes: input_boxes,
fields.InputDataFields.groundtruth_classes: input_classes,
fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
fields.InputDataFields.groundtruth_area: input_area,
fields.InputDataFields.groundtruth_difficult: input_difficult,
fields.InputDataFields.groundtruth_label_types: input_label_types,
fields.InputDataFields.groundtruth_confidences: input_confidences,
}
output_tensors = ops.retain_groundtruth(input_tensors, valid_indices)
return output_tensors
input_image = np.random.rand(224, 224, 3)
input_boxes = np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]],
dtype=np.float32)
input_classes = np.array([1, 2], dtype=np.int32)
input_is_crowd = np.array([False, True], dtype=bool)
input_area = np.array([32, 48], dtype=np.float32)
input_difficult = np.array([True, False], dtype=bool)
input_label_types = np.array(['APPROPRIATE', 'INCORRECT'],
dtype=np.string_)
input_confidences = np.array([0.99, 0.5], dtype=np.float32)
valid_indices = np.array([0], dtype=np.int32)
# Strings are not supported on TPU.
output_tensors = self.execute_cpu(
graph_fn,
[input_image, input_boxes, input_classes, input_is_crowd, input_area,
input_difficult, input_label_types, input_confidences, valid_indices]
)
expected_tensors = {
fields.InputDataFields.image: input_image,
fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [1],
fields.InputDataFields.groundtruth_is_crowd: [False],
fields.InputDataFields.groundtruth_area: [32],
fields.InputDataFields.groundtruth_difficult: [True],
fields.InputDataFields.groundtruth_label_types: [six.b('APPROPRIATE')],
fields.InputDataFields.groundtruth_confidences: [0.99],
}
for key in [fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_confidences]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_label_types]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
def test_filter_with_missing_fields(self):
input_boxes = np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]],
dtype=float)
input_classes = np.array([1, 2], dtype=np.int32)
valid_indices = np.array([0], dtype=np.int32)
expected_tensors = {
fields.InputDataFields.groundtruth_boxes:
[[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes:
[1]
}
def graph_fn(input_boxes, input_classes, valid_indices):
input_tensors = {
fields.InputDataFields.groundtruth_boxes: input_boxes,
fields.InputDataFields.groundtruth_classes: input_classes
}
output_tensors = ops.retain_groundtruth(input_tensors, valid_indices)
return output_tensors
output_tensors = self.execute(graph_fn, [input_boxes, input_classes,
valid_indices])
for key in [fields.InputDataFields.groundtruth_boxes]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
def test_filter_with_empty_fields(self):
def graph_fn(input_boxes, input_classes, input_is_crowd, input_area,
input_difficult, input_confidences, valid_indices):
input_tensors = {
fields.InputDataFields.groundtruth_boxes: input_boxes,
fields.InputDataFields.groundtruth_classes: input_classes,
fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
fields.InputDataFields.groundtruth_area: input_area,
fields.InputDataFields.groundtruth_difficult: input_difficult,
fields.InputDataFields.groundtruth_confidences: input_confidences,
}
output_tensors = ops.retain_groundtruth(input_tensors, valid_indices)
return output_tensors
input_boxes = np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]],
dtype=float)
input_classes = np.array([1, 2], dtype=np.int32)
input_is_crowd = np.array([False, True], dtype=bool)
input_area = np.array([], dtype=np.float32)
input_difficult = np.array([], dtype=np.float32)
input_confidences = np.array([0.99, 0.5], dtype=np.float32)
valid_indices = np.array([0], dtype=np.int32)
expected_tensors = {
fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [1],
fields.InputDataFields.groundtruth_is_crowd: [False],
fields.InputDataFields.groundtruth_area: [],
fields.InputDataFields.groundtruth_difficult: [],
fields.InputDataFields.groundtruth_confidences: [0.99],
}
output_tensors = self.execute(graph_fn, [
input_boxes, input_classes, input_is_crowd, input_area,
input_difficult, input_confidences, valid_indices])
for key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_confidences]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_is_crowd]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
def test_filter_with_empty_groundtruth_boxes(self):
def graph_fn(input_boxes, input_classes, input_is_crowd, input_area,
input_difficult, input_confidences, valid_indices):
input_tensors = {
fields.InputDataFields.groundtruth_boxes: input_boxes,
fields.InputDataFields.groundtruth_classes: input_classes,
fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
fields.InputDataFields.groundtruth_area: input_area,
fields.InputDataFields.groundtruth_difficult: input_difficult,
fields.InputDataFields.groundtruth_confidences: input_confidences,
}
output_tensors = ops.retain_groundtruth(input_tensors, valid_indices)
return output_tensors
input_boxes = np.array([], dtype=float).reshape(0, 4)
input_classes = np.array([], dtype=np.int32)
input_is_crowd = np.array([], dtype=bool)
input_area = np.array([], dtype=np.float32)
input_difficult = np.array([], dtype=np.float32)
input_confidences = np.array([], dtype=np.float32)
valid_indices = np.array([], dtype=np.int32)
output_tensors = self.execute(graph_fn, [input_boxes, input_classes,
input_is_crowd, input_area,
input_difficult,
input_confidences,
valid_indices])
for key in output_tensors:
if key == fields.InputDataFields.groundtruth_boxes:
self.assertAllEqual([0, 4], output_tensors[key].shape)
else:
self.assertAllEqual([0], output_tensors[key].shape)
class RetainGroundTruthWithPositiveClasses(test_case.TestCase):
def test_filter_groundtruth_with_positive_classes(self):
def graph_fn(input_image, input_boxes, input_classes, input_is_crowd,
input_area, input_difficult, input_label_types,
input_confidences):
input_tensors = {
fields.InputDataFields.image: input_image,
fields.InputDataFields.groundtruth_boxes: input_boxes,
fields.InputDataFields.groundtruth_classes: input_classes,
fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
fields.InputDataFields.groundtruth_area: input_area,
fields.InputDataFields.groundtruth_difficult: input_difficult,
fields.InputDataFields.groundtruth_label_types: input_label_types,
fields.InputDataFields.groundtruth_confidences: input_confidences,
}
output_tensors = ops.retain_groundtruth_with_positive_classes(
input_tensors)
return output_tensors
input_image = np.random.rand(224, 224, 3)
input_boxes = np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]],
dtype=float)
input_classes = np.array([1, 0], dtype=np.int32)
input_is_crowd = np.array([False, True], dtype=bool)
input_area = np.array([32, 48], dtype=np.float32)
input_difficult = np.array([True, False], dtype=bool)
input_label_types = np.array(['APPROPRIATE', 'INCORRECT'],
dtype=np.string_)
input_confidences = np.array([0.99, 0.5], dtype=np.float32)
expected_tensors = {
fields.InputDataFields.image: input_image,
fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [1],
fields.InputDataFields.groundtruth_is_crowd: [False],
fields.InputDataFields.groundtruth_area: [32],
fields.InputDataFields.groundtruth_difficult: [True],
fields.InputDataFields.groundtruth_label_types: [six.b('APPROPRIATE')],
fields.InputDataFields.groundtruth_confidences: [0.99],
}
# Executing on CPU because string types are not supported on TPU.
output_tensors = self.execute_cpu(graph_fn,
[input_image, input_boxes,
input_classes, input_is_crowd,
input_area,
input_difficult, input_label_types,
input_confidences])
for key in [fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_confidences]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_label_types]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
class ReplaceNaNGroundtruthLabelScoresWithOnes(test_case.TestCase):
def test_replace_nan_groundtruth_label_scores_with_ones(self):
def graph_fn():
label_scores = tf.constant([np.nan, 1.0, np.nan])
output_tensor = ops.replace_nan_groundtruth_label_scores_with_ones(
label_scores)
return output_tensor
expected_tensor = [1.0, 1.0, 1.0]
output_tensor = self.execute(graph_fn, [])
self.assertAllClose(expected_tensor, output_tensor)
def test_input_equals_output_when_no_nans(self):
input_label_scores = [0.5, 1.0, 1.0]
def graph_fn():
label_scores_tensor = tf.constant(input_label_scores)
output_label_scores = ops.replace_nan_groundtruth_label_scores_with_ones(
label_scores_tensor)
return output_label_scores
output_label_scores = self.execute(graph_fn, [])
self.assertAllClose(input_label_scores, output_label_scores)
class GroundtruthFilterWithCrowdBoxesTest(test_case.TestCase):
def test_filter_groundtruth_with_crowd_boxes(self):
def graph_fn():
input_tensors = {
fields.InputDataFields.groundtruth_boxes:
[[0.1, 0.2, 0.6, 0.8], [0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [1, 2],
fields.InputDataFields.groundtruth_is_crowd: [True, False],
fields.InputDataFields.groundtruth_area: [100.0, 238.7],
fields.InputDataFields.groundtruth_confidences: [0.5, 0.99],
}
output_tensors = ops.filter_groundtruth_with_crowd_boxes(
input_tensors)
return output_tensors
expected_tensors = {
fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [2],
fields.InputDataFields.groundtruth_is_crowd: [False],
fields.InputDataFields.groundtruth_area: [238.7],
fields.InputDataFields.groundtruth_confidences: [0.99],
}
output_tensors = self.execute(graph_fn, [])
for key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_confidences]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_is_crowd]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
class GroundtruthFilterWithNanBoxTest(test_case.TestCase):
def test_filter_groundtruth_with_nan_box_coordinates(self):
def graph_fn():
input_tensors = {
fields.InputDataFields.groundtruth_boxes:
[[np.nan, np.nan, np.nan, np.nan], [0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [1, 2],
fields.InputDataFields.groundtruth_is_crowd: [False, True],
fields.InputDataFields.groundtruth_area: [100.0, 238.7],
fields.InputDataFields.groundtruth_confidences: [0.5, 0.99],
}
output_tensors = ops.filter_groundtruth_with_nan_box_coordinates(
input_tensors)
return output_tensors
expected_tensors = {
fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [2],
fields.InputDataFields.groundtruth_is_crowd: [True],
fields.InputDataFields.groundtruth_area: [238.7],
fields.InputDataFields.groundtruth_confidences: [0.99],
}
output_tensors = self.execute(graph_fn, [])
for key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_confidences]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_is_crowd]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
class GroundtruthFilterWithUnrecognizedClassesTest(test_case.TestCase):
def test_filter_unrecognized_classes(self):
def graph_fn():
input_tensors = {
fields.InputDataFields.groundtruth_boxes:
[[.3, .3, .5, .7], [0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [-1, 2],
fields.InputDataFields.groundtruth_is_crowd: [False, True],
fields.InputDataFields.groundtruth_area: [100.0, 238.7],
fields.InputDataFields.groundtruth_confidences: [0.5, 0.99],
}
output_tensors = ops.filter_unrecognized_classes(input_tensors)
return output_tensors
expected_tensors = {
fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [2],
fields.InputDataFields.groundtruth_is_crowd: [True],
fields.InputDataFields.groundtruth_area: [238.7],
fields.InputDataFields.groundtruth_confidences: [0.99],
}
output_tensors = self.execute(graph_fn, [])
for key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_confidences]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_is_crowd]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
class OpsTestNormalizeToTarget(test_case.TestCase):
def test_create_normalize_to_target(self):
if self.is_tf2():
self.skipTest('Skipping as variable names not supported in eager mode.')
inputs = tf.random_uniform([5, 10, 12, 3])
target_norm_value = 4.0
dim = 3
with self.test_session():
output = ops.normalize_to_target(inputs, target_norm_value, dim)
self.assertEqual(output.op.name, 'NormalizeToTarget/mul')
var_name = slim.get_variables()[0].name
self.assertEqual(var_name, 'NormalizeToTarget/weights:0')
def test_invalid_dim(self):
inputs = tf.random_uniform([5, 10, 12, 3])
target_norm_value = 4.0
dim = 10
with self.assertRaisesRegexp(
ValueError,
'dim must be non-negative but smaller than the input rank.'):
ops.normalize_to_target(inputs, target_norm_value, dim)
def test_invalid_target_norm_values(self):
inputs = tf.random_uniform([5, 10, 12, 3])
target_norm_value = [4.0, 4.0]
dim = 3
with self.assertRaisesRegexp(
ValueError, 'target_norm_value must be a float or a list of floats'):
ops.normalize_to_target(inputs, target_norm_value, dim)
def test_correct_output_shape(self):
if self.is_tf2():
      self.skipTest('normalize_to_target not supported in eager mode because'
' it requires creating variables.')
inputs = np.random.uniform(size=(5, 10, 12, 3)).astype(np.float32)
def graph_fn(inputs):
target_norm_value = 4.0
dim = 3
output = ops.normalize_to_target(inputs, target_norm_value, dim)
return output
# Executing on CPU since creating a variable inside a conditional is not
# supported.
outputs = self.execute_cpu(graph_fn, [inputs])
self.assertEqual(outputs.shape, inputs.shape)
def test_correct_initial_output_values(self):
if self.is_tf2():
      self.skipTest('normalize_to_target not supported in eager mode because'
' it requires creating variables.')
def graph_fn():
inputs = tf.constant([[[[3, 4], [7, 24]],
[[5, -12], [-1, 0]]]], tf.float32)
target_norm_value = 10.0
dim = 3
normalized_inputs = ops.normalize_to_target(inputs, target_norm_value,
dim)
return normalized_inputs
expected_output = [[[[30/5.0, 40/5.0], [70/25.0, 240/25.0]],
[[50/13.0, -120/13.0], [-10, 0]]]]
# Executing on CPU since creating a variable inside a conditional is not
# supported.
output = self.execute_cpu(graph_fn, [])
self.assertAllClose(output, expected_output)
def test_multiple_target_norm_values(self):
if self.is_tf2():
      self.skipTest('normalize_to_target not supported in eager mode because'
' it requires creating variables.')
def graph_fn():
inputs = tf.constant([[[[3, 4], [7, 24]],
[[5, -12], [-1, 0]]]], tf.float32)
target_norm_value = [10.0, 20.0]
dim = 3
normalized_inputs = ops.normalize_to_target(inputs, target_norm_value,
dim)
return normalized_inputs
expected_output = [[[[30/5.0, 80/5.0], [70/25.0, 480/25.0]],
[[50/13.0, -240/13.0], [-10, 0]]]]
# Executing on CPU since creating a variable inside a conditional is not
# supported.
output = self.execute_cpu(graph_fn, [])
self.assertAllClose(output, expected_output)
class OpsTestPositionSensitiveCropRegions(test_case.TestCase):
def test_position_sensitive(self):
num_spatial_bins = [3, 2]
image_shape = [3, 2, 6]
# The result for both boxes should be [[1, 2], [3, 4], [5, 6]]
# before averaging.
expected_output = np.array([3.5, 3.5]).reshape([2, 1, 1, 1])
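    # With 6 constant channels (values 1 through 6) split across 3 x 2 spatial
    # bins, each bin draws from exactly one channel, so global average pooling
    # yields (1 + 2 + 3 + 4 + 5 + 6) / 6 = 3.5 for every box.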
for crop_size_mult in range(1, 3):
crop_size = [3 * crop_size_mult, 2 * crop_size_mult]
def graph_fn():
# First channel is 1's, second channel is 2's, etc.
image = tf.constant(
list(range(1, 3 * 2 + 1)) * 6, dtype=tf.float32, shape=image_shape)
boxes = tf.random_uniform((2, 4))
# pylint:disable=cell-var-from-loop
ps_crop_and_pool = ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=True)
return ps_crop_and_pool
output = self.execute(graph_fn, [])
self.assertAllClose(output, expected_output)
def test_position_sensitive_with_equal_channels(self):
num_spatial_bins = [2, 2]
image_shape = [3, 3, 4]
crop_size = [2, 2]
def graph_fn():
image = tf.constant(
list(range(1, 3 * 3 + 1)), dtype=tf.float32, shape=[3, 3, 1])
tiled_image = tf.tile(image, [1, 1, image_shape[2]])
boxes = tf.random_uniform((3, 4))
box_ind = tf.constant([0, 0, 0], dtype=tf.int32)
# All channels are equal so position-sensitive crop and resize should
# work as the usual crop and resize for just one channel.
crop = tf.image.crop_and_resize(tf.expand_dims(image, axis=0), boxes,
box_ind, crop_size)
crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
ps_crop_and_pool = ops.position_sensitive_crop_regions(
tiled_image,
boxes,
crop_size,
num_spatial_bins,
global_pool=True)
return crop_and_pool, ps_crop_and_pool
# Crop and resize op is not supported in TPUs.
expected_output, output = self.execute_cpu(graph_fn, [])
self.assertAllClose(output, expected_output)
def test_raise_value_error_on_num_bins_less_than_one(self):
num_spatial_bins = [1, -1]
image_shape = [1, 1, 2]
crop_size = [2, 2]
image = tf.constant(1, dtype=tf.float32, shape=image_shape)
boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32)
with self.assertRaisesRegexp(ValueError, 'num_spatial_bins should be >= 1'):
ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=True)
def test_raise_value_error_on_non_divisible_crop_size(self):
num_spatial_bins = [2, 3]
image_shape = [1, 1, 6]
crop_size = [3, 2]
image = tf.constant(1, dtype=tf.float32, shape=image_shape)
boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32)
with self.assertRaisesRegexp(
ValueError, 'crop_size should be divisible by num_spatial_bins'):
ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=True)
def test_raise_value_error_on_non_divisible_num_channels(self):
num_spatial_bins = [2, 2]
image_shape = [1, 1, 5]
crop_size = [2, 2]
def graph_fn():
image = tf.constant(1, dtype=tf.float32, shape=image_shape)
boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32)
return ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=True)
with self.assertRaisesRegexp(
ValueError, 'Dimension size must be evenly divisible by 4 but is 5'):
self.execute(graph_fn, [])
def test_position_sensitive_with_global_pool_false(self):
num_spatial_bins = [3, 2]
image_shape = [3, 2, 6]
num_boxes = 2
expected_output = []
# Expected output, when crop_size = [3, 2].
expected_output.append(np.expand_dims(
np.tile(np.array([[1, 2],
[3, 4],
[5, 6]]), (num_boxes, 1, 1)),
axis=-1))
# Expected output, when crop_size = [6, 4].
expected_output.append(np.expand_dims(
np.tile(np.array([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 3, 4, 4],
[3, 3, 4, 4],
[5, 5, 6, 6],
[5, 5, 6, 6]]), (num_boxes, 1, 1)),
axis=-1))
for crop_size_mult in range(1, 3):
crop_size = [3 * crop_size_mult, 2 * crop_size_mult]
# First channel is 1's, second channel is 2's, etc.
def graph_fn():
# pylint:disable=cell-var-from-loop
image = tf.constant(
list(range(1, 3 * 2 + 1)) * 6, dtype=tf.float32, shape=image_shape)
boxes = tf.random_uniform((num_boxes, 4))
ps_crop = ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=False)
return ps_crop
output = self.execute(graph_fn, [])
self.assertAllClose(output, expected_output[crop_size_mult - 1])
def test_position_sensitive_with_global_pool_false_and_do_global_pool(self):
num_spatial_bins = [3, 2]
image_shape = [3, 2, 6]
num_boxes = 2
expected_output = []
# Expected output, when crop_size = [3, 2].
expected_output.append(np.mean(
np.expand_dims(
np.tile(np.array([[1, 2],
[3, 4],
[5, 6]]), (num_boxes, 1, 1)),
axis=-1),
axis=(1, 2), keepdims=True))
# Expected output, when crop_size = [6, 4].
expected_output.append(np.mean(
np.expand_dims(
np.tile(np.array([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 3, 4, 4],
[3, 3, 4, 4],
[5, 5, 6, 6],
[5, 5, 6, 6]]), (num_boxes, 1, 1)),
axis=-1),
axis=(1, 2), keepdims=True))
for crop_size_mult in range(1, 3):
crop_size = [3 * crop_size_mult, 2 * crop_size_mult]
def graph_fn():
# pylint:disable=cell-var-from-loop
# First channel is 1's, second channel is 2's, etc.
image = tf.constant(
list(range(1, 3 * 2 + 1)) * 6, dtype=tf.float32, shape=image_shape)
boxes = tf.random_uniform((num_boxes, 4))
# Perform global_pooling after running the function with
# global_pool=False.
ps_crop = ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=False)
ps_crop_and_pool = tf.reduce_mean(
ps_crop, reduction_indices=(1, 2), keepdims=True)
return ps_crop_and_pool
output = self.execute(graph_fn, [])
self.assertAllClose(output, expected_output[crop_size_mult - 1])
def test_raise_value_error_on_non_square_block_size(self):
num_spatial_bins = [3, 2]
image_shape = [3, 2, 6]
crop_size = [6, 2]
image = tf.constant(1, dtype=tf.float32, shape=image_shape)
boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32)
with self.assertRaisesRegexp(
ValueError, 'Only support square bin crop size for now.'):
ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=False)
class OpsTestBatchPositionSensitiveCropRegions(test_case.TestCase):
def test_position_sensitive_with_single_bin(self):
num_spatial_bins = [1, 1]
image_shape = [2, 3, 3, 4]
crop_size = [2, 2]
def graph_fn():
image = tf.random_uniform(image_shape)
boxes = tf.random_uniform((2, 3, 4))
box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)
# When a single bin is used, position-sensitive crop and pool should be
# the same as non-position sensitive crop and pool.
crop = tf.image.crop_and_resize(image,
tf.reshape(boxes, [-1, 4]), box_ind,
crop_size)
crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
crop_and_pool = tf.reshape(crop_and_pool, [2, 3, 1, 1, 4])
ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=True)
return crop_and_pool, ps_crop_and_pool
# Crop and resize is not supported on TPUs.
expected_output, output = self.execute_cpu(graph_fn, [])
self.assertAllClose(output, expected_output)
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
num_spatial_bins = [2, 2]
image_shape = [2, 2, 2, 4]
crop_size = [2, 2]
# box_ind = tf.constant([0, 1], dtype=tf.int32)
expected_output = []
    # Expected output when the box contains the whole image.
expected_output.append(
np.reshape(np.array([[4, 7],
[10, 13]]),
(1, 2, 2, 1))
)
    # Expected output when the box contains only the first row.
expected_output.append(
np.reshape(np.array([[3, 6],
[7, 10]]),
(1, 2, 2, 1))
)
expected_output = np.stack(expected_output, axis=0)
def graph_fn():
images = tf.constant(
list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32,
shape=image_shape)
# First box contains whole image, and second box contains only first row.
boxes = tf.constant(np.array([[[0., 0., 1., 1.]],
[[0., 0., 0.5, 1.]]]), dtype=tf.float32)
ps_crop = ops.batch_position_sensitive_crop_regions(
images, boxes, crop_size, num_spatial_bins, global_pool=False)
return ps_crop
output = self.execute(graph_fn, [])
self.assertAllEqual(output, expected_output)
def test_position_sensitive_with_global_pool_false_and_single_bin(self):
num_spatial_bins = [1, 1]
image_shape = [2, 3, 3, 4]
crop_size = [1, 1]
def graph_fn():
images = tf.random_uniform(image_shape)
boxes = tf.random_uniform((2, 3, 4))
# box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)
# Since single_bin is used and crop_size = [1, 1] (i.e., no crop resize),
# the outputs are the same whatever the global_pool value is.
ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
images, boxes, crop_size, num_spatial_bins, global_pool=True)
ps_crop = ops.batch_position_sensitive_crop_regions(
images, boxes, crop_size, num_spatial_bins, global_pool=False)
return ps_crop_and_pool, ps_crop
pooled_output, unpooled_output = self.execute(graph_fn, [])
self.assertAllClose(pooled_output, unpooled_output)
# The following tests are only executed on CPU because the output
# shape is not constant.
class ReframeBoxMasksToImageMasksTest(test_case.TestCase,
parameterized.TestCase):
def test_reframe_image_corners_relative_to_boxes(self):
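    # A hand-worked check: expressing the image corners [0, 0, 1, 1] in the
    # coordinate frame of the box [0.1, 0.2, 0.3, 0.4] gives
    # ((0 - 0.1)/0.2, (0 - 0.2)/0.2, (1 - 0.1)/0.2, (1 - 0.2)/0.2)
    # = (-0.5, -1, 4.5, 4), which is the expected output below.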
def graph_fn():
return ops.reframe_image_corners_relative_to_boxes(
tf.constant([[0.1, 0.2, 0.3, 0.4]]))
np_boxes = self.execute_cpu(graph_fn, [])
self.assertAllClose(np_boxes, [[-0.5, -1, 4.5, 4.]])
@parameterized.parameters(
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'bilinear'},
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'nearest'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'bilinear'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'nearest'},
)
def testZeroImageOnEmptyMask(self, mask_dtype, mask_dtype_np, resize_method):
np_expected_image_masks = np.array([[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]])
def graph_fn():
box_masks = tf.constant([[[0, 0],
[0, 0]]], dtype=mask_dtype)
boxes = tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32)
image_masks = ops.reframe_box_masks_to_image_masks(
box_masks, boxes, image_height=4, image_width=4,
resize_method=resize_method)
return image_masks
np_image_masks = self.execute_cpu(graph_fn, [])
self.assertEqual(np_image_masks.dtype, mask_dtype_np)
self.assertAllClose(np_image_masks, np_expected_image_masks)
@parameterized.parameters(
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'bilinear'},
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'nearest'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'bilinear'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'nearest'},
)
def testZeroBoxMasks(self, mask_dtype, mask_dtype_np, resize_method):
def graph_fn():
box_masks = tf.zeros([0, 3, 3], dtype=mask_dtype)
boxes = tf.zeros([0, 4], dtype=tf.float32)
image_masks = ops.reframe_box_masks_to_image_masks(
box_masks, boxes, image_height=4, image_width=4,
resize_method=resize_method)
return image_masks
np_image_masks = self.execute_cpu(graph_fn, [])
self.assertEqual(np_image_masks.dtype, mask_dtype_np)
self.assertAllEqual(np_image_masks.shape, np.array([0, 4, 4]))
def testBoxWithZeroArea(self):
def graph_fn():
box_masks = tf.zeros([1, 3, 3], dtype=tf.float32)
boxes = tf.constant([[0.1, 0.2, 0.1, 0.7]], dtype=tf.float32)
image_masks = ops.reframe_box_masks_to_image_masks(box_masks, boxes,
image_height=4,
image_width=4)
return image_masks
np_image_masks = self.execute_cpu(graph_fn, [])
self.assertAllEqual(np_image_masks.shape, np.array([1, 4, 4]))
@parameterized.parameters(
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'bilinear'},
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'nearest'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'bilinear'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'nearest'},
)
def testMaskIsCenteredInImageWhenBoxIsCentered(self, mask_dtype,
mask_dtype_np, resize_method):
def graph_fn():
box_masks = tf.constant([[[4, 4],
[4, 4]]], dtype=mask_dtype)
boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]], dtype=tf.float32)
image_masks = ops.reframe_box_masks_to_image_masks(
box_masks, boxes, image_height=4, image_width=4,
resize_method=resize_method)
return image_masks
np_expected_image_masks = np.array([[[0, 0, 0, 0],
[0, 4, 4, 0],
[0, 4, 4, 0],
[0, 0, 0, 0]]], dtype=mask_dtype_np)
np_image_masks = self.execute_cpu(graph_fn, [])
self.assertEqual(np_image_masks.dtype, mask_dtype_np)
self.assertAllClose(np_image_masks, np_expected_image_masks)
@parameterized.parameters(
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'bilinear'},
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'nearest'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'bilinear'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'nearest'},
)
def testMaskOffCenterRemainsOffCenterInImage(self, mask_dtype,
mask_dtype_np, resize_method):
def graph_fn():
box_masks = tf.constant([[[1, 0],
[0, 1]]], dtype=mask_dtype)
boxes = tf.constant([[0.25, 0.5, 0.75, 1.0]], dtype=tf.float32)
image_masks = ops.reframe_box_masks_to_image_masks(
box_masks, boxes, image_height=4, image_width=4,
resize_method=resize_method)
return image_masks
if mask_dtype == tf.float32 and resize_method == 'bilinear':
np_expected_image_masks = np.array([[[0, 0, 0, 0],
[0, 0, 0.6111111, 0.16666669],
[0, 0, 0.3888889, 0.83333337],
[0, 0, 0, 0]]], dtype=np.float32)
else:
np_expected_image_masks = np.array([[[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]]], dtype=mask_dtype_np)
np_image_masks = self.execute_cpu(graph_fn, [])
self.assertEqual(np_image_masks.dtype, mask_dtype_np)
self.assertAllClose(np_image_masks, np_expected_image_masks)
class MergeBoxesWithMultipleLabelsTest(test_case.TestCase):
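  # merge_boxes_with_multiple_labels is expected to collapse duplicate boxes
  # into a single row whose class and confidence vectors are multi-hot; e.g.
  # in the first test the two copies of [0.25, 0.25, 0.75, 0.75] (classes 0
  # and 2) merge into one box with class vector [1, 0, 1, 0, 0].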
def testMergeBoxesWithMultipleLabels(self):
def graph_fn():
boxes = tf.constant(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[0.25, 0.25, 0.75, 0.75]],
dtype=tf.float32)
class_indices = tf.constant([0, 4, 2], dtype=tf.int32)
class_confidences = tf.constant([0.8, 0.2, 0.1], dtype=tf.float32)
num_classes = 5
merged_boxes, merged_classes, merged_confidences, merged_box_indices = (
ops.merge_boxes_with_multiple_labels(
boxes, class_indices, class_confidences, num_classes))
return (merged_boxes, merged_classes, merged_confidences,
merged_box_indices)
expected_merged_boxes = np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32)
expected_merged_classes = np.array(
[[1, 0, 1, 0, 0], [0, 0, 0, 0, 1]], dtype=np.int32)
expected_merged_confidences = np.array(
[[0.8, 0, 0.1, 0, 0], [0, 0, 0, 0, 0.2]], dtype=np.float32)
expected_merged_box_indices = np.array([0, 1], dtype=np.int32)
# Running on CPU only as tf.unique is not supported on TPU.
(np_merged_boxes, np_merged_classes, np_merged_confidences,
np_merged_box_indices) = self.execute_cpu(graph_fn, [])
self.assertAllClose(np_merged_boxes, expected_merged_boxes)
self.assertAllClose(np_merged_classes, expected_merged_classes)
self.assertAllClose(np_merged_confidences, expected_merged_confidences)
self.assertAllClose(np_merged_box_indices, expected_merged_box_indices)
def testMergeBoxesWithMultipleLabelsCornerCase(self):
def graph_fn():
boxes = tf.constant(
[[0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1],
[1, 1, 1, 1], [1, 0, 1, 1], [0, 1, 1, 1], [0, 0, 1, 1]],
dtype=tf.float32)
class_indices = tf.constant([0, 1, 2, 3, 2, 1, 0, 3], dtype=tf.int32)
class_confidences = tf.constant([0.1, 0.9, 0.2, 0.8, 0.3, 0.7, 0.4, 0.6],
dtype=tf.float32)
num_classes = 4
merged_boxes, merged_classes, merged_confidences, merged_box_indices = (
ops.merge_boxes_with_multiple_labels(
boxes, class_indices, class_confidences, num_classes))
return (merged_boxes, merged_classes, merged_confidences,
merged_box_indices)
expected_merged_boxes = np.array(
[[0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]],
dtype=np.float32)
expected_merged_classes = np.array(
[[1, 0, 0, 1], [1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1]],
dtype=np.int32)
expected_merged_confidences = np.array(
[[0.1, 0, 0, 0.6], [0.4, 0.9, 0, 0],
[0, 0.7, 0.2, 0], [0, 0, 0.3, 0.8]], dtype=np.float32)
expected_merged_box_indices = np.array([0, 1, 2, 3], dtype=np.int32)
# Running on CPU only as tf.unique is not supported on TPU.
(np_merged_boxes, np_merged_classes, np_merged_confidences,
np_merged_box_indices) = self.execute_cpu(graph_fn, [])
self.assertAllClose(np_merged_boxes, expected_merged_boxes)
self.assertAllClose(np_merged_classes, expected_merged_classes)
self.assertAllClose(np_merged_confidences, expected_merged_confidences)
self.assertAllClose(np_merged_box_indices, expected_merged_box_indices)
def testMergeBoxesWithEmptyInputs(self):
def graph_fn():
boxes = tf.zeros([0, 4], dtype=tf.float32)
class_indices = tf.constant([], dtype=tf.int32)
class_confidences = tf.constant([], dtype=tf.float32)
num_classes = 5
merged_boxes, merged_classes, merged_confidences, merged_box_indices = (
ops.merge_boxes_with_multiple_labels(
boxes, class_indices, class_confidences, num_classes))
return (merged_boxes, merged_classes, merged_confidences,
merged_box_indices)
# Running on CPU only as tf.unique is not supported on TPU.
(np_merged_boxes, np_merged_classes, np_merged_confidences,
np_merged_box_indices) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(np_merged_boxes.shape, [0, 4])
self.assertAllEqual(np_merged_classes.shape, [0, 5])
self.assertAllEqual(np_merged_confidences.shape, [0, 5])
self.assertAllEqual(np_merged_box_indices.shape, [0])
def testMergeBoxesWithMultipleLabelsUsesInt64(self):
if self.is_tf2():
self.skipTest('Getting op names is not supported in eager mode.')
boxes = tf.constant(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[0.25, 0.25, 0.75, 0.75]],
dtype=tf.float32)
class_indices = tf.constant([0, 4, 2], dtype=tf.int32)
class_confidences = tf.constant([0.8, 0.2, 0.1], dtype=tf.float32)
num_classes = 5
ops.merge_boxes_with_multiple_labels(
boxes, class_indices, class_confidences, num_classes)
graph = tf.get_default_graph()
def assert_dtype_is_int64(op_name):
op = graph.get_operation_by_name(op_name)
self.assertEqual(op.get_attr('dtype'), tf.int64)
def assert_t_is_int64(op_name):
op = graph.get_operation_by_name(op_name)
self.assertEqual(op.get_attr('T'), tf.int64)
assert_dtype_is_int64('map/TensorArray')
assert_dtype_is_int64('map/TensorArray_1')
assert_dtype_is_int64('map/while/TensorArrayReadV3')
assert_t_is_int64('map/while/TensorArrayWrite/TensorArrayWriteV3')
assert_t_is_int64(
'map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3')
assert_dtype_is_int64('map/TensorArrayStack/TensorArrayGatherV3')
class NearestNeighborUpsamplingTest(test_case.TestCase):
def test_upsampling_with_single_scale(self):
def graph_fn(inputs):
custom_op_output = ops.nearest_neighbor_upsampling(inputs, scale=2)
return custom_op_output
inputs = np.reshape(np.arange(4).astype(np.float32), [1, 2, 2, 1])
custom_op_output = self.execute(graph_fn, [inputs])
expected_output = [[[[0], [0], [1], [1]],
[[0], [0], [1], [1]],
[[2], [2], [3], [3]],
[[2], [2], [3], [3]]]]
self.assertAllClose(custom_op_output, expected_output)
def test_upsampling_with_separate_height_width_scales(self):
def graph_fn(inputs):
custom_op_output = ops.nearest_neighbor_upsampling(inputs,
height_scale=2,
width_scale=3)
return custom_op_output
inputs = np.reshape(np.arange(4).astype(np.float32), [1, 2, 2, 1])
custom_op_output = self.execute(graph_fn, [inputs])
expected_output = [[[[0], [0], [0], [1], [1], [1]],
[[0], [0], [0], [1], [1], [1]],
[[2], [2], [2], [3], [3], [3]],
[[2], [2], [2], [3], [3], [3]]]]
self.assertAllClose(custom_op_output, expected_output)
class MatmulGatherOnZerothAxis(test_case.TestCase):
def test_gather_2d(self):
def graph_fn(params, indices):
return ops.matmul_gather_on_zeroth_axis(params, indices)
params = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[0, 1, 0, 0]], dtype=np.float32)
indices = np.array([2, 2, 1], dtype=np.int32)
expected_output = np.array([[9, 10, 11, 12], [9, 10, 11, 12], [5, 6, 7, 8]])
gather_output = self.execute(graph_fn, [params, indices])
self.assertAllClose(gather_output, expected_output)
def test_gather_3d(self):
def graph_fn(params, indices):
return ops.matmul_gather_on_zeroth_axis(params, indices)
params = np.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[9, 10], [11, 12]],
[[0, 1], [0, 0]]], dtype=np.float32)
indices = np.array([0, 3, 1], dtype=np.int32)
expected_output = np.array([[[1, 2], [3, 4]],
[[0, 1], [0, 0]],
[[5, 6], [7, 8]]])
gather_output = self.execute(graph_fn, [params, indices])
self.assertAllClose(gather_output, expected_output)
def test_gather_with_many_indices(self):
def graph_fn(params, indices):
return ops.matmul_gather_on_zeroth_axis(params, indices)
params = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[0, 1, 0, 0]], dtype=np.float32)
indices = np.array([0, 0, 0, 0, 0, 0], dtype=np.int32)
expected_output = np.array(6*[[1, 2, 3, 4]])
gather_output = self.execute(graph_fn, [params, indices])
self.assertAllClose(gather_output, expected_output)
def test_gather_with_dynamic_shape_input(self):
def graph_fn(params, indices):
return ops.matmul_gather_on_zeroth_axis(params, indices)
params = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[0, 1, 0, 0]], dtype=np.float32)
indices = np.array([0, 0, 0, 0, 0, 0])
expected_output = np.array(6*[[1, 2, 3, 4]])
gather_output = self.execute(graph_fn, [params, indices])
self.assertAllClose(gather_output, expected_output)
class FpnFeatureLevelsTest(test_case.TestCase):
def test_correct_fpn_levels(self):
image_size = 640
    pretraining_image_size = 224
    image_ratio = image_size * 1.0 / pretraining_image_size
boxes = np.array(
[
[
[0, 0, 111, 111], # Level 0.
[0, 0, 113, 113], # Level 1.
[0, 0, 223, 223], # Level 1.
[0, 0, 225, 225], # Level 2.
[0, 0, 449, 449] # Level 3.
],
],
dtype=np.float32) / image_size
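    # The expected assignment below matches the FPN-style heuristic
    # level = unit_scale_index + floor(log2(image_ratio * sqrt(normalized box
    # area))); e.g. the 113-pixel box gives 2 + floor(log2(113 / 224)) = 1.
    # (This is only a sketch of the heuristic; ops.fpn_feature_levels is
    # authoritative.)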
def graph_fn(boxes):
return ops.fpn_feature_levels(
num_levels=5, unit_scale_index=2, image_ratio=image_ratio,
boxes=boxes)
levels = self.execute(graph_fn, [boxes])
self.assertAllEqual([[0, 1, 1, 2, 3]], levels)
class TestBfloat16ToFloat32(test_case.TestCase):
def test_convert_list(self):
var_list = [
tf.constant([1.], dtype=tf.bfloat16),
tf.constant([2], dtype=tf.int32)
]
casted_var_list = ops.bfloat16_to_float32_nested(var_list)
self.assertEqual(casted_var_list[0].dtype, tf.float32)
self.assertEqual(casted_var_list[1].dtype, tf.int32)
def test_convert_tensor_dict(self):
tensor_dict = {
'key1': tf.constant([1.], dtype=tf.bfloat16),
'key2': [
tf.constant([0.5], dtype=tf.bfloat16),
tf.constant([7], dtype=tf.int32),
],
'key3': tf.constant([2], dtype=tf.uint8),
}
tensor_dict = ops.bfloat16_to_float32_nested(tensor_dict)
self.assertEqual(tensor_dict['key1'].dtype, tf.float32)
self.assertEqual(tensor_dict['key2'][0].dtype, tf.float32)
self.assertEqual(tensor_dict['key2'][1].dtype, tf.int32)
self.assertEqual(tensor_dict['key3'].dtype, tf.uint8)
class TestGatherWithPaddingValues(test_case.TestCase):
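  # Negative indices are expected to select the supplied padding value rather
  # than a row of the input; the two -1 entries below therefore map to
  # all-zero rows.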
def test_gather_with_padding_values(self):
expected_gathered_tensor = [
[0, 0, 0.2, 0.2],
[0, 0, 0, 0],
[0, 0, 0.1, 0.1],
[0, 0, 0, 0],
]
def graph_fn():
indices = tf.constant([1, -1, 0, -1])
input_tensor = tf.constant([[0, 0, 0.1, 0.1], [0, 0, 0.2, 0.2]],
dtype=tf.float32)
gathered_tensor = ops.gather_with_padding_values(
input_tensor,
indices=indices,
padding_value=tf.zeros_like(input_tensor[0]))
self.assertEqual(gathered_tensor.dtype, tf.float32)
return gathered_tensor
gathered_tensor_np = self.execute(graph_fn, [])
self.assertAllClose(expected_gathered_tensor, gathered_tensor_np)
class TestGIoU(test_case.TestCase):
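  # These tests assume the standard generalized IoU definition
  # GIoU(A, B) = IoU(A, B) - |C \ (A union B)| / |C|, where C is the smallest
  # box enclosing A and B. For example, the disjoint 2x2 boxes [3, 3, 5, 5]
  # and [3, 7, 5, 9] have union area 8 and enclosing area 12, so
  # GIoU = 0 - 4/12 = -1/3, matching the second expected value below.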
def test_giou_with_no_overlap(self):
expected_giou_tensor = [
0, -1/3, -3/4, 0, -98/100
]
def graph_fn():
boxes1 = tf.constant([[3, 4, 5, 6], [3, 3, 5, 5],
[0, 0, 0, 0], [3, 3, 5, 5],
[9, 9, 10, 10]],
dtype=tf.float32)
boxes2 = tf.constant([[3, 2, 5, 4], [3, 7, 5, 9],
[5, 5, 10, 10], [3, 5, 5, 7],
[0, 0, 1, 1]], dtype=tf.float32)
giou = ops.giou(boxes1, boxes2)
self.assertEqual(giou.dtype, tf.float32)
return giou
giou = self.execute(graph_fn, [])
self.assertAllClose(expected_giou_tensor, giou)
def test_giou_with_overlaps(self):
expected_giou_tensor = [
1/25, 1/4, 1/3, 1/7 - 2/9
]
def graph_fn():
boxes1 = tf.constant([[2, 1, 7, 6], [2, 2, 4, 4],
[2, 2, 4, 4], [2, 2, 4, 4]],
dtype=tf.float32)
boxes2 = tf.constant([[4, 3, 5, 4], [3, 3, 4, 4],
[2, 3, 4, 5], [3, 3, 5, 5]], dtype=tf.float32)
giou = ops.giou(boxes1, boxes2)
self.assertEqual(giou.dtype, tf.float32)
return giou
giou = self.execute(graph_fn, [])
self.assertAllClose(expected_giou_tensor, giou)
def test_giou_with_perfect_overlap(self):
expected_giou_tensor = [1]
def graph_fn():
boxes1 = tf.constant([[3, 3, 5, 5]], dtype=tf.float32)
boxes2 = tf.constant([[3, 3, 5, 5]], dtype=tf.float32)
giou = ops.giou(boxes1, boxes2)
self.assertEqual(giou.dtype, tf.float32)
return giou
giou = self.execute(graph_fn, [])
self.assertAllClose(expected_giou_tensor, giou)
def test_giou_with_zero_area_boxes(self):
expected_giou_tensor = [0]
def graph_fn():
boxes1 = tf.constant([[1, 1, 1, 1]], dtype=tf.float32)
boxes2 = tf.constant([[1, 1, 1, 1]], dtype=tf.float32)
giou = ops.giou(boxes1, boxes2)
self.assertEqual(giou.dtype, tf.float32)
return giou
giou = self.execute(graph_fn, [])
self.assertAllClose(expected_giou_tensor, giou)
def test_giou_different_with_l1_same(self):
expected_giou_tensor = [
2/3, 3/5
]
def graph_fn():
boxes1 = tf.constant([[3, 3, 5, 5], [3, 3, 5, 5]], dtype=tf.float32)
boxes2 = tf.constant([[3, 2.5, 5, 5.5], [3, 2.5, 5, 4.5]],
dtype=tf.float32)
giou = ops.giou(boxes1, boxes2)
self.assertEqual(giou.dtype, tf.float32)
return giou
giou = self.execute(graph_fn, [])
self.assertAllClose(expected_giou_tensor, giou)
class TestCoordinateConversion(test_case.TestCase):
def test_coord_conv(self):
expected_box_tensor = [
[0.5, 0.5, 5.5, 5.5], [2, 1, 4, 7], [0, 0, 0, 0]
]
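    # The inputs below are interpreted as [y_center, x_center, height, width];
    # e.g. [3, 4, 2, 6] maps to [3 - 1, 4 - 3, 3 + 1, 4 + 3] = [2, 1, 4, 7] in
    # corner form.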
def graph_fn():
boxes = tf.constant([[3, 3, 5, 5], [3, 4, 2, 6], [0, 0, 0, 0]],
dtype=tf.float32)
converted = ops.center_to_corner_coordinate(boxes)
self.assertEqual(converted.dtype, tf.float32)
return converted
converted = self.execute(graph_fn, [])
self.assertAllClose(expected_box_tensor, converted)
if __name__ == '__main__':
tf.test.main()
| 67,306 | 37.199205 | 80 | py |
models | models-master/research/object_detection/utils/colab_utils.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for colab tutorials located in object_detection/colab_tutorials/..."""
import base64
import io
import json
from typing import Dict
from typing import List
from typing import Union
import uuid
from IPython.display import display
from IPython.display import Javascript
import numpy as np
from PIL import Image
from google.colab import output
from google.colab.output import eval_js
def image_from_numpy(image):
"""Open an image at the specified path and encode it in Base64.
Args:
image: np.ndarray
Image represented as a numpy array
Returns:
An encoded Base64 representation of the image
"""
with io.BytesIO() as img_output:
Image.fromarray(image).save(img_output, format='JPEG')
data = img_output.getvalue()
data = str(base64.b64encode(data))[2:-1]
return data
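# A minimal round-trip sketch (illustrative only; the names below are not part
# of this module's API). The Base64 string produced above can be decoded back
# into a PIL image:
#
#   encoded = image_from_numpy(np.zeros((4, 4, 3), dtype=np.uint8))
#   decoded = Image.open(io.BytesIO(base64.b64decode(encoded)))
#   assert decoded.size == (4, 4)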
def draw_bbox(image_urls, callbackId): # pylint: disable=invalid-name
"""Open the bounding box UI and send the results to a callback function.
Args:
image_urls: list[str | np.ndarray]
      List of locations to load the images from. If a np.ndarray is
      given, the array is interpreted as an image and sent to the frontend.
If a str is given, the string is interpreted as a path and is read as a
np.ndarray before being sent to the frontend.
callbackId: str
The ID for the callback function to send the bounding box results to
when the user hits submit.
"""
js = Javascript('''
async function load_image(imgs, callbackId) {
//init organizational elements
const div = document.createElement('div');
var image_cont = document.createElement('div');
var errorlog = document.createElement('div');
var crosshair_h = document.createElement('div');
crosshair_h.style.position = "absolute";
crosshair_h.style.backgroundColor = "transparent";
crosshair_h.style.width = "100%";
crosshair_h.style.height = "0px";
crosshair_h.style.zIndex = 9998;
crosshair_h.style.borderStyle = "dotted";
crosshair_h.style.borderWidth = "2px";
crosshair_h.style.borderColor = "rgba(255, 0, 0, 0.75)";
crosshair_h.style.cursor = "crosshair";
var crosshair_v = document.createElement('div');
crosshair_v.style.position = "absolute";
crosshair_v.style.backgroundColor = "transparent";
crosshair_v.style.width = "0px";
crosshair_v.style.height = "100%";
crosshair_v.style.zIndex = 9999;
crosshair_v.style.top = "0px";
crosshair_v.style.borderStyle = "dotted";
crosshair_v.style.borderWidth = "2px";
crosshair_v.style.borderColor = "rgba(255, 0, 0, 0.75)";
crosshair_v.style.cursor = "crosshair";
crosshair_v.style.marginTop = "23px";
var brdiv = document.createElement('br');
//init control elements
var next = document.createElement('button');
var prev = document.createElement('button');
var submit = document.createElement('button');
var deleteButton = document.createElement('button');
var deleteAllbutton = document.createElement('button');
//init image containers
var image = new Image();
var canvas_img = document.createElement('canvas');
var ctx = canvas_img.getContext("2d");
canvas_img.style.cursor = "crosshair";
canvas_img.setAttribute('draggable', false);
crosshair_v.setAttribute('draggable', false);
crosshair_h.setAttribute('draggable', false);
// bounding box containers
const height = 600
var allBoundingBoxes = [];
var curr_image = 0
var im_height = 0;
var im_width = 0;
//initialize bounding boxes
for (var i = 0; i < imgs.length; i++) {
allBoundingBoxes[i] = [];
}
//initialize image view
errorlog.id = 'errorlog';
image.style.display = 'block';
image.setAttribute('draggable', false);
//load the first image
img = imgs[curr_image];
image.src = "data:image/png;base64," + img;
image.onload = function() {
// normalize display height and canvas
image.height = height;
image_cont.height = canvas_img.height = image.height;
image_cont.width = canvas_img.width = image.naturalWidth;
crosshair_v.style.height = image_cont.height + "px";
crosshair_h.style.width = image_cont.width + "px";
// draw the new image
ctx.drawImage(image, 0, 0, image.naturalWidth, image.naturalHeight, 0, 0, canvas_img.width, canvas_img.height);
};
// move to next image in array
next.textContent = "next image";
next.onclick = function(){
if (curr_image < imgs.length - 1){
// clear canvas and load new image
curr_image += 1;
errorlog.innerHTML = "";
}
else{
errorlog.innerHTML = "All images completed!!";
}
resetcanvas();
}
      // move backward through the list of images
prev.textContent = "prev image"
prev.onclick = function(){
if (curr_image > 0){
// clear canvas and load new image
curr_image -= 1;
errorlog.innerHTML = "";
}
else{
errorlog.innerHTML = "at the beginning";
}
resetcanvas();
}
// on delete, deletes the last bounding box
deleteButton.textContent = "undo bbox";
deleteButton.onclick = function(){
boundingBoxes.pop();
ctx.clearRect(0, 0, canvas_img.width, canvas_img.height);
image.src = "data:image/png;base64," + img;
image.onload = function() {
ctx.drawImage(image, 0, 0, image.naturalWidth, image.naturalHeight, 0, 0, canvas_img.width, canvas_img.height);
boundingBoxes.map(r => {drawRect(r)});
};
}
      // on delete all, deletes all of the bounding boxes
deleteAllbutton.textContent = "delete all"
deleteAllbutton.onclick = function(){
boundingBoxes = [];
ctx.clearRect(0, 0, canvas_img.width, canvas_img.height);
image.src = "data:image/png;base64," + img;
image.onload = function() {
ctx.drawImage(image, 0, 0, image.naturalWidth, image.naturalHeight, 0, 0, canvas_img.width, canvas_img.height);
//boundingBoxes.map(r => {drawRect(r)});
};
}
// on submit, send the boxes to display
submit.textContent = "submit";
submit.onclick = function(){
errorlog.innerHTML = "";
        // send box data to the callback function
google.colab.kernel.invokeFunction(callbackId, [allBoundingBoxes], {});
}
// init template for annotations
const annotation = {
x: 0,
y: 0,
w: 0,
h: 0,
};
// the array of all rectangles
let boundingBoxes = allBoundingBoxes[curr_image];
// the actual rectangle, the one that is being drawn
let o = {};
// a variable to store the mouse position
let m = {},
// a variable to store the point where you begin to draw the
// rectangle
start = {};
// a boolean variable to store the drawing state
let isDrawing = false;
var elem = null;
function handleMouseDown(e) {
// on mouse click set change the cursor and start tracking the mouse position
start = oMousePos(canvas_img, e);
// configure is drawing to true
isDrawing = true;
}
function handleMouseMove(e) {
// move crosshairs, but only within the bounds of the canvas
if (document.elementsFromPoint(e.pageX, e.pageY).includes(canvas_img)) {
crosshair_h.style.top = e.pageY + "px";
crosshair_v.style.left = e.pageX + "px";
}
// move the bounding box
if(isDrawing){
m = oMousePos(canvas_img, e);
draw();
}
}
function handleMouseUp(e) {
if (isDrawing) {
// on mouse release, push a bounding box to array and draw all boxes
isDrawing = false;
const box = Object.create(annotation);
// calculate the position of the rectangle
if (o.w > 0){
box.x = o.x;
}
else{
box.x = o.x + o.w;
}
if (o.h > 0){
box.y = o.y;
}
else{
box.y = o.y + o.h;
}
box.w = Math.abs(o.w);
box.h = Math.abs(o.h);
// add the bounding box to the image
boundingBoxes.push(box);
draw();
}
}
function draw() {
o.x = (start.x)/image.width; // start position of x
o.y = (start.y)/image.height; // start position of y
o.w = (m.x - start.x)/image.width; // width
o.h = (m.y - start.y)/image.height; // height
ctx.clearRect(0, 0, canvas_img.width, canvas_img.height);
ctx.drawImage(image, 0, 0, image.naturalWidth, image.naturalHeight, 0, 0, canvas_img.width, canvas_img.height);
// draw all the rectangles saved in the rectsRy
boundingBoxes.map(r => {drawRect(r)});
// draw the actual rectangle
drawRect(o);
}
// add the handlers needed for dragging
crosshair_h.addEventListener("mousedown", handleMouseDown);
crosshair_v.addEventListener("mousedown", handleMouseDown);
document.addEventListener("mousemove", handleMouseMove);
document.addEventListener("mouseup", handleMouseUp);
function resetcanvas(){
// clear canvas
ctx.clearRect(0, 0, canvas_img.width, canvas_img.height);
img = imgs[curr_image]
image.src = "data:image/png;base64," + img;
// onload init new canvas and display image
image.onload = function() {
// normalize display height and canvas
image.height = height;
image_cont.height = canvas_img.height = image.height;
image_cont.width = canvas_img.width = image.naturalWidth;
crosshair_v.style.height = image_cont.height + "px";
crosshair_h.style.width = image_cont.width + "px";
// draw the new image
ctx.drawImage(image, 0, 0, image.naturalWidth, image.naturalHeight, 0, 0, canvas_img.width, canvas_img.height);
// draw bounding boxes
boundingBoxes = allBoundingBoxes[curr_image];
boundingBoxes.map(r => {drawRect(r)});
};
}
function drawRect(o){
// draw a predefined rectangle
ctx.strokeStyle = "red";
ctx.lineWidth = 2;
ctx.beginPath(o);
ctx.rect(o.x * image.width, o.y * image.height, o.w * image.width, o.h * image.height);
ctx.stroke();
}
// Function to detect the mouse position
function oMousePos(canvas_img, evt) {
let ClientRect = canvas_img.getBoundingClientRect();
return {
x: evt.clientX - ClientRect.left,
y: evt.clientY - ClientRect.top
};
}
//configure colab output display
google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
//build the html document that will be seen in output
div.appendChild(document.createElement('br'))
div.appendChild(image_cont)
image_cont.appendChild(canvas_img)
image_cont.appendChild(crosshair_h)
image_cont.appendChild(crosshair_v)
div.appendChild(document.createElement('br'))
div.appendChild(errorlog)
div.appendChild(prev)
div.appendChild(next)
div.appendChild(deleteButton)
div.appendChild(deleteAllbutton)
div.appendChild(document.createElement('br'))
div.appendChild(brdiv)
div.appendChild(submit)
document.querySelector("#output-area").appendChild(div);
return
}''')
# load the images as a byte array
bytearrays = []
for image in image_urls:
    if isinstance(image, np.ndarray):
      bytearrays.append(image_from_numpy(image))
    elif isinstance(image, str):
      # Read the image at the given path into a numpy array first, as
      # documented above.
      bytearrays.append(
          image_from_numpy(np.asarray(Image.open(image).convert('RGB'))))
    else:
      raise TypeError('Image has unsupported type {}.'.format(type(image)))
# format arrays for input
image_data = json.dumps(bytearrays)
del bytearrays
# call java script function pass string byte array(image_data) as input
display(js)
eval_js('load_image({}, \'{}\')'.format(image_data, callbackId))
return
def annotate(imgs: List[Union[str, np.ndarray]], # pylint: disable=invalid-name
box_storage_pointer: List[np.ndarray],
callbackId: str = None):
"""Open the bounding box UI and prompt the user for input.
Args:
imgs: list[str | np.ndarray]
      List of locations to load the images from. If a np.ndarray is
      given, the array is interpreted as an image and sent to the frontend. If
a str is given, the string is interpreted as a path and is read as a
np.ndarray before being sent to the frontend.
box_storage_pointer: list[np.ndarray]
Destination list for bounding box arrays. Each array in this list
corresponds to one of the images given in imgs. The array is a
N x 4 array where N is the number of bounding boxes given by the user
for that particular image. If there are no bounding boxes for an image,
None is used instead of an empty array.
callbackId: str, optional
      The ID for the callback function that communicates between the frontend
and the backend. If no ID is given, a random UUID string is used instead.
"""
# Set a random ID for the callback function
if callbackId is None:
callbackId = str(uuid.uuid1()).replace('-', '')
def dictToList(input_bbox): # pylint: disable=invalid-name
"""Convert bbox.
    This function converts the dictionary from the frontend (in the format
    {x, y, w, h} as shown in callbackFunction) into a list
    ([y_min, x_min, y_max, x_max]).
Args:
      input_bbox: A dict with keys 'x', 'y', 'w', 'h' describing a single box
        in normalized image coordinates.
Returns:
A list with bbox coordinates in the form [ymin, xmin, ymax, xmax].
"""
return (input_bbox['y'], input_bbox['x'], input_bbox['y'] + input_bbox['h'],
input_bbox['x'] + input_bbox['w'])
def callbackFunction(annotations: List[List[Dict[str, float]]]): # pylint: disable=invalid-name
"""Callback function.
This is the call back function to capture the data from the frontend and
convert the data into a numpy array.
Args:
annotations: list[list[dict[str, float]]]
The input of the call back function is a list of list of objects
corresponding to the annotations. The format of annotations is shown
below
[
// stuff for image 1
[
// stuff for rect 1
{x, y, w, h},
// stuff for rect 2
{x, y, w, h},
...
],
// stuff for image 2
[
// stuff for rect 1
{x, y, w, h},
// stuff for rect 2
{x, y, w, h},
...
],
...
]
"""
# reset the boxes list
nonlocal box_storage_pointer
boxes: List[np.ndarray] = box_storage_pointer
boxes.clear()
# load the new annotations into the boxes list
for annotations_per_img in annotations:
rectangles_as_arrays = [np.clip(dictToList(annotation), 0, 1)
for annotation in annotations_per_img]
if rectangles_as_arrays:
boxes.append(np.stack(rectangles_as_arrays))
else:
boxes.append(None)
# output the annotations to the errorlog
with output.redirect_to_element('#errorlog'):
display('--boxes array populated--')
output.register_callback(callbackId, callbackFunction)
draw_bbox(imgs, callbackId)
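# Illustrative usage sketch (not part of the original module). It shows how
# annotate() is typically driven from a Colab cell; the image contents and
# names below are hypothetical placeholders, and the UI callbacks only work
# when this runs inside a Colab notebook frontend.
def _example_annotate_usage(): # pylint: disable=invalid-name
  """Collects user-drawn boxes for one dummy image into `gt_boxes`."""
  images = [np.zeros((480, 640, 3), dtype=np.uint8)]
  gt_boxes = [] # populated in place with one N x 4 array per image
  annotate(images, box_storage_pointer=gt_boxes)
  # After the user draws boxes and clicks submit, gt_boxes[0] holds rows of
  # [ymin, xmin, ymax, xmax] normalized to [0, 1], or None if no boxes were
  # drawn for that image.
  return gt_boxes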
| 19,472 | 39.56875 | 137 | py |
models | models-master/research/object_detection/utils/per_image_vrd_evaluation_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.per_image_vrd_evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import per_image_vrd_evaluation
class SingleClassPerImageVrdEvaluationTest(tf.test.TestCase):
def setUp(self):
matching_iou_threshold = 0.5
self.eval = per_image_vrd_evaluation.PerImageVRDEvaluation(
matching_iou_threshold)
box_data_type = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))])
self.detected_box_tuples = np.array(
[([0, 0, 1.1, 1], [1, 1, 2, 2]), ([0, 0, 1, 1], [1, 1, 2, 2]),
([1, 1, 2, 2], [0, 0, 1.1, 1])],
dtype=box_data_type)
self.detected_scores = np.array([0.8, 0.2, 0.1], dtype=float)
self.groundtruth_box_tuples = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=box_data_type)
def test_tp_fp_eval(self):
tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_box_tuples, self.groundtruth_box_tuples)
expected_tp_fp_labels = np.array([True, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_tp_fp_eval_empty_gt(self):
box_data_type = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))])
tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_box_tuples, np.array([], dtype=box_data_type))
expected_tp_fp_labels = np.array([False, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
class MultiClassPerImageVrdEvaluationTest(tf.test.TestCase):
def setUp(self):
matching_iou_threshold = 0.5
self.eval = per_image_vrd_evaluation.PerImageVRDEvaluation(
matching_iou_threshold)
box_data_type = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))])
label_data_type = np.dtype([('subject', 'i4'), ('object', 'i4'),
('relation', 'i4')])
self.detected_box_tuples = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2]), ([0, 0, 1.1, 1], [1, 1, 2, 2]),
([1, 1, 2, 2], [0, 0, 1.1, 1]), ([0, 0, 1, 1], [3, 4, 5, 6])],
dtype=box_data_type)
self.detected_class_tuples = np.array(
[(1, 2, 3), (1, 2, 3), (1, 2, 3), (1, 4, 5)], dtype=label_data_type)
self.detected_scores = np.array([0.2, 0.8, 0.1, 0.5], dtype=float)
self.groundtruth_box_tuples = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2]), ([1, 1, 2, 2], [0, 0, 1.1, 1]),
([0, 0, 1, 1], [3, 4, 5, 5.5])],
dtype=box_data_type)
self.groundtruth_class_tuples = np.array(
[(1, 2, 3), (1, 7, 3), (1, 4, 5)], dtype=label_data_type)
def test_tp_fp_eval(self):
scores, tp_fp_labels, mapping = self.eval.compute_detection_tp_fp(
self.detected_box_tuples, self.detected_scores,
self.detected_class_tuples, self.groundtruth_box_tuples,
self.groundtruth_class_tuples)
expected_scores = np.array([0.8, 0.5, 0.2, 0.1], dtype=float)
expected_tp_fp_labels = np.array([True, True, False, False], dtype=bool)
expected_mapping = np.array([1, 3, 0, 2])
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
self.assertTrue(np.allclose(expected_mapping, mapping))
if __name__ == '__main__':
tf.test.main()
| 4,120 | 40.21 | 80 | py |
models | models-master/research/object_detection/utils/np_box_list_ops_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_list_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
class AreaRelatedTest(tf.test.TestCase):
def setUp(self):
boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=float)
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist1 = np_box_list.BoxList(boxes1)
self.boxlist2 = np_box_list.BoxList(boxes2)
def test_area(self):
areas = np_box_list_ops.area(self.boxlist1)
expected_areas = np.array([6.0, 5.0], dtype=float)
self.assertAllClose(expected_areas, areas)
def test_intersection(self):
intersection = np_box_list_ops.intersection(self.boxlist1, self.boxlist2)
expected_intersection = np.array([[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]],
dtype=float)
self.assertAllClose(intersection, expected_intersection)
def test_iou(self):
iou = np_box_list_ops.iou(self.boxlist1, self.boxlist2)
expected_iou = np.array([[2.0 / 16.0, 0.0, 6.0 / 400.0],
[1.0 / 16.0, 0.0, 5.0 / 400.0]],
dtype=float)
self.assertAllClose(iou, expected_iou)
def test_ioa(self):
boxlist1 = np_box_list.BoxList(
np.array(
            [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]],
            dtype=np.float32))
boxlist2 = np_box_list.BoxList(
np.array(
[[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32))
ioa21 = np_box_list_ops.ioa(boxlist2, boxlist1)
expected_ioa21 = np.array([[0.5, 0.0],
[1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
def test_scale(self):
boxlist = np_box_list.BoxList(
np.array(
            [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]],
            dtype=np.float32))
boxlist_scaled = np_box_list_ops.scale(boxlist, 2.0, 3.0)
expected_boxlist_scaled = np_box_list.BoxList(
np.array(
[[0.5, 0.75, 1.5, 2.25], [0.0, 0.0, 1.0, 2.25]], dtype=np.float32))
self.assertAllClose(expected_boxlist_scaled.get(), boxlist_scaled.get())
def test_clip_to_window(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[-0.2, -0.3, 0.7, 1.5]],
dtype=np.float32))
boxlist_clipped = np_box_list_ops.clip_to_window(boxlist,
[0.0, 0.0, 1.0, 1.0])
expected_boxlist_clipped = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[0.0, 0.0, 0.7, 1.0]],
dtype=np.float32))
self.assertAllClose(expected_boxlist_clipped.get(), boxlist_clipped.get())
def test_prune_outside_window(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[-0.2, -0.3, 0.7, 1.5]],
dtype=np.float32))
boxlist_pruned, _ = np_box_list_ops.prune_outside_window(
boxlist, [0.0, 0.0, 1.0, 1.0])
expected_boxlist_pruned = np_box_list.BoxList(
np.array(
            [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]],
            dtype=np.float32))
self.assertAllClose(expected_boxlist_pruned.get(), boxlist_pruned.get())
def test_concatenate(self):
boxlist1 = np_box_list.BoxList(
np.array(
            [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]],
            dtype=np.float32))
boxlist2 = np_box_list.BoxList(
np.array(
[[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32))
boxlists = [boxlist1, boxlist2]
boxlist_concatenated = np_box_list_ops.concatenate(boxlists)
boxlist_concatenated_expected = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
dtype=np.float32))
self.assertAllClose(boxlist_concatenated_expected.get(),
boxlist_concatenated.get())
def test_change_coordinate_frame(self):
boxlist = np_box_list.BoxList(
np.array(
            [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]],
            dtype=np.float32))
boxlist_coord = np_box_list_ops.change_coordinate_frame(
boxlist, np.array([0, 0, 0.5, 0.5], dtype=np.float32))
expected_boxlist_coord = np_box_list.BoxList(
np.array([[0.5, 0.5, 1.5, 1.5], [0, 0, 1.0, 1.5]], dtype=np.float32))
self.assertAllClose(boxlist_coord.get(), expected_boxlist_coord.get())
def test_filter_scores_greater_than(self):
boxlist = np_box_list.BoxList(
np.array(
            [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]],
            dtype=np.float32))
boxlist.add_field('scores', np.array([0.8, 0.2], np.float32))
boxlist_greater = np_box_list_ops.filter_scores_greater_than(boxlist, 0.5)
expected_boxlist_greater = np_box_list.BoxList(
np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32))
self.assertAllClose(boxlist_greater.get(), expected_boxlist_greater.get())
class GatherOpsTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist = np_box_list.BoxList(boxes)
self.boxlist.add_field('scores', np.array([0.5, 0.7, 0.9], dtype=float))
self.boxlist.add_field('labels',
np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0],
[0, 0, 0, 0, 1]],
dtype=int))
def test_gather_with_out_of_range_indices(self):
indices = np.array([3, 1], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices)
def test_gather_with_invalid_multidimensional_indices(self):
indices = np.array([[0, 1], [1, 2]], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices)
def test_gather_without_fields_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
subboxlist = np_box_list_ops.gather(boxlist, indices)
expected_scores = np.array([0.9, 0.5, 0.7], dtype=float)
self.assertAllClose(expected_scores, subboxlist.get_field('scores'))
expected_boxes = np.array([[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0]],
dtype=float)
self.assertAllClose(expected_boxes, subboxlist.get())
expected_labels = np.array([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
[0, 1, 0, 0, 0]],
dtype=int)
self.assertAllClose(expected_labels, subboxlist.get_field('labels'))
def test_gather_with_invalid_field_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices, 'labels')
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices, ['objectness'])
def test_gather_with_fields_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
subboxlist = np_box_list_ops.gather(boxlist, indices, ['labels'])
self.assertFalse(subboxlist.has_field('scores'))
expected_boxes = np.array([[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0]],
dtype=float)
self.assertAllClose(expected_boxes, subboxlist.get())
expected_labels = np.array([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
[0, 1, 0, 0, 0]],
dtype=int)
self.assertAllClose(expected_labels, subboxlist.get_field('labels'))
class SortByFieldTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist = np_box_list.BoxList(boxes)
self.boxlist.add_field('scores', np.array([0.5, 0.9, 0.4], dtype=float))
self.boxlist.add_field('labels',
np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0],
[0, 0, 0, 0, 1]],
dtype=int))
def test_with_invalid_field(self):
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, 'objectness')
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, 'labels')
def test_with_invalid_sorting_order(self):
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, 'scores', 'Descending')
def test_with_descending_sorting(self):
sorted_boxlist = np_box_list_ops.sort_by_field(self.boxlist, 'scores')
expected_boxes = np.array([[14.0, 14.0, 15.0, 15.0], [3.0, 4.0, 6.0, 8.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.assertAllClose(expected_boxes, sorted_boxlist.get())
expected_scores = np.array([0.9, 0.5, 0.4], dtype=float)
self.assertAllClose(expected_scores, sorted_boxlist.get_field('scores'))
def test_with_ascending_sorting(self):
sorted_boxlist = np_box_list_ops.sort_by_field(
self.boxlist, 'scores', np_box_list_ops.SortOrder.ASCEND)
expected_boxes = np.array([[0.0, 0.0, 20.0, 20.0],
[3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0],],
dtype=float)
self.assertAllClose(expected_boxes, sorted_boxlist.get())
expected_scores = np.array([0.4, 0.5, 0.9], dtype=float)
self.assertAllClose(expected_scores, sorted_boxlist.get_field('scores'))
class NonMaximumSuppressionTest(tf.test.TestCase):
def setUp(self):
self._boxes = np.array([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]],
dtype=float)
self._boxlist = np_box_list.BoxList(self._boxes)
def test_with_no_scores_field(self):
boxlist = np_box_list.BoxList(self._boxes)
max_output_size = 3
iou_threshold = 0.5
with self.assertRaises(ValueError):
np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
def test_nms_disabled_max_output_size_equals_three(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .2, .3], dtype=float))
max_output_size = 3
iou_threshold = 1. # No NMS
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 0.1, 1, 1.1]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .2, .3], dtype=float))
max_output_size = 3
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_two_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .5, .3], dtype=float))
max_output_size = 2
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_thirty_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .5, .3], dtype=float))
max_output_size = 30
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
  def test_select_from_ten_identical_boxes(self):
boxes = np.array(10 * [[0, 0, 1, 1]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field('scores', np.array(10 * [0.8]))
iou_threshold = .5
max_output_size = 3
expected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_different_iou_threshold(self):
boxes = np.array([[0, 0, 20, 100], [0, 0, 20, 80], [200, 200, 210, 300],
[200, 200, 210, 250]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field('scores', np.array([0.9, 0.8, 0.7, 0.6]))
max_output_size = 4
iou_threshold = .4
expected_boxes = np.array([[0, 0, 20, 100],
[200, 200, 210, 300],],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = .5
expected_boxes = np.array([[0, 0, 20, 100], [200, 200, 210, 300],
[200, 200, 210, 250]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = .8
expected_boxes = np.array([[0, 0, 20, 100], [0, 0, 20, 80],
[200, 200, 210, 300], [200, 200, 210, 250]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_multiclass_nms(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],
dtype=np.float32))
scores = np.array([[-0.2, 0.1, 0.5, -0.4, 0.3],
[0.7, -0.7, 0.6, 0.2, -0.9],
[0.4, 0.34, -0.9, 0.2, 0.31]],
dtype=np.float32)
boxlist.add_field('scores', scores)
boxlist_clean = np_box_list_ops.multi_class_non_max_suppression(
boxlist, score_thresh=0.25, iou_thresh=0.1, max_output_size=3)
scores_clean = boxlist_clean.get_field('scores')
classes_clean = boxlist_clean.get_field('classes')
boxes = boxlist_clean.get()
expected_scores = np.array([0.7, 0.6, 0.34, 0.31])
expected_classes = np.array([0, 2, 1, 4])
expected_boxes = np.array([[0.4, 0.2, 0.8, 0.8],
[0.4, 0.2, 0.8, 0.8],
[0.6, 0.0, 1.0, 1.0],
[0.6, 0.0, 1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(scores_clean, expected_scores)
self.assertAllClose(classes_clean, expected_classes)
self.assertAllClose(boxes, expected_boxes)
if __name__ == '__main__':
tf.test.main()
| 17,006 | 39.589499 | 80 | py |
models | models-master/research/object_detection/utils/target_assigner_utils_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utils.target_assigner_utils."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import box_list
from object_detection.utils import target_assigner_utils as ta_utils
from object_detection.utils import test_case
class TargetUtilTest(parameterized.TestCase, test_case.TestCase):
def test_image_shape_to_grids(self):
def graph_fn():
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height=2, width=3)
return y_grid, x_grid
expected_y_grid = np.array([[0, 0, 0], [1, 1, 1]])
expected_x_grid = np.array([[0, 1, 2], [0, 1, 2]])
y_grid, x_grid = self.execute(graph_fn, [])
np.testing.assert_array_equal(y_grid, expected_y_grid)
np.testing.assert_array_equal(x_grid, expected_x_grid)
@parameterized.parameters((False,), (True,))
def test_coordinates_to_heatmap(self, sparse):
if not hasattr(tf, 'tensor_scatter_nd_max'):
self.skipTest('Cannot test function due to old TF version.')
def graph_fn():
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height=3, width=5)
y_coordinates = tf.constant([1.5, 0.5], dtype=tf.float32)
x_coordinates = tf.constant([2.5, 4.5], dtype=tf.float32)
sigma = tf.constant([0.1, 0.5], dtype=tf.float32)
channel_onehot = tf.constant([[1, 0, 0], [0, 1, 0]], dtype=tf.float32)
channel_weights = tf.constant([1, 1], dtype=tf.float32)
heatmap = ta_utils.coordinates_to_heatmap(y_grid, x_grid, y_coordinates,
x_coordinates, sigma,
channel_onehot,
channel_weights, sparse=sparse)
return heatmap
heatmap = self.execute(graph_fn, [])
# Peak at (1, 2) for the first class.
self.assertAlmostEqual(1.0, heatmap[1, 2, 0])
# Peak at (0, 4) for the second class.
self.assertAlmostEqual(1.0, heatmap[0, 4, 1])
def test_compute_floor_offsets_with_indices_onlysource(self):
def graph_fn():
y_source = tf.constant([1.5, 0.3], dtype=tf.float32)
x_source = tf.constant([2.5, 4.2], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
np.testing.assert_array_almost_equal(offsets,
np.array([[0.5, 0.5], [0.3, 0.2]]))
np.testing.assert_array_almost_equal(indices,
np.array([[1, 2], [0, 4]]))
def test_compute_floor_offsets_with_indices_and_targets(self):
def graph_fn():
y_source = tf.constant([1.5, 0.3], dtype=tf.float32)
x_source = tf.constant([2.5, 4.2], dtype=tf.float32)
y_target = tf.constant([2.1, 0.1], dtype=tf.float32)
x_target = tf.constant([1.2, 4.5], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source, y_target, x_target)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
np.testing.assert_array_almost_equal(offsets,
np.array([[1.1, -0.8], [0.1, 0.5]]))
np.testing.assert_array_almost_equal(indices, np.array([[1, 2], [0, 4]]))
def test_compute_floor_offsets_with_indices_multisources(self):
def graph_fn():
y_source = tf.constant([[1.0, 0.0], [2.0, 3.0]], dtype=tf.float32)
x_source = tf.constant([[2.0, 4.0], [3.0, 3.0]], dtype=tf.float32)
y_target = tf.constant([2.1, 0.1], dtype=tf.float32)
x_target = tf.constant([1.2, 4.5], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source, y_target, x_target)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
# Offset from the first source to target.
np.testing.assert_array_almost_equal(offsets[:, 0, :],
np.array([[1.1, -0.8], [-1.9, 1.5]]))
# Offset from the second source to target.
np.testing.assert_array_almost_equal(offsets[:, 1, :],
np.array([[2.1, -2.8], [-2.9, 1.5]]))
# Indices from the first source to target.
np.testing.assert_array_almost_equal(indices[:, 0, :],
np.array([[1, 2], [2, 3]]))
# Indices from the second source to target.
np.testing.assert_array_almost_equal(indices[:, 1, :],
np.array([[0, 4], [3, 3]]))
def test_get_valid_keypoints_mask(self):
def graph_fn():
class_onehot = tf.constant(
[[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 1]], dtype=tf.float32)
keypoints = tf.constant(
[[0.1, float('nan'), 0.2, 0.0],
[0.0, 0.0, 0.1, 0.9],
[3.2, 4.3, float('nan'), 0.2]],
dtype=tf.float32)
keypoint_coordinates = tf.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoint_coordinates,
class_id=2,
class_onehot=class_onehot,
keypoint_indices=[1, 2])
return mask, keypoints_nan_to_zeros
keypoints = np.array([[0.0, 0.2],
[0.0, 0.1],
[4.3, 0.0]])
expected_mask = np.array([[0, 1], [0, 0], [1, 0]])
expected_keypoints = np.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = self.execute(graph_fn, [])
np.testing.assert_array_equal(mask, expected_mask)
np.testing.assert_array_almost_equal(keypoints_nan_to_zeros,
expected_keypoints)
def test_get_valid_keypoints_with_mask(self):
def graph_fn():
class_onehot = tf.constant(
[[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 1]], dtype=tf.float32)
keypoints = tf.constant(
[[0.1, float('nan'), 0.2, 0.0],
[0.0, 0.0, 0.1, 0.9],
[3.2, 4.3, float('nan'), 0.2]],
dtype=tf.float32)
keypoint_coordinates = tf.stack([keypoints, keypoints], axis=2)
weights = tf.constant([0.0, 0.0, 1.0])
mask, keypoints_nan_to_zeros = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoint_coordinates,
class_id=2,
class_onehot=class_onehot,
class_weights=weights,
keypoint_indices=[1, 2])
return mask, keypoints_nan_to_zeros
expected_mask = np.array([[0, 0], [0, 0], [1, 0]])
keypoints = np.array([[0.0, 0.2],
[0.0, 0.1],
[4.3, 0.0]])
expected_keypoints = np.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = self.execute(graph_fn, [])
np.testing.assert_array_equal(mask, expected_mask)
np.testing.assert_array_almost_equal(keypoints_nan_to_zeros,
expected_keypoints)
def test_blackout_pixel_weights_by_box_regions(self):
def graph_fn():
boxes = tf.constant(
[[0.0, 0.0, 5, 5], [0.0, 0.0, 10.0, 20.0], [6.0, 12.0, 8.0, 18.0]],
dtype=tf.float32)
blackout = tf.constant([True, False, True], dtype=tf.bool)
blackout_pixel_weights_by_box_regions = tf.function(
ta_utils.blackout_pixel_weights_by_box_regions)
output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)
return output
output = self.execute(graph_fn, [])
# All zeros in region [0:5, 0:5].
self.assertAlmostEqual(np.sum(output[0:5, 0:5]), 0.0)
# All zeros in region [12:18, 6:8].
self.assertAlmostEqual(np.sum(output[6:8, 12:18]), 0.0)
# All other pixel weights should be 1.0.
# 20 * 10 - 5 * 5 - 2 * 6 = 163.0
self.assertAlmostEqual(np.sum(output), 163.0)
def test_blackout_pixel_weights_by_box_regions_with_weights(self):
def graph_fn():
boxes = tf.constant(
[[0.0, 0.0, 2.0, 2.0],
[0.0, 0.0, 4.0, 2.0],
[3.0, 0.0, 4.0, 4.0]],
dtype=tf.float32)
blackout = tf.constant([False, False, True], dtype=tf.bool)
weights = tf.constant([0.4, 0.3, 0.2], tf.float32)
blackout_pixel_weights_by_box_regions = tf.function(
ta_utils.blackout_pixel_weights_by_box_regions)
output = blackout_pixel_weights_by_box_regions(
4, 4, boxes, blackout, weights)
return output
output = self.execute(graph_fn, [])
expected_weights = [
[0.4, 0.4, 1.0, 1.0],
[0.4, 0.4, 1.0, 1.0],
[0.3, 0.3, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]
np.testing.assert_array_almost_equal(expected_weights, output)
def test_blackout_pixel_weights_by_box_regions_zero_instance(self):
def graph_fn():
boxes = tf.zeros([0, 4], dtype=tf.float32)
blackout = tf.zeros([0], dtype=tf.bool)
blackout_pixel_weights_by_box_regions = tf.function(
ta_utils.blackout_pixel_weights_by_box_regions)
output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)
return output
output = self.execute(graph_fn, [])
# The output should be all 1s since there's no annotation provided.
np.testing.assert_array_equal(output, np.ones([10, 20], dtype=np.float32))
def test_get_surrounding_grids(self):
def graph_fn():
y_coordinates = tf.constant([0.5], dtype=tf.float32)
x_coordinates = tf.constant([4.5], dtype=tf.float32)
output = ta_utils.get_surrounding_grids(
height=3,
width=5,
y_coordinates=y_coordinates,
x_coordinates=x_coordinates,
radius=1)
return output
y_indices, x_indices, valid = self.execute(graph_fn, [])
# Five neighboring indices: [-1, 4] (out of bound), [0, 3], [0, 4],
# [0, 5] (out of bound), [1, 4].
np.testing.assert_array_almost_equal(
y_indices,
np.array([[0.0, 0.0, 0.0, 0.0, 1.0]]))
np.testing.assert_array_almost_equal(
x_indices,
np.array([[0.0, 3.0, 4.0, 0.0, 4.0]]))
self.assertAllEqual(valid, [[False, True, True, False, True]])
def test_coordinates_to_iou(self):
def graph_fn():
y, x = tf.meshgrid(tf.range(32, dtype=tf.float32),
tf.range(32, dtype=tf.float32))
blist = box_list.BoxList(
tf.constant([[0., 0., 32., 32.],
[0., 0., 16., 16.],
[0.0, 0.0, 4.0, 4.0]]))
classes = tf.constant([[0., 1., 0.],
[1., 0., 0.],
[0., 0., 1.]])
result = ta_utils.coordinates_to_iou(
y, x, blist, classes)
return result
result = self.execute(graph_fn, [])
self.assertEqual(result.shape, (32, 32, 3))
self.assertAlmostEqual(result[0, 0, 0], 1.0 / 7.0)
self.assertAlmostEqual(result[0, 0, 1], 1.0 / 7.0)
self.assertAlmostEqual(result[0, 16, 0], 1.0 / 7.0)
self.assertAlmostEqual(result[2, 2, 2], 1.0)
self.assertAlmostEqual(result[8, 8, 2], 0.0)
if __name__ == '__main__':
tf.test.main()
| 11,902 | 39.077441 | 80 | py |
models | models-master/research/object_detection/utils/patch_ops_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.patch_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import patch_ops
from object_detection.utils import test_case
class GetPatchMaskTest(test_case.TestCase, parameterized.TestCase):
def testMaskShape(self):
image_shape = [15, 10]
mask = patch_ops.get_patch_mask(
10, 5, patch_size=3, image_shape=image_shape)
self.assertListEqual(mask.shape.as_list(), image_shape)
def testHandleImageShapeWithChannels(self):
image_shape = [15, 10, 3]
mask = patch_ops.get_patch_mask(
10, 5, patch_size=3, image_shape=image_shape)
self.assertListEqual(mask.shape.as_list(), image_shape[:2])
def testMaskDType(self):
mask = patch_ops.get_patch_mask(2, 3, patch_size=2, image_shape=[6, 7])
self.assertDTypeEqual(mask, bool)
def testMaskAreaWithEvenPatchSize(self):
image_shape = [6, 7]
mask = patch_ops.get_patch_mask(2, 3, patch_size=2, image_shape=image_shape)
expected_mask = np.array([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
]).reshape(image_shape).astype(bool)
self.assertAllEqual(mask, expected_mask)
def testMaskAreaWithEvenPatchSize4(self):
image_shape = [6, 7]
mask = patch_ops.get_patch_mask(2, 3, patch_size=4, image_shape=image_shape)
expected_mask = np.array([
[0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
]).reshape(image_shape).astype(bool)
self.assertAllEqual(mask, expected_mask)
def testMaskAreaWithOddPatchSize(self):
image_shape = [6, 7]
mask = patch_ops.get_patch_mask(2, 3, patch_size=3, image_shape=image_shape)
expected_mask = np.array([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
]).reshape(image_shape).astype(bool)
self.assertAllEqual(mask, expected_mask)
def testMaskAreaPartiallyOutsideImage(self):
image_shape = [6, 7]
mask = patch_ops.get_patch_mask(5, 6, patch_size=5, image_shape=image_shape)
expected_mask = np.array([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1],
]).reshape(image_shape).astype(bool)
self.assertAllEqual(mask, expected_mask)
@parameterized.parameters(
{'y': 0, 'x': -1},
{'y': -1, 'x': 0},
{'y': 0, 'x': 11},
{'y': 16, 'x': 0},
)
def testStaticCoordinatesOutsideImageRaisesError(self, y, x):
image_shape = [15, 10]
with self.assertRaises(tf.errors.InvalidArgumentError):
patch_ops.get_patch_mask(y, x, patch_size=3, image_shape=image_shape)
def testDynamicCoordinatesOutsideImageRaisesError(self):
def graph_fn():
image_shape = [15, 10]
x = tf.random_uniform([], minval=-2, maxval=-1, dtype=tf.int32)
y = tf.random_uniform([], minval=0, maxval=1, dtype=tf.int32)
mask = patch_ops.get_patch_mask(
y, x, patch_size=3, image_shape=image_shape)
return mask
with self.assertRaises(tf.errors.InvalidArgumentError):
self.execute(graph_fn, [])
@parameterized.parameters(
{'patch_size': 0},
{'patch_size': -1},
)
def testStaticNonPositivePatchSizeRaisesError(self, patch_size):
image_shape = [6, 7]
with self.assertRaises(tf.errors.InvalidArgumentError):
patch_ops.get_patch_mask(
0, 0, patch_size=patch_size, image_shape=image_shape)
def testDynamicNonPositivePatchSizeRaisesError(self):
def graph_fn():
image_shape = [6, 7]
patch_size = -1 * tf.random_uniform([], minval=0, maxval=3,
dtype=tf.int32)
mask = patch_ops.get_patch_mask(
0, 0, patch_size=patch_size, image_shape=image_shape)
return mask
with self.assertRaises(tf.errors.InvalidArgumentError):
self.execute(graph_fn, [])
if __name__ == '__main__':
tf.test.main()
| 5,138 | 33.26 | 80 | py |
models | models-master/research/object_detection/utils/target_assigner_utils.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions used by target assigner."""
import tensorflow.compat.v1 as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.utils import shape_utils
def image_shape_to_grids(height, width):
"""Computes xy-grids given the shape of the image.
Args:
height: The height of the image.
width: The width of the image.
Returns:
A tuple of two tensors:
y_grid: A float tensor with shape [height, width] representing the
y-coordinate of each pixel grid.
x_grid: A float tensor with shape [height, width] representing the
x-coordinate of each pixel grid.
"""
out_height = tf.cast(height, tf.float32)
out_width = tf.cast(width, tf.float32)
x_range = tf.range(out_width, dtype=tf.float32)
y_range = tf.range(out_height, dtype=tf.float32)
x_grid, y_grid = tf.meshgrid(x_range, y_range, indexing='xy')
return (y_grid, x_grid)
def _coordinates_to_heatmap_dense(y_grid, x_grid, y_coordinates, x_coordinates,
sigma, channel_onehot, channel_weights=None):
"""Dense version of coordinates to heatmap that uses an outer product."""
num_instances, num_channels = (
shape_utils.combined_static_and_dynamic_shape(channel_onehot))
x_grid = tf.expand_dims(x_grid, 2)
y_grid = tf.expand_dims(y_grid, 2)
# The raw center coordinates in the output space.
x_diff = x_grid - tf.math.floor(x_coordinates)
y_diff = y_grid - tf.math.floor(y_coordinates)
squared_distance = x_diff**2 + y_diff**2
gaussian_map = tf.exp(-squared_distance / (2 * sigma * sigma))
reshaped_gaussian_map = tf.expand_dims(gaussian_map, axis=-1)
reshaped_channel_onehot = tf.reshape(channel_onehot,
(1, 1, num_instances, num_channels))
gaussian_per_box_per_class_map = (
reshaped_gaussian_map * reshaped_channel_onehot)
if channel_weights is not None:
reshaped_weights = tf.reshape(channel_weights, (1, 1, num_instances, 1))
gaussian_per_box_per_class_map *= reshaped_weights
# Take maximum along the "instance" dimension so that all per-instance
# heatmaps of the same class are merged together.
heatmap = tf.reduce_max(gaussian_per_box_per_class_map, axis=2)
# Maximum of an empty tensor is -inf, the following is to avoid that.
heatmap = tf.maximum(heatmap, 0)
return tf.stop_gradient(heatmap)
def _coordinates_to_heatmap_sparse(y_grid, x_grid, y_coordinates, x_coordinates,
sigma, channel_onehot, channel_weights=None):
"""Sparse version of coordinates to heatmap using tf.scatter."""
if not hasattr(tf, 'tensor_scatter_nd_max'):
raise RuntimeError(
        ('Please upgrade tensorflow to use `tensor_scatter_nd_max` or set '
'compute_heatmap_sparse=False'))
_, num_channels = (
shape_utils.combined_static_and_dynamic_shape(channel_onehot))
height, width = shape_utils.combined_static_and_dynamic_shape(y_grid)
x_grid = tf.expand_dims(x_grid, 2)
y_grid = tf.expand_dims(y_grid, 2)
# The raw center coordinates in the output space.
x_diff = x_grid - tf.math.floor(x_coordinates)
y_diff = y_grid - tf.math.floor(y_coordinates)
squared_distance = x_diff**2 + y_diff**2
gaussian_map = tf.exp(-squared_distance / (2 * sigma * sigma))
if channel_weights is not None:
gaussian_map = gaussian_map * channel_weights[tf.newaxis, tf.newaxis, :]
channel_indices = tf.argmax(channel_onehot, axis=1)
channel_indices = channel_indices[:, tf.newaxis]
heatmap_init = tf.zeros((num_channels, height, width))
gaussian_map = tf.transpose(gaussian_map, (2, 0, 1))
heatmap = tf.tensor_scatter_nd_max(
heatmap_init, channel_indices, gaussian_map)
# Maximum of an empty tensor is -inf, the following is to avoid that.
heatmap = tf.maximum(heatmap, 0)
return tf.stop_gradient(tf.transpose(heatmap, (1, 2, 0)))
def coordinates_to_heatmap(y_grid,
x_grid,
y_coordinates,
x_coordinates,
sigma,
channel_onehot,
channel_weights=None,
sparse=False):
"""Returns the heatmap targets from a set of point coordinates.
This function maps a set of point coordinates to the output heatmap image
  applied using a Gaussian kernel. Note that this function can be used by both
object detection and keypoint estimation tasks. For object detection, the
"channel" refers to the object class. For keypoint estimation, the "channel"
refers to the number of keypoint types.
Args:
y_grid: A 2D tensor with shape [height, width] which contains the grid
y-coordinates given in the (output) image dimensions.
x_grid: A 2D tensor with shape [height, width] which contains the grid
x-coordinates given in the (output) image dimensions.
y_coordinates: A 1D tensor with shape [num_instances] representing the
y-coordinates of the instances in the output space coordinates.
x_coordinates: A 1D tensor with shape [num_instances] representing the
x-coordinates of the instances in the output space coordinates.
sigma: A 1D tensor with shape [num_instances] representing the standard
deviation of the Gaussian kernel to be applied to the point.
channel_onehot: A 2D tensor with shape [num_instances, num_channels]
representing the one-hot encoded channel labels for each point.
channel_weights: A 1D tensor with shape [num_instances] corresponding to the
weight of each instance.
sparse: bool, indicating whether or not to use the sparse implementation
      of the function. The sparse version scales better with the number of
      channels, but in some cases is known to cause an OOM error. See (b/170989061).
Returns:
heatmap: A tensor of size [height, width, num_channels] representing the
heatmap. Output (height, width) match the dimensions of the input grids.
"""
if sparse:
return _coordinates_to_heatmap_sparse(
y_grid, x_grid, y_coordinates, x_coordinates, sigma, channel_onehot,
channel_weights)
else:
return _coordinates_to_heatmap_dense(
y_grid, x_grid, y_coordinates, x_coordinates, sigma, channel_onehot,
channel_weights)
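# Illustrative usage sketch (not part of the original module); the helper name
# is hypothetical and the inputs mirror the unit test for this function.
def _example_coordinates_to_heatmap(): # pylint: disable=invalid-name
  """Builds a [3, 5, 2] heatmap with one Gaussian peak per class channel."""
  y_grid, x_grid = image_shape_to_grids(height=3, width=5)
  heatmap = coordinates_to_heatmap(
      y_grid,
      x_grid,
      y_coordinates=tf.constant([1.5, 0.5], dtype=tf.float32),
      x_coordinates=tf.constant([2.5, 4.5], dtype=tf.float32),
      sigma=tf.constant([0.1, 0.5], dtype=tf.float32),
      channel_onehot=tf.constant([[1., 0.], [0., 1.]], dtype=tf.float32),
      channel_weights=tf.constant([1., 1.], dtype=tf.float32))
  # The peaks (value 1.0) land at the floored centers: (1, 2) for channel 0
  # and (0, 4) for channel 1.
  return heatmap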
def compute_floor_offsets_with_indices(y_source,
x_source,
y_target=None,
x_target=None):
"""Computes offsets from floored source(floored) to target coordinates.
This function computes the offsets from source coordinates ("floored" as if
they were put on the grids) to target coordinates. Note that the input
coordinates should be the "absolute" coordinates in terms of the output image
dimensions as opposed to the normalized coordinates (i.e. values in [0, 1]).
If the input y and x source have the second dimension (representing the
neighboring pixels), then the offsets are computed from each of the
neighboring pixels to their corresponding target (first dimension).
Args:
y_source: A tensor with shape [num_points] (or [num_points, num_neighbors])
representing the absolute y-coordinates (in the output image space) of the
source points.
x_source: A tensor with shape [num_points] (or [num_points, num_neighbors])
representing the absolute x-coordinates (in the output image space) of the
source points.
y_target: A tensor with shape [num_points] representing the absolute
y-coordinates (in the output image space) of the target points. If not
provided, then y_source is used as the targets.
x_target: A tensor with shape [num_points] representing the absolute
x-coordinates (in the output image space) of the target points. If not
provided, then x_source is used as the targets.
Returns:
A tuple of two tensors:
offsets: A tensor with shape [num_points, 2] (or
[num_points, num_neighbors, 2]) representing the offsets of each input
point.
indices: A tensor with shape [num_points, 2] (or
[num_points, num_neighbors, 2]) representing the indices of where the
offsets should be retrieved in the output image dimension space.
Raise:
ValueError: source and target shapes have unexpected values.
"""
y_source_floored = tf.floor(y_source)
x_source_floored = tf.floor(x_source)
source_shape = shape_utils.combined_static_and_dynamic_shape(y_source)
if y_target is None and x_target is None:
y_target = y_source
x_target = x_source
else:
target_shape = shape_utils.combined_static_and_dynamic_shape(y_target)
if len(source_shape) == 2 and len(target_shape) == 1:
_, num_neighbors = source_shape
y_target = tf.tile(
tf.expand_dims(y_target, -1), multiples=[1, num_neighbors])
x_target = tf.tile(
tf.expand_dims(x_target, -1), multiples=[1, num_neighbors])
elif source_shape != target_shape:
raise ValueError('Inconsistent source and target shape.')
y_offset = y_target - y_source_floored
x_offset = x_target - x_source_floored
y_source_indices = tf.cast(y_source_floored, tf.int32)
x_source_indices = tf.cast(x_source_floored, tf.int32)
indices = tf.stack([y_source_indices, x_source_indices], axis=-1)
offsets = tf.stack([y_offset, x_offset], axis=-1)
return offsets, indices
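# Illustrative usage sketch (not part of the original module); the helper name
# is hypothetical. It shows the offset/index convention for a single point.
def _example_compute_floor_offsets(): # pylint: disable=invalid-name
  """Offsets a source point at (1.5, 2.5) toward a target at (2.1, 1.2)."""
  offsets, indices = compute_floor_offsets_with_indices(
      y_source=tf.constant([1.5], dtype=tf.float32),
      x_source=tf.constant([2.5], dtype=tf.float32),
      y_target=tf.constant([2.1], dtype=tf.float32),
      x_target=tf.constant([1.2], dtype=tf.float32))
  # offsets == [[1.1, -0.8]] (target minus floored source) and
  # indices == [[1, 2]] (the floored source location on the output grid).
  return offsets, indices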
def coordinates_to_iou(y_grid, x_grid, blist,
channels_onehot, weights=None):
"""Computes a per-pixel IoU with groundtruth boxes.
At each pixel, we return the IoU assuming that we predicted the
ideal height and width for the box at that location.
Args:
y_grid: A 2D tensor with shape [height, width] which contains the grid
y-coordinates given in the (output) image dimensions.
x_grid: A 2D tensor with shape [height, width] which contains the grid
x-coordinates given in the (output) image dimensions.
blist: A BoxList object with `num_instances` number of boxes.
channels_onehot: A 2D tensor with shape [num_instances, num_channels]
representing the one-hot encoded channel labels for each point.
weights: A 1D tensor with shape [num_instances] corresponding to the
weight of each instance.
Returns:
    iou_heatmap: A [height, width, num_channels] shaped float tensor denoting
the IoU based heatmap.
"""
image_height, image_width = tf.shape(y_grid)[0], tf.shape(y_grid)[1]
num_pixels = image_height * image_width
_, _, height, width = blist.get_center_coordinates_and_sizes()
num_boxes = tf.shape(height)[0]
per_pixel_ymin = (y_grid[tf.newaxis, :, :] -
(height[:, tf.newaxis, tf.newaxis] / 2.0))
per_pixel_xmin = (x_grid[tf.newaxis, :, :] -
(width[:, tf.newaxis, tf.newaxis] / 2.0))
per_pixel_ymax = (y_grid[tf.newaxis, :, :] +
(height[:, tf.newaxis, tf.newaxis] / 2.0))
per_pixel_xmax = (x_grid[tf.newaxis, :, :] +
(width[:, tf.newaxis, tf.newaxis] / 2.0))
# [num_boxes, height, width] -> [num_boxes * height * width]
per_pixel_ymin = tf.reshape(
per_pixel_ymin, [num_pixels * num_boxes])
per_pixel_xmin = tf.reshape(
per_pixel_xmin, [num_pixels * num_boxes])
per_pixel_ymax = tf.reshape(
per_pixel_ymax, [num_pixels * num_boxes])
per_pixel_xmax = tf.reshape(
per_pixel_xmax, [num_pixels * num_boxes])
per_pixel_blist = box_list.BoxList(
tf.stack([per_pixel_ymin, per_pixel_xmin,
per_pixel_ymax, per_pixel_xmax], axis=1))
target_boxes = tf.tile(
blist.get()[:, tf.newaxis, :], [1, num_pixels, 1])
  # [num_boxes, height * width, 4] -> [num_boxes * height * width, 4]
target_boxes = tf.reshape(target_boxes,
[num_pixels * num_boxes, 4])
target_blist = box_list.BoxList(target_boxes)
ious = box_list_ops.matched_iou(target_blist, per_pixel_blist)
ious = tf.reshape(ious, [num_boxes, image_height, image_width])
per_class_iou = (
ious[:, :, :, tf.newaxis] *
channels_onehot[:, tf.newaxis, tf.newaxis, :])
if weights is not None:
per_class_iou = (
per_class_iou * weights[:, tf.newaxis, tf.newaxis, tf.newaxis])
per_class_iou = tf.maximum(per_class_iou, 0.0)
return tf.reduce_max(per_class_iou, axis=0)
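# Illustrative usage sketch (not part of the original module); the helper name
# is hypothetical. It computes the per-pixel IoU heatmap for one small box.
def _example_coordinates_to_iou(): # pylint: disable=invalid-name
  """Returns a [32, 32, 2] IoU heatmap for a single 4x4 box in channel 0."""
  y_grid, x_grid = image_shape_to_grids(height=32, width=32)
  blist = box_list.BoxList(tf.constant([[0., 0., 4., 4.]], dtype=tf.float32))
  channels_onehot = tf.constant([[1., 0.]], dtype=tf.float32)
  iou_heatmap = coordinates_to_iou(y_grid, x_grid, blist, channels_onehot)
  # The pixel at the true box center (row 2, column 2) gets IoU 1.0 in
  # channel 0; pixels far from the box decay toward 0.
  return iou_heatmap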
def get_valid_keypoint_mask_for_class(keypoint_coordinates,
class_id,
class_onehot,
class_weights=None,
keypoint_indices=None):
"""Mask keypoints by their class ids and indices.
For a given task, we may want to only consider a subset of instances or
keypoints. This function is used to provide the mask (in terms of weights) to
mark those elements which should be considered based on the classes of the
instances and optionally, their keypoint indices. Note that the NaN values
in the keypoints will also be masked out.
Args:
keypoint_coordinates: A float tensor with shape [num_instances,
num_keypoints, 2] which contains the coordinates of each keypoint.
class_id: An integer representing the target class id to be selected.
    class_onehot: A 2D tensor of shape [num_instances, num_classes] represents
the onehot (or k-hot) encoding of the class for each instance.
    class_weights: A 1D tensor of shape [num_instances] represents the weight of
each instance. If not provided, all instances are weighted equally.
keypoint_indices: A list of integers representing the keypoint indices used
to select the values on the keypoint dimension. If provided, the output
dimension will be [num_instances, len(keypoint_indices)]
Returns:
A tuple of tensors:
mask: A float tensor of shape [num_instances, K], where K is num_keypoints
or len(keypoint_indices) if provided. The tensor has values either 0 or
1 indicating whether an element in the input keypoints should be used.
keypoints_nan_to_zeros: Same as input keypoints with the NaN values
replaced by zeros and selected columns corresponding to the
keypoint_indices (if provided). The shape of this tensor will always be
the same as the output mask.
"""
num_keypoints = tf.shape(keypoint_coordinates)[1]
class_mask = class_onehot[:, class_id]
reshaped_class_mask = tf.tile(
tf.expand_dims(class_mask, axis=-1), multiples=[1, num_keypoints])
not_nan = tf.math.logical_not(tf.math.is_nan(keypoint_coordinates))
mask = reshaped_class_mask * tf.cast(not_nan[:, :, 0], dtype=tf.float32)
keypoints_nan_to_zeros = tf.where(not_nan, keypoint_coordinates,
tf.zeros_like(keypoint_coordinates))
if class_weights is not None:
reshaped_class_weight = tf.tile(
tf.expand_dims(class_weights, axis=-1), multiples=[1, num_keypoints])
mask = mask * reshaped_class_weight
if keypoint_indices is not None:
mask = tf.gather(mask, indices=keypoint_indices, axis=1)
keypoints_nan_to_zeros = tf.gather(
keypoints_nan_to_zeros, indices=keypoint_indices, axis=1)
return mask, keypoints_nan_to_zeros
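# Illustrative usage sketch (not part of the original module); the helper name
# and inputs are hypothetical.
def _example_valid_keypoint_mask(): # pylint: disable=invalid-name
  """Masks keypoints by class id 1 and zeroes out a NaN coordinate."""
  keypoints = tf.constant(
      [[[0.1, 0.2], [float('nan'), 0.3]],
       [[0.4, 0.5], [0.6, 0.7]]], dtype=tf.float32)
  class_onehot = tf.constant([[0., 1.], [1., 0.]], dtype=tf.float32)
  mask, keypoints_no_nan = get_valid_keypoint_mask_for_class(
      keypoint_coordinates=keypoints,
      class_id=1,
      class_onehot=class_onehot)
  # mask == [[1., 0.], [0., 0.]]: only the first instance is of class 1, and
  # its second keypoint is dropped because its y-coordinate is NaN.
  return mask, keypoints_no_nan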
def blackout_pixel_weights_by_box_regions(height, width, boxes, blackout,
weights=None,
boxes_scale=1.0):
"""Apply weights at pixel locations.
This function is used to generate the pixel weight mask (usually in the output
image dimension). The mask is to ignore some regions when computing loss.
Weights are applied as follows:
- Any region outside of a box gets the default weight 1.0
  - Any box for which an explicit weight is specified gets that weight. If
multiple boxes overlap, the maximum of the weights is applied.
- Any box for which blackout=True is specified will get a weight of 0.0,
regardless of whether an equivalent non-zero weight is specified. Also, the
blackout region takes precedence over other boxes which may overlap with
non-zero weight.
Example:
height = 4
width = 4
boxes = [[0., 0., 2., 2.],
[0., 0., 4., 2.],
[3., 0., 4., 4.]]
blackout = [False, False, True]
weights = [4.0, 3.0, 2.0]
blackout_pixel_weights_by_box_regions(height, width, boxes, blackout,
weights)
>> [[4.0, 4.0, 1.0, 1.0],
[4.0, 4.0, 1.0, 1.0],
[3.0, 3.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]
Args:
height: int, height of the (output) image.
width: int, width of the (output) image.
boxes: A float tensor with shape [num_instances, 4] indicating the
coordinates of the four corners of the boxes.
blackout: A boolean tensor with shape [num_instances] indicating whether to
blackout (zero-out) the weights within the box regions.
weights: An optional float32 tensor with shape [num_instances] indicating
a value to apply in each box region. Note that if blackout=True for a
given box, the weight will be zero. If None, all weights are assumed to be
1.
boxes_scale: The amount to scale the height/width of the boxes before
constructing the blackout regions. This is often useful to guarantee that
the proper weight fully covers the object boxes/masks during supervision,
as shifting might occur during image resizing, network stride, etc.
Returns:
A float tensor with shape [height, width] where all values within the
regions of the blackout boxes are 0.0 and 1.0 (or weights if supplied)
elsewhere.
"""
num_instances, _ = shape_utils.combined_static_and_dynamic_shape(boxes)
# If no annotation instance is provided, return all ones (instead of
# unexpected values) to avoid NaN loss value.
if num_instances == 0:
return tf.ones([height, width], dtype=tf.float32)
(y_grid, x_grid) = image_shape_to_grids(height, width)
y_grid = tf.expand_dims(y_grid, axis=0)
x_grid = tf.expand_dims(x_grid, axis=0)
boxlist = box_list.BoxList(boxes)
boxlist = box_list_ops.scale_height_width(
boxlist, y_scale=boxes_scale, x_scale=boxes_scale)
boxes = boxlist.get()
y_min = tf.expand_dims(boxes[:, 0:1], axis=-1)
x_min = tf.expand_dims(boxes[:, 1:2], axis=-1)
y_max = tf.expand_dims(boxes[:, 2:3], axis=-1)
x_max = tf.expand_dims(boxes[:, 3:], axis=-1)
# Make the mask with all 1.0 in the box regions.
# Shape: [num_instances, height, width]
in_boxes = tf.math.logical_and(
tf.math.logical_and(y_grid >= y_min, y_grid < y_max),
tf.math.logical_and(x_grid >= x_min, x_grid < x_max))
if weights is None:
weights = tf.ones_like(blackout, dtype=tf.float32)
# Compute a [height, width] tensor with the maximum weight in each box, and
# 0.0 elsewhere.
weights_tiled = tf.tile(
weights[:, tf.newaxis, tf.newaxis], [1, height, width])
weights_3d = tf.where(in_boxes, weights_tiled,
tf.zeros_like(weights_tiled))
weights_2d = tf.math.maximum(
tf.math.reduce_max(weights_3d, axis=0), 0.0)
# Add 1.0 to all regions outside a box.
weights_2d = tf.where(
tf.math.reduce_any(in_boxes, axis=0),
weights_2d,
tf.ones_like(weights_2d))
# Now enforce that blackout regions all have zero weights.
keep_region = tf.cast(tf.math.logical_not(blackout), tf.float32)
keep_region_tiled = tf.tile(
keep_region[:, tf.newaxis, tf.newaxis], [1, height, width])
keep_region_3d = tf.where(in_boxes, keep_region_tiled,
tf.ones_like(keep_region_tiled))
keep_region_2d = tf.math.reduce_min(keep_region_3d, axis=0)
return weights_2d * keep_region_2d
def _get_yx_indices_offset_by_radius(radius):
"""Gets the y and x index offsets that are within the radius."""
y_offsets = []
x_offsets = []
for y_offset in range(-radius, radius + 1, 1):
for x_offset in range(-radius, radius + 1, 1):
if x_offset ** 2 + y_offset ** 2 <= radius ** 2:
y_offsets.append(y_offset)
x_offsets.append(x_offset)
return (tf.constant(y_offsets, dtype=tf.float32),
tf.constant(x_offsets, dtype=tf.float32))
def get_surrounding_grids(height, width, y_coordinates, x_coordinates, radius):
"""Gets the indices of the surrounding pixels of the input y, x coordinates.
This function returns the pixel indices corresponding to the (floor of the)
input coordinates and their surrounding pixels within the radius. If the
radius is set to 0, then only the pixels that correspond to the floor of the
coordinates will be returned. If the radius is larger than 0, then all of the
pixels within the radius of the "floor pixels" will also be returned. For
  example, if the input coordinate is [2.1, 3.5] and radius is 1, then the five
pixel indices will be returned: [2, 3], [1, 3], [2, 2], [2, 4], [3, 3]. Also,
if the surrounding pixels are outside of valid image region, then the returned
pixel indices will be [0, 0] and its corresponding "valid" value will be
False.
Args:
height: int, the height of the output image.
width: int, the width of the output image.
y_coordinates: A tensor with shape [num_points] representing the absolute
y-coordinates (in the output image space) of the points.
x_coordinates: A tensor with shape [num_points] representing the absolute
x-coordinates (in the output image space) of the points.
radius: int, the radius of the neighboring pixels to be considered and
returned. If set to 0, then only the pixel indices corresponding to the
floor of the input coordinates will be returned.
Returns:
A tuple of three tensors:
y_indices: A [num_points, num_neighbors] float tensor representing the
pixel y indices corresponding to the input points within radius. The
"num_neighbors" is determined by the size of the radius.
x_indices: A [num_points, num_neighbors] float tensor representing the
pixel x indices corresponding to the input points within radius. The
"num_neighbors" is determined by the size of the radius.
valid: A [num_points, num_neighbors] boolean tensor representing whether
each returned index is in valid image region or not.
"""
# Floored y, x: [num_points, 1].
y_center = tf.expand_dims(tf.math.floor(y_coordinates), axis=-1)
x_center = tf.expand_dims(tf.math.floor(x_coordinates), axis=-1)
y_offsets, x_offsets = _get_yx_indices_offset_by_radius(radius)
# Indices offsets: [1, num_neighbors].
y_offsets = tf.expand_dims(y_offsets, axis=0)
x_offsets = tf.expand_dims(x_offsets, axis=0)
# Floor + offsets: [num_points, num_neighbors].
y_output = y_center + y_offsets
x_output = x_center + x_offsets
default_output = tf.zeros_like(y_output)
valid = tf.logical_and(
tf.logical_and(x_output >= 0, x_output < width),
tf.logical_and(y_output >= 0, y_output < height))
y_output = tf.where(valid, y_output, default_output)
x_output = tf.where(valid, x_output, default_output)
return (y_output, x_output, valid)
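# Illustrative usage sketch (not part of the original module); the helper name
# is hypothetical and the inputs mirror the unit test for this function.
def _example_get_surrounding_grids(): # pylint: disable=invalid-name
  """Finds the radius-1 neighborhood of a point near the image border."""
  y_indices, x_indices, valid = get_surrounding_grids(
      height=3,
      width=5,
      y_coordinates=tf.constant([0.5], dtype=tf.float32),
      x_coordinates=tf.constant([4.5], dtype=tf.float32),
      radius=1)
  # Of the five candidate neighbors of pixel (0, 4), the ones at (-1, 4) and
  # (0, 5) fall outside the 3x5 grid, so they are mapped to (0, 0) and marked
  # False in `valid`.
  return y_indices, x_indices, valid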
| 23,874 | 43.377323 | 80 | py |
models | models-master/research/object_detection/utils/learning_schedules_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.learning_schedules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.utils import learning_schedules
from object_detection.utils import test_case
class LearningSchedulesTest(test_case.TestCase):
def testExponentialDecayWithBurnin(self):
def graph_fn(global_step):
learning_rate_base = 1.0
learning_rate_decay_steps = 3
learning_rate_decay_factor = .1
burnin_learning_rate = .5
burnin_steps = 2
min_learning_rate = .05
learning_rate = learning_schedules.exponential_decay_with_burnin(
global_step, learning_rate_base, learning_rate_decay_steps,
learning_rate_decay_factor, burnin_learning_rate, burnin_steps,
min_learning_rate)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
output_rates = [
self.execute(graph_fn, [np.array(i).astype(np.int64)]) for i in range(9)
]
exp_rates = [.5, .5, 1, 1, 1, .1, .1, .1, .05]
self.assertAllClose(output_rates, exp_rates, rtol=1e-4)
def testExponentialDecayWithWarmup(self):
def graph_fn(global_step):
learning_rate_base = 1.0
learning_rate_decay_steps = 3
learning_rate_decay_factor = .1
warmup_learning_rate = .5
warmup_steps = 2
min_learning_rate = .05
learning_rate = learning_schedules.exponential_decay_with_warmup(
global_step, learning_rate_base, learning_rate_decay_steps,
learning_rate_decay_factor, warmup_learning_rate, warmup_steps,
min_learning_rate)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
output_rates = [
self.execute(graph_fn, [np.array(i).astype(np.int64)]) for i in range(9)
]
exp_rates = [.5, .75, 1, 1, 1, .1, .1, .1, .05]
self.assertAllClose(output_rates, exp_rates, rtol=1e-4)
def testCosineDecayWithWarmup(self):
def graph_fn(global_step):
learning_rate_base = 1.0
total_steps = 100
warmup_learning_rate = 0.1
warmup_steps = 9
learning_rate = learning_schedules.cosine_decay_with_warmup(
global_step, learning_rate_base, total_steps,
warmup_learning_rate, warmup_steps)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
exp_rates = [0.1, 0.5, 0.9, 1.0, 0]
input_global_steps = [0, 4, 8, 9, 100]
output_rates = [
self.execute(graph_fn, [np.array(step).astype(np.int64)])
for step in input_global_steps
]
self.assertAllClose(output_rates, exp_rates)
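  # Sketch of the arithmetic behind exp_rates (assuming the usual linear
  # warmup followed by cosine decay): during warmup,
  #   lr(step) = 0.1 + step * (1.0 - 0.1) / 9,
  # giving 0.1, 0.5 and 0.9 at steps 0, 4 and 8; from step 9 onwards,
  #   lr(step) = 0.5 * (1 + cos(pi * (step - 9) / (100 - 9))),
  # giving 1.0 at step 9 and 0.0 at step 100.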
def testCosineDecayAfterTotalSteps(self):
def graph_fn(global_step):
learning_rate_base = 1.0
total_steps = 100
warmup_learning_rate = 0.1
warmup_steps = 9
learning_rate = learning_schedules.cosine_decay_with_warmup(
global_step, learning_rate_base, total_steps,
warmup_learning_rate, warmup_steps)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
exp_rates = [0]
input_global_steps = [101]
output_rates = [
self.execute(graph_fn, [np.array(step).astype(np.int64)])
for step in input_global_steps
]
self.assertAllClose(output_rates, exp_rates)
def testCosineDecayWithHoldBaseLearningRateSteps(self):
def graph_fn(global_step):
learning_rate_base = 1.0
total_steps = 120
warmup_learning_rate = 0.1
warmup_steps = 9
hold_base_rate_steps = 20
learning_rate = learning_schedules.cosine_decay_with_warmup(
global_step, learning_rate_base, total_steps,
warmup_learning_rate, warmup_steps, hold_base_rate_steps)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
exp_rates = [0.1, 0.5, 0.9, 1.0, 1.0, 1.0, 0.999702, 0.874255, 0.577365,
0.0]
input_global_steps = [0, 4, 8, 9, 10, 29, 30, 50, 70, 120]
output_rates = [
self.execute(graph_fn, [np.array(step).astype(np.int64)])
for step in input_global_steps
]
self.assertAllClose(output_rates, exp_rates)
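  # With hold_base_rate_steps=20 the rate stays at learning_rate_base for 20
  # steps after warmup ends, so steps 10-29 all read 1.0; the cosine decay
  # effectively starts at step 30, where
  #   0.5 * (1 + cos(pi * (30 - 9 - 20) / (120 - 9 - 20))) ~= 0.999702,
  # matching the expected rate above.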
def testManualStepping(self):
def graph_fn(global_step):
boundaries = [2, 3, 7]
rates = [1.0, 2.0, 3.0, 4.0]
learning_rate = learning_schedules.manual_stepping(
global_step, boundaries, rates)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
output_rates = [
self.execute(graph_fn, [np.array(i).astype(np.int64)])
for i in range(10)
]
exp_rates = [1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0]
self.assertAllClose(output_rates, exp_rates)
def testManualSteppingWithWarmup(self):
def graph_fn(global_step):
boundaries = [4, 6, 8]
rates = [0.02, 0.10, 0.01, 0.001]
learning_rate = learning_schedules.manual_stepping(
global_step, boundaries, rates, warmup=True)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
output_rates = [
self.execute(graph_fn, [np.array(i).astype(np.int64)])
for i in range(9)
]
exp_rates = [0.02, 0.04, 0.06, 0.08, 0.10, 0.10, 0.01, 0.01, 0.001]
self.assertAllClose(output_rates, exp_rates)
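  # With warmup=True the schedule linearly interpolates from rates[0] to
  # rates[1] over the first boundaries[0] steps:
  #   lr(step) = 0.02 + step * (0.10 - 0.02) / 4  for step in [0, 4],
  # which yields the 0.02, 0.04, ..., 0.10 ramp in exp_rates above.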
def testManualSteppingWithZeroBoundaries(self):
def graph_fn(global_step):
boundaries = []
rates = [0.01]
learning_rate = learning_schedules.manual_stepping(
global_step, boundaries, rates)
return (learning_rate,)
output_rates = [
self.execute(graph_fn, [np.array(i).astype(np.int64)])
for i in range(4)
]
exp_rates = [0.01] * 4
self.assertAllClose(output_rates, exp_rates)
if __name__ == '__main__':
tf.test.main()
| 6,613 | 34.945652 | 80 | py |
models | models-master/research/object_detection/utils/np_mask_ops_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.np_mask_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_mask_ops
class MaskOpsTests(tf.test.TestCase):
def setUp(self):
masks1_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
dtype=np.uint8)
masks1_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
masks1 = np.stack([masks1_0, masks1_1])
masks2_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
dtype=np.uint8)
masks2_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
masks2_2 = np.array([[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0]],
dtype=np.uint8)
masks2 = np.stack([masks2_0, masks2_1, masks2_2])
self.masks1 = masks1
self.masks2 = masks2
def testArea(self):
areas = np_mask_ops.area(self.masks1)
expected_areas = np.array([8.0, 10.0], dtype=np.float32)
self.assertAllClose(expected_areas, areas)
def testIntersection(self):
intersection = np_mask_ops.intersection(self.masks1, self.masks2)
expected_intersection = np.array(
[[8.0, 0.0, 8.0], [0.0, 9.0, 7.0]], dtype=np.float32)
self.assertAllClose(intersection, expected_intersection)
def testIOU(self):
iou = np_mask_ops.iou(self.masks1, self.masks2)
expected_iou = np.array(
[[1.0, 0.0, 8.0/25.0], [0.0, 9.0 / 16.0, 7.0 / 28.0]], dtype=np.float32)
self.assertAllClose(iou, expected_iou)
def testIOA(self):
ioa21 = np_mask_ops.ioa(self.masks1, self.masks2)
expected_ioa21 = np.array([[1.0, 0.0, 8.0/25.0],
[0.0, 9.0/15.0, 7.0/25.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
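  # Note on the expected values: ioa(masks1, masks2)[i, j] normalizes the
  # intersection by the area of masks2[j] (the second argument), e.g.
  # 9 / 15 for masks1_1 vs masks2_1 and 8 / 25 for masks1_0 vs masks2_2,
  # whereas iou normalizes by the area of the union.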
if __name__ == '__main__':
tf.test.main()
| 3,627 | 38.010753 | 80 | py |
models | models-master/research/object_detection/utils/variables_helper_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.variables_helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import tensorflow.compat.v1 as tf
from object_detection.utils import test_case
from object_detection.utils import tf_version
from object_detection.utils import variables_helper
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class FilterVariablesTest(test_case.TestCase):
def _create_variables(self):
return [tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights'),
tf.Variable(1.0, name='FeatureExtractor/InceptionV3/biases'),
tf.Variable(1.0, name='StackProposalGenerator/weights'),
tf.Variable(1.0, name='StackProposalGenerator/biases')]
def test_return_all_variables_when_empty_regex(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(variables, [''])
self.assertCountEqual(out_variables, variables)
def test_return_variables_which_do_not_match_single_regex(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(variables,
['FeatureExtractor/.*'])
self.assertCountEqual(out_variables, variables[2:])
def test_return_variables_which_do_not_match_any_regex_in_list(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(variables, [
'FeatureExtractor.*biases', 'StackProposalGenerator.*biases'
])
self.assertCountEqual(out_variables, [variables[0], variables[2]])
def test_return_variables_matching_empty_regex_list(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(
variables, [''], invert=True)
self.assertCountEqual(out_variables, [])
def test_return_variables_matching_some_regex_in_list(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(
variables,
['FeatureExtractor.*biases', 'StackProposalGenerator.*biases'],
invert=True)
self.assertCountEqual(out_variables, [variables[1], variables[3]])
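  # Taken together, these tests show that filter_variables treats the pattern
  # list as an exclusion list by default (variables matching any regex are
  # dropped) and as an inclusion list when invert=True.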
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class MultiplyGradientsMatchingRegexTest(tf.test.TestCase):
def _create_grads_and_vars(self):
return [(tf.constant(1.0),
tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')),
(tf.constant(2.0),
tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')),
(tf.constant(3.0),
tf.Variable(3.0, name='StackProposalGenerator/weights')),
(tf.constant(4.0),
tf.Variable(4.0, name='StackProposalGenerator/biases'))]
def test_multiply_all_feature_extractor_variables(self):
grads_and_vars = self._create_grads_and_vars()
regex_list = ['FeatureExtractor/.*']
multiplier = 0.0
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars, regex_list, multiplier)
exp_output = [(0.0, 1.0), (0.0, 2.0), (3.0, 3.0), (4.0, 4.0)]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
output = sess.run(grads_and_vars)
self.assertCountEqual(output, exp_output)
def test_multiply_all_bias_variables(self):
grads_and_vars = self._create_grads_and_vars()
regex_list = ['.*/biases']
multiplier = 0.0
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars, regex_list, multiplier)
exp_output = [(1.0, 1.0), (0.0, 2.0), (3.0, 3.0), (0.0, 4.0)]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
output = sess.run(grads_and_vars)
self.assertCountEqual(output, exp_output)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class FreezeGradientsMatchingRegexTest(test_case.TestCase):
def _create_grads_and_vars(self):
return [(tf.constant(1.0),
tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')),
(tf.constant(2.0),
tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')),
(tf.constant(3.0),
tf.Variable(3.0, name='StackProposalGenerator/weights')),
(tf.constant(4.0),
tf.Variable(4.0, name='StackProposalGenerator/biases'))]
def test_freeze_all_feature_extractor_variables(self):
grads_and_vars = self._create_grads_and_vars()
regex_list = ['FeatureExtractor/.*']
grads_and_vars = variables_helper.freeze_gradients_matching_regex(
grads_and_vars, regex_list)
exp_output = [(3.0, 3.0), (4.0, 4.0)]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
output = sess.run(grads_and_vars)
self.assertCountEqual(output, exp_output)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class GetVariablesAvailableInCheckpointTest(test_case.TestCase):
def test_return_all_variables_from_checkpoint(self):
with tf.Graph().as_default():
variables = [
tf.Variable(1.0, name='weights'),
tf.Variable(1.0, name='biases')
]
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
out_variables = variables_helper.get_variables_available_in_checkpoint(
variables, checkpoint_path)
self.assertCountEqual(out_variables, variables)
def test_return_all_variables_from_checkpoint_with_partition(self):
with tf.Graph().as_default():
partitioner = tf.fixed_size_partitioner(2)
variables = [
tf.get_variable(
name='weights', shape=(2, 2), partitioner=partitioner),
tf.Variable([1.0, 2.0], name='biases')
]
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
out_variables = variables_helper.get_variables_available_in_checkpoint(
variables, checkpoint_path)
self.assertCountEqual(out_variables, variables)
def test_return_variables_available_in_checkpoint(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
with tf.Graph().as_default():
weight_variable = tf.Variable(1.0, name='weights')
global_step = tf.train.get_or_create_global_step()
graph1_variables = [
weight_variable,
global_step
]
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(graph1_variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
with tf.Graph().as_default():
graph2_variables = graph1_variables + [tf.Variable(1.0, name='biases')]
out_variables = variables_helper.get_variables_available_in_checkpoint(
graph2_variables, checkpoint_path, include_global_step=False)
self.assertCountEqual(out_variables, [weight_variable])
def test_return_variables_available_an_checkpoint_with_dict_inputs(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
with tf.Graph().as_default():
graph1_variables = [
tf.Variable(1.0, name='ckpt_weights'),
]
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(graph1_variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
with tf.Graph().as_default():
graph2_variables_dict = {
'ckpt_weights': tf.Variable(1.0, name='weights'),
'ckpt_biases': tf.Variable(1.0, name='biases')
}
out_variables = variables_helper.get_variables_available_in_checkpoint(
graph2_variables_dict, checkpoint_path)
self.assertIsInstance(out_variables, dict)
self.assertCountEqual(list(out_variables.keys()), ['ckpt_weights'])
self.assertEqual(out_variables['ckpt_weights'].op.name, 'weights')
def test_return_variables_with_correct_sizes(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
with tf.Graph().as_default():
bias_variable = tf.Variable(3.0, name='biases')
global_step = tf.train.get_or_create_global_step()
graph1_variables = [
tf.Variable([[1.0, 2.0], [3.0, 4.0]], name='weights'),
bias_variable,
global_step
]
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(graph1_variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
with tf.Graph().as_default():
graph2_variables = [
tf.Variable([1.0, 2.0], name='weights'), # New variable shape.
bias_variable,
global_step
]
out_variables = variables_helper.get_variables_available_in_checkpoint(
graph2_variables, checkpoint_path, include_global_step=True)
self.assertCountEqual(out_variables, [bias_variable, global_step])
if __name__ == '__main__':
tf.test.main()
| 10,185 | 39.581673 | 80 | py |
models | models-master/research/object_detection/utils/metrics_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import metrics
class MetricsTest(tf.test.TestCase):
def test_compute_cor_loc(self):
num_gt_imgs_per_class = np.array([100, 1, 5, 1, 1], dtype=int)
num_images_correctly_detected_per_class = np.array(
[10, 0, 1, 0, 0], dtype=int)
corloc = metrics.compute_cor_loc(num_gt_imgs_per_class,
num_images_correctly_detected_per_class)
expected_corloc = np.array([0.1, 0, 0.2, 0, 0], dtype=float)
self.assertTrue(np.allclose(corloc, expected_corloc))
def test_compute_cor_loc_nans(self):
num_gt_imgs_per_class = np.array([100, 0, 0, 1, 1], dtype=int)
num_images_correctly_detected_per_class = np.array(
[10, 0, 1, 0, 0], dtype=int)
corloc = metrics.compute_cor_loc(num_gt_imgs_per_class,
num_images_correctly_detected_per_class)
expected_corloc = np.array([0.1, np.nan, np.nan, 0, 0], dtype=float)
self.assertAllClose(corloc, expected_corloc)
def test_compute_precision_recall(self):
num_gt = 10
scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
labels = np.array([0, 1, 1, 0, 0, 1], dtype=bool)
labels_float_type = np.array([0, 1, 1, 0, 0, 1], dtype=float)
accumulated_tp_count = np.array([0, 1, 1, 2, 2, 3], dtype=float)
expected_precision = accumulated_tp_count / np.array([1, 2, 3, 4, 5, 6])
expected_recall = accumulated_tp_count / num_gt
precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
precision_float_type, recall_float_type = metrics.compute_precision_recall(
scores, labels_float_type, num_gt)
self.assertAllClose(precision, expected_precision)
self.assertAllClose(recall, expected_recall)
self.assertAllClose(precision_float_type, expected_precision)
self.assertAllClose(recall_float_type, expected_recall)
def test_compute_precision_recall_float(self):
num_gt = 10
scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
labels_float = np.array([0, 1, 1, 0.5, 0, 1], dtype=float)
expected_precision = np.array(
[0., 0.5, 0.33333333, 0.5, 0.55555556, 0.63636364], dtype=float)
expected_recall = np.array([0., 0.1, 0.1, 0.2, 0.25, 0.35], dtype=float)
precision, recall = metrics.compute_precision_recall(
scores, labels_float, num_gt)
self.assertAllClose(precision, expected_precision)
self.assertAllClose(recall, expected_recall)
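  # The expected values are consistent with fractional labels contributing
  # their value to the cumulative true-positive count while only labels <= 0
  # count as false positives: at the fifth-ranked detection (label 0.5) the
  # cumulative counts are tp=2.5, fp=2, giving precision 2.5 / 4.5 = 0.5555...
  # and recall 2.5 / 10 = 0.25.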
def test_compute_average_precision(self):
precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
processed_precision = np.array(
[0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0], dtype=float)
recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
expected_mean_ap = np.sum(recall_interval * processed_precision)
mean_ap = metrics.compute_average_precision(precision, recall)
self.assertAlmostEqual(expected_mean_ap, mean_ap)
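  # processed_precision is the precision envelope (the running maximum of
  # precision taken from the right) and recall_interval holds the recall
  # increments, so the expected value is the usual interpolated sum
  #   AP = sum_i (recall[i] - recall[i - 1]) * max_{j >= i} precision[j].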
def test_compute_precision_recall_and_ap_no_groundtruth(self):
num_gt = 0
scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
expected_precision = None
expected_recall = None
precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEqual(precision, expected_precision)
    self.assertEqual(recall, expected_recall)
ap = metrics.compute_average_precision(precision, recall)
self.assertTrue(np.isnan(ap))
def test_compute_recall_at_k(self):
num_gt = 4
tp_fp = [
np.array([1, 0, 0], dtype=float),
np.array([0, 1], dtype=float),
np.array([0, 0, 0, 0, 0], dtype=float)
]
tp_fp_bool = [
np.array([True, False, False], dtype=bool),
np.array([False, True], dtype=float),
np.array([False, False, False, False, False], dtype=float)
]
recall_1 = metrics.compute_recall_at_k(tp_fp, num_gt, 1)
recall_3 = metrics.compute_recall_at_k(tp_fp, num_gt, 3)
recall_5 = metrics.compute_recall_at_k(tp_fp, num_gt, 5)
recall_3_bool = metrics.compute_recall_at_k(tp_fp_bool, num_gt, 3)
self.assertAlmostEqual(recall_1, 0.25)
self.assertAlmostEqual(recall_3, 0.5)
self.assertAlmostEqual(recall_3_bool, 0.5)
self.assertAlmostEqual(recall_5, 0.5)
def test_compute_median_rank_at_k(self):
tp_fp = [
np.array([1, 0, 0], dtype=float),
np.array([0, 0.1], dtype=float),
np.array([0, 0, 0, 0, 0], dtype=float)
]
tp_fp_bool = [
np.array([True, False, False], dtype=bool),
np.array([False, True], dtype=float),
np.array([False, False, False, False, False], dtype=float)
]
median_ranks_1 = metrics.compute_median_rank_at_k(tp_fp, 1)
median_ranks_3 = metrics.compute_median_rank_at_k(tp_fp, 3)
median_ranks_5 = metrics.compute_median_rank_at_k(tp_fp, 5)
median_ranks_3_bool = metrics.compute_median_rank_at_k(tp_fp_bool, 3)
    self.assertEqual(median_ranks_1, 0)
    self.assertEqual(median_ranks_3, 0.5)
    self.assertEqual(median_ranks_3_bool, 0.5)
    self.assertEqual(median_ranks_5, 0.5)
if __name__ == '__main__':
tf.test.main()
| 6,162 | 40.641892 | 80 | py |
models | models-master/research/object_detection/utils/per_image_evaluation_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.per_image_evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.utils import per_image_evaluation
class SingleClassTpFpWithDifficultBoxesTest(tf.test.TestCase):
def setUp(self):
num_groundtruth_classes = 1
matching_iou_threshold = 0.5
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
self.eval = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes, matching_iou_threshold, nms_iou_threshold,
nms_max_output_boxes)
self.detected_boxes = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
self.detected_scores = np.array([0.6, 0.8, 0.5], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_1 = np.array([[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_2 = np.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 0, 0]], dtype=np.uint8)
self.detected_masks = np.stack(
[detected_masks_0, detected_masks_1, detected_masks_2], axis=0)
self.groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 10, 10]],
dtype=float)
groundtruth_masks_0 = np.array([[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks_1 = np.array([[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1]], dtype=np.uint8)
self.groundtruth_masks = np.stack(
[groundtruth_masks_0, groundtruth_masks_1], axis=0)
def test_match_to_gt_box_0(self):
groundtruth_groundtruth_is_difficult_list = np.array([False, True],
dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, False], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, True, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_match_to_gt_mask_0(self):
groundtruth_groundtruth_is_difficult_list = np.array([False, True],
dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, False], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=self.groundtruth_masks)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([True, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_match_to_gt_box_1(self):
groundtruth_groundtruth_is_difficult_list = np.array([True, False],
dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, False], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = np.array([0.8, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_match_to_gt_mask_1(self):
groundtruth_groundtruth_is_difficult_list = np.array([True, False],
dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, False], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=self.groundtruth_masks)
expected_scores = np.array([0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
class SingleClassTpFpWithGroupOfBoxesTest(tf.test.TestCase):
def setUp(self):
num_groundtruth_classes = 1
matching_iou_threshold = 0.5
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
self.eval = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes, matching_iou_threshold, nms_iou_threshold,
nms_max_output_boxes)
self.detected_boxes = np.array(
[[0, 0, 1, 1], [0, 0, 2, 1], [0, 0, 3, 1]], dtype=float)
self.detected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_1 = np.array([[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_2 = np.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 0, 0]], dtype=np.uint8)
self.detected_masks = np.stack(
[detected_masks_0, detected_masks_1, detected_masks_2], axis=0)
self.groundtruth_boxes = np.array(
[[0, 0, 1, 1], [0, 0, 5, 5], [10, 10, 20, 20]], dtype=float)
groundtruth_masks_0 = np.array([[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks_1 = np.array([[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0]], dtype=np.uint8)
groundtruth_masks_2 = np.array([[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]], dtype=np.uint8)
self.groundtruth_masks = np.stack(
[groundtruth_masks_0, groundtruth_masks_1, groundtruth_masks_2], axis=0)
def test_match_to_non_group_of_and_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, True, True], dtype=bool)
expected_scores = np.array([0.8], dtype=float)
expected_tp_fp_labels = np.array([True], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_match_to_non_group_of_and_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, True, True], dtype=bool)
expected_scores = np.array([0.6], dtype=float)
expected_tp_fp_labels = np.array([True], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=self.groundtruth_masks)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_match_two_to_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[True, False, True], dtype=bool)
expected_scores = np.array([0.5], dtype=float)
expected_tp_fp_labels = np.array([False], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_match_two_to_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[True, False, True], dtype=bool)
expected_scores = np.array([0.8], dtype=float)
expected_tp_fp_labels = np.array([True], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=self.groundtruth_masks)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
class SingleClassTpFpWithGroupOfBoxesTestWeighted(tf.test.TestCase):
def setUp(self):
num_groundtruth_classes = 1
matching_iou_threshold = 0.5
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
self.group_of_weight = 0.5
self.eval = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes, matching_iou_threshold, nms_iou_threshold,
nms_max_output_boxes, self.group_of_weight)
self.detected_boxes = np.array(
[[0, 0, 1, 1], [0, 0, 2, 1], [0, 0, 3, 1]], dtype=float)
self.detected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
detected_masks_0 = np.array(
[[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]], dtype=np.uint8)
detected_masks_1 = np.array(
[[1, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0]], dtype=np.uint8)
detected_masks_2 = np.array(
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 0, 0]], dtype=np.uint8)
self.detected_masks = np.stack(
[detected_masks_0, detected_masks_1, detected_masks_2], axis=0)
self.groundtruth_boxes = np.array(
[[0, 0, 1, 1], [0, 0, 5, 5], [10, 10, 20, 20]], dtype=float)
groundtruth_masks_0 = np.array(
[[1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks_1 = np.array(
[[0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0]], dtype=np.uint8)
groundtruth_masks_2 = np.array(
[[0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0]], dtype=np.uint8)
self.groundtruth_masks = np.stack(
[groundtruth_masks_0, groundtruth_masks_1, groundtruth_masks_2], axis=0)
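  # Unlike the unweighted tests above, where detections matched only to
  # group-of boxes are ignored, the tests below score the best detection for a
  # group-of box as a soft true positive with weight group_of_weight (0.5
  # here); matches to ordinary groundtruth boxes still count with weight 1.0.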
def test_match_to_non_group_of_and_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, True, True], dtype=bool)
expected_scores = np.array([0.8, 0.6], dtype=float)
expected_tp_fp_labels = np.array([1.0, self.group_of_weight], dtype=float)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_match_to_non_group_of_and_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, True, True], dtype=bool)
expected_scores = np.array([0.6, 0.8, 0.5], dtype=float)
expected_tp_fp_labels = np.array(
[1.0, self.group_of_weight, self.group_of_weight], dtype=float)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=self.groundtruth_masks)
tf.logging.info(
"test_mask_match_to_non_group_of_and_group_of_box {} {}".format(
tp_fp_labels, expected_tp_fp_labels))
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_match_two_to_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[True, False, True], dtype=bool)
expected_scores = np.array([0.5, 0.8], dtype=float)
expected_tp_fp_labels = np.array([0.0, self.group_of_weight], dtype=float)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
tf.logging.info("test_match_two_to_group_of_box {} {}".format(
tp_fp_labels, expected_tp_fp_labels))
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_match_two_to_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[True, False, True], dtype=bool)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array(
[1.0, self.group_of_weight, self.group_of_weight], dtype=float)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=self.groundtruth_masks)
tf.logging.info("test_mask_match_two_to_group_of_box {} {}".format(
tp_fp_labels, expected_tp_fp_labels))
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
class SingleClassTpFpNoDifficultBoxesTest(tf.test.TestCase):
def setUp(self):
num_groundtruth_classes = 1
matching_iou_threshold_high_iou = 0.5
matching_iou_threshold_low_iou = 0.1
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
self.eval_high_iou = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes, matching_iou_threshold_high_iou,
nms_iou_threshold, nms_max_output_boxes)
self.eval_low_iou = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes, matching_iou_threshold_low_iou,
nms_iou_threshold, nms_max_output_boxes)
self.detected_boxes = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
self.detected_scores = np.array([0.6, 0.8, 0.5], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_1 = np.array([[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_2 = np.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 0, 0]], dtype=np.uint8)
self.detected_masks = np.stack(
[detected_masks_0, detected_masks_1, detected_masks_2], axis=0)
def test_no_true_positives(self):
groundtruth_boxes = np.array([[100, 100, 105, 105]], dtype=float)
groundtruth_groundtruth_is_difficult_list = np.zeros(1, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False], dtype=bool)
scores, tp_fp_labels = self.eval_high_iou._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_no_true_positives(self):
groundtruth_boxes = np.array([[100, 100, 105, 105]], dtype=float)
groundtruth_masks_0 = np.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]], dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_masks_0], axis=0)
groundtruth_groundtruth_is_difficult_list = np.zeros(1, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False], dtype=bool)
scores, tp_fp_labels = self.eval_high_iou._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=groundtruth_masks)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_one_true_positives_with_large_iou_threshold(self):
groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_groundtruth_is_difficult_list = np.zeros(1, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False], dtype=bool)
scores, tp_fp_labels = self.eval_high_iou._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, True, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_one_true_positives_with_large_iou_threshold(self):
groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_masks_0 = np.array([[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_masks_0], axis=0)
groundtruth_groundtruth_is_difficult_list = np.zeros(1, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False], dtype=bool)
scores, tp_fp_labels = self.eval_high_iou._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=groundtruth_masks)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([True, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_one_true_positives_with_very_small_iou_threshold(self):
groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_groundtruth_is_difficult_list = np.zeros(1, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False], dtype=bool)
scores, tp_fp_labels = self.eval_low_iou._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([True, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_two_true_positives_with_large_iou_threshold(self):
groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 3.5, 3.5]], dtype=float)
groundtruth_groundtruth_is_difficult_list = np.zeros(2, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, False], dtype=bool)
scores, tp_fp_labels = self.eval_high_iou._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, True, True], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
class SingleClassTpFpEmptyMaskAndBoxesTest(tf.test.TestCase):
def setUp(self):
num_groundtruth_classes = 1
matching_iou_threshold_iou = 0.5
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
self.eval = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes, matching_iou_threshold_iou, nms_iou_threshold,
nms_max_output_boxes)
def test_mask_tp_and_ignore(self):
# GT: one box with mask, one without
# Det: One mask matches gt1, one matches box gt2 and is ignored
groundtruth_boxes = np.array([[0, 0, 2, 3], [0, 0, 2, 2]], dtype=float)
groundtruth_mask_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_mask_1 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_mask_0, groundtruth_mask_1],
axis=0)
groundtruth_groundtruth_is_difficult_list = np.zeros(2, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False, False],
dtype=bool)
detected_boxes = np.array([[0, 0, 2, 3], [0, 0, 2, 2]], dtype=float)
detected_scores = np.array([0.6, 0.8], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks_1 = np.array([[1, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks = np.stack([detected_masks_0, detected_masks_1], axis=0)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
detected_boxes, detected_scores, groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list, detected_masks,
groundtruth_masks)
expected_scores = np.array([0.6], dtype=float)
expected_tp_fp_labels = np.array([True], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_one_tp_one_fp(self):
# GT: one box with mask, one without
# Det: one mask matches gt1, one is fp (box does not match)
groundtruth_boxes = np.array([[0, 0, 2, 3], [2, 2, 4, 4]], dtype=float)
groundtruth_mask_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_mask_1 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_mask_0, groundtruth_mask_1],
axis=0)
groundtruth_groundtruth_is_difficult_list = np.zeros(2, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False, False],
dtype=bool)
detected_boxes = np.array([[0, 0, 2, 3], [0, 0, 2, 2]], dtype=float)
detected_scores = np.array([0.6, 0.8], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks_1 = np.array([[1, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks = np.stack([detected_masks_0, detected_masks_1], axis=0)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
detected_boxes,
detected_scores,
groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks)
expected_scores = np.array([0.8, 0.6], dtype=float)
expected_tp_fp_labels = np.array([False, True], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_two_mask_one_gt_one_ignore(self):
# GT: one box with mask, one without.
# Det: two mask matches same gt, one is tp, one is passed down to box match
# and ignored.
groundtruth_boxes = np.array([[0, 0, 2, 3], [0, 0, 2, 3]], dtype=float)
groundtruth_mask_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_mask_1 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_mask_0, groundtruth_mask_1],
axis=0)
groundtruth_groundtruth_is_difficult_list = np.zeros(2, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False, False],
dtype=bool)
detected_boxes = np.array([[0, 0, 2, 3], [0, 0, 2, 3]], dtype=float)
detected_scores = np.array([0.6, 0.8], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks_1 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks = np.stack([detected_masks_0, detected_masks_1], axis=0)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
detected_boxes,
detected_scores,
groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks)
expected_scores = np.array([0.8], dtype=float)
expected_tp_fp_labels = np.array([True], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_two_mask_one_gt_one_fp(self):
# GT: one box with mask, one without.
# Det: two mask matches same gt, one is tp, one is passed down to box match
# and is fp.
groundtruth_boxes = np.array([[0, 0, 2, 3], [2, 3, 4, 6]], dtype=float)
groundtruth_mask_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_mask_1 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_mask_0, groundtruth_mask_1],
axis=0)
groundtruth_groundtruth_is_difficult_list = np.zeros(2, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False, False],
dtype=bool)
detected_boxes = np.array([[0, 0, 2, 3], [0, 0, 2, 3]], dtype=float)
detected_scores = np.array([0.6, 0.8], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks_1 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks = np.stack([detected_masks_0, detected_masks_1], axis=0)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
detected_boxes,
detected_scores,
groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks)
expected_scores = np.array([0.8, 0.6], dtype=float)
expected_tp_fp_labels = np.array([True, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
class MultiClassesTpFpTest(tf.test.TestCase):
def test_tp_fp(self):
num_groundtruth_classes = 3
matching_iou_threshold = 0.5
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
eval1 = per_image_evaluation.PerImageEvaluation(num_groundtruth_classes,
matching_iou_threshold,
nms_iou_threshold,
nms_max_output_boxes)
detected_boxes = np.array([[0, 0, 1, 1], [10, 10, 5, 5], [0, 0, 2, 2],
[5, 10, 10, 5], [10, 5, 5, 10], [0, 0, 3, 3]],
dtype=float)
detected_scores = np.array([0.8, 0.1, 0.8, 0.9, 0.7, 0.8], dtype=float)
detected_class_labels = np.array([0, 1, 1, 2, 0, 2], dtype=int)
groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 3.5, 3.5]], dtype=float)
groundtruth_class_labels = np.array([0, 2], dtype=int)
groundtruth_groundtruth_is_difficult_list = np.zeros(2, dtype=float)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, False], dtype=bool)
scores, tp_fp_labels, _ = eval1.compute_object_detection_metrics(
detected_boxes, detected_scores, detected_class_labels,
groundtruth_boxes, groundtruth_class_labels,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = [np.array([0.8], dtype=float)] * 3
    expected_tp_fp_labels = [
        np.array([True]), np.array([False]), np.array([True])
    ]
for i in range(len(expected_scores)):
self.assertTrue(np.allclose(expected_scores[i], scores[i]))
self.assertTrue(np.array_equal(expected_tp_fp_labels[i], tp_fp_labels[i]))
class CorLocTest(tf.test.TestCase):
def test_compute_corloc_with_normal_iou_threshold(self):
num_groundtruth_classes = 3
matching_iou_threshold = 0.5
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
eval1 = per_image_evaluation.PerImageEvaluation(num_groundtruth_classes,
matching_iou_threshold,
nms_iou_threshold,
nms_max_output_boxes)
detected_boxes = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3],
[0, 0, 5, 5]], dtype=float)
detected_scores = np.array([0.9, 0.9, 0.1, 0.9], dtype=float)
detected_class_labels = np.array([0, 1, 0, 2], dtype=int)
groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 3, 3], [0, 0, 6, 6]],
dtype=float)
groundtruth_class_labels = np.array([0, 0, 2], dtype=int)
is_class_correctly_detected_in_image = eval1._compute_cor_loc(
detected_boxes, detected_scores, detected_class_labels,
groundtruth_boxes, groundtruth_class_labels)
expected_result = np.array([1, 0, 1], dtype=int)
self.assertTrue(np.array_equal(expected_result,
is_class_correctly_detected_in_image))
def test_compute_corloc_with_very_large_iou_threshold(self):
num_groundtruth_classes = 3
matching_iou_threshold = 0.9
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
eval1 = per_image_evaluation.PerImageEvaluation(num_groundtruth_classes,
matching_iou_threshold,
nms_iou_threshold,
nms_max_output_boxes)
detected_boxes = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3],
[0, 0, 5, 5]], dtype=float)
detected_scores = np.array([0.9, 0.9, 0.1, 0.9], dtype=float)
detected_class_labels = np.array([0, 1, 0, 2], dtype=int)
groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 3, 3], [0, 0, 6, 6]],
dtype=float)
groundtruth_class_labels = np.array([0, 0, 2], dtype=int)
is_class_correctly_detected_in_image = eval1._compute_cor_loc(
detected_boxes, detected_scores, detected_class_labels,
groundtruth_boxes, groundtruth_class_labels)
expected_result = np.array([1, 0, 0], dtype=int)
self.assertTrue(np.array_equal(expected_result,
is_class_correctly_detected_in_image))
if __name__ == "__main__":
tf.test.main()
| 35,342 | 48.155772 | 80 | py |
models | models-master/research/object_detection/utils/vrd_evaluation_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.object_detection.utils.vrd_evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.utils import vrd_evaluation
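# The structured arrays below use vrd_evaluation's dtypes: each
# vrd_box_data_type entry holds a (subject box, object box) pair, and each
# label_data_type entry holds a (subject, object, relation) class triple, so
# (1, 2, 3) reads as subject class 1, object class 2, relation 3 (assuming
# that field ordering).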
class VRDRelationDetectionEvaluatorTest(tf.test.TestCase):
def test_vrdrelation_evaluator(self):
self.vrd_eval = vrd_evaluation.VRDRelationDetectionEvaluator()
image_key1 = 'img1'
groundtruth_box_tuples1 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2]), ([0, 0, 1, 1], [1, 2, 2, 3])],
dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples1 = np.array(
[(1, 2, 3), (1, 4, 3)], dtype=vrd_evaluation.label_data_type)
groundtruth_verified_labels1 = np.array([1, 2, 3, 4, 5], dtype=int)
self.vrd_eval.add_single_ground_truth_image_info(
image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_box_tuples1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_tuples1,
standard_fields.InputDataFields.groundtruth_image_classes:
groundtruth_verified_labels1
})
image_key2 = 'img2'
groundtruth_box_tuples2 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples2 = np.array(
[(1, 4, 3)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_box_tuples2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_tuples2,
})
image_key3 = 'img3'
groundtruth_box_tuples3 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples3 = np.array(
[(1, 2, 4)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key3, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_box_tuples3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_tuples3,
})
image_key = 'img1'
detected_box_tuples = np.array(
[([0, 0.3, 1, 1], [1.1, 1, 2, 2]), ([0, 0, 1, 1], [1, 1, 2, 2]),
([0.5, 0, 1, 1], [1, 1, 3, 3])],
dtype=vrd_evaluation.vrd_box_data_type)
detected_class_tuples = np.array(
[(1, 2, 5), (1, 2, 3), (1, 6, 3)], dtype=vrd_evaluation.label_data_type)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
self.vrd_eval.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_box_tuples,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_tuples
})
metrics = self.vrd_eval.evaluate()
    self.assertAlmostEqual(
        metrics['VRDMetric_Relationships_weightedAP@0.5IOU'], 0.25)
    self.assertAlmostEqual(metrics['VRDMetric_Relationships_mAP@0.5IOU'],
                           0.1666666666666666)
    self.assertAlmostEqual(metrics['VRDMetric_Relationships_AP@0.5IOU/3'],
                           0.3333333333333333)
    self.assertAlmostEqual(metrics['VRDMetric_Relationships_AP@0.5IOU/4'], 0)
    self.assertAlmostEqual(metrics['VRDMetric_Relationships_Recall@50@0.5IOU'],
                           0.25)
    self.assertAlmostEqual(metrics['VRDMetric_Relationships_Recall@100@0.5IOU'],
                           0.25)
self.vrd_eval.clear()
self.assertFalse(self.vrd_eval._image_ids)
class VRDPhraseDetectionEvaluatorTest(tf.test.TestCase):
def test_vrdphrase_evaluator(self):
self.vrd_eval = vrd_evaluation.VRDPhraseDetectionEvaluator()
image_key1 = 'img1'
groundtruth_box_tuples1 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2]), ([0, 0, 1, 1], [1, 2, 2, 3])],
dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples1 = np.array(
[(1, 2, 3), (1, 4, 3)], dtype=vrd_evaluation.label_data_type)
groundtruth_verified_labels1 = np.array([1, 2, 3, 4, 5], dtype=int)
self.vrd_eval.add_single_ground_truth_image_info(
image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_box_tuples1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_tuples1,
standard_fields.InputDataFields.groundtruth_image_classes:
groundtruth_verified_labels1
})
image_key2 = 'img2'
groundtruth_box_tuples2 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples2 = np.array(
[(1, 4, 3)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_box_tuples2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_tuples2,
})
image_key3 = 'img3'
groundtruth_box_tuples3 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples3 = np.array(
[(1, 2, 4)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key3, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_box_tuples3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_tuples3,
})
image_key = 'img1'
detected_box_tuples = np.array(
[([0, 0.3, 0.5, 0.5], [0.3, 0.3, 1.0, 1.0]),
([0, 0, 1.2, 1.2], [0.0, 0.0, 2.0, 2.0]),
([0.5, 0, 1, 1], [1, 1, 3, 3])],
dtype=vrd_evaluation.vrd_box_data_type)
detected_class_tuples = np.array(
[(1, 2, 5), (1, 2, 3), (1, 6, 3)], dtype=vrd_evaluation.label_data_type)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
self.vrd_eval.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_box_tuples,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_tuples
})
metrics = self.vrd_eval.evaluate()
self.assertAlmostEqual(metrics['[email protected]'], 0.25)
self.assertAlmostEqual(metrics['[email protected]'],
0.1666666666666666)
self.assertAlmostEqual(metrics['[email protected]/3'],
0.3333333333333333)
self.assertAlmostEqual(metrics['[email protected]/4'], 0)
self.assertAlmostEqual(metrics['VRDMetric_Phrases_Recall@[email protected]'], 0.25)
self.assertAlmostEqual(metrics['VRDMetric_Phrases_Recall@[email protected]'], 0.25)
self.vrd_eval.clear()
self.assertFalse(self.vrd_eval._image_ids)
class VRDDetectionEvaluationTest(tf.test.TestCase):
  def test_vrd_detection_evaluation(self):
self.vrd_eval = vrd_evaluation._VRDDetectionEvaluation(
matching_iou_threshold=0.5)
image_key1 = 'img1'
groundtruth_box_tuples1 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2]), ([0, 0, 1, 1], [1, 2, 2, 3])],
dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples1 = np.array(
[(1, 2, 3), (1, 4, 3)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key1, groundtruth_box_tuples1, groundtruth_class_tuples1)
image_key2 = 'img2'
groundtruth_box_tuples2 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples2 = np.array(
[(1, 4, 3)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key2, groundtruth_box_tuples2, groundtruth_class_tuples2)
image_key3 = 'img3'
groundtruth_box_tuples3 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples3 = np.array(
[(1, 2, 4)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key3, groundtruth_box_tuples3, groundtruth_class_tuples3)
image_key = 'img1'
detected_box_tuples = np.array(
[([0, 0.3, 1, 1], [1.1, 1, 2, 2]), ([0, 0, 1, 1], [1, 1, 2, 2])],
dtype=vrd_evaluation.vrd_box_data_type)
detected_class_tuples = np.array(
[(1, 2, 3), (1, 2, 3)], dtype=vrd_evaluation.label_data_type)
detected_scores = np.array([0.7, 0.8], dtype=float)
self.vrd_eval.add_single_detected_image_info(
image_key, detected_box_tuples, detected_scores, detected_class_tuples)
metrics = self.vrd_eval.evaluate()
expected_weighted_average_precision = 0.25
expected_mean_average_precision = 0.16666666666666
expected_precision = np.array([1., 0.5], dtype=float)
expected_recall = np.array([0.25, 0.25], dtype=float)
expected_recall_50 = 0.25
expected_recall_100 = 0.25
expected_median_rank_50 = 0
expected_median_rank_100 = 0
self.assertAlmostEqual(expected_weighted_average_precision,
metrics.weighted_average_precision)
    self.assertAlmostEqual(expected_mean_average_precision,
                           metrics.mean_average_precision)
self.assertAllClose(expected_precision, metrics.precisions)
self.assertAllClose(expected_recall, metrics.recalls)
self.assertAlmostEqual(expected_recall_50, metrics.recall_50)
self.assertAlmostEqual(expected_recall_100, metrics.recall_100)
self.assertAlmostEqual(expected_median_rank_50, metrics.median_rank_50)
self.assertAlmostEqual(expected_median_rank_100, metrics.median_rank_100)
if __name__ == '__main__':
tf.test.main()
| 11,183 | 41.687023 | 80 | py |
models | models-master/research/object_detection/utils/np_box_mask_list.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy BoxMaskList classes and functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from object_detection.utils import np_box_list
class BoxMaskList(np_box_list.BoxList):
"""Convenience wrapper for BoxList with masks.
BoxMaskList extends the np_box_list.BoxList to contain masks as well.
In particular, its constructor receives both boxes and masks. Note that the
masks correspond to the full image.
"""
def __init__(self, box_data, mask_data):
"""Constructs box collection.
Args:
box_data: a numpy array of shape [N, 4] representing box coordinates
mask_data: a numpy array of shape [N, height, width] representing masks
        with values in {0, 1}. The masks correspond to the full
image. The height and the width will be equal to image height and width.
Raises:
ValueError: if bbox data is not a numpy array
ValueError: if invalid dimensions for bbox data
ValueError: if mask data is not a numpy array
ValueError: if invalid dimension for mask data
"""
super(BoxMaskList, self).__init__(box_data)
if not isinstance(mask_data, np.ndarray):
raise ValueError('Mask data must be a numpy array.')
if len(mask_data.shape) != 3:
raise ValueError('Invalid dimensions for mask data.')
if mask_data.dtype != np.uint8:
raise ValueError('Invalid data type for mask data: uint8 is required.')
if mask_data.shape[0] != box_data.shape[0]:
raise ValueError('There should be the same number of boxes and masks.')
self.data['masks'] = mask_data
def get_masks(self):
"""Convenience function for accessing masks.
Returns:
a numpy array of shape [N, height, width] representing masks
"""
return self.get_field('masks')
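# --- Hedged usage sketch (not part of the original module) ---
# The boxes, mask layout and image size below are made-up illustration values;
# this only shows how BoxMaskList ties [N, 4] float boxes to [N, height, width]
# uint8 full-image masks.
if __name__ == '__main__':
  example_boxes = np.array([[0.0, 0.0, 1.0, 1.0],
                            [1.0, 1.0, 3.0, 3.0]], dtype=np.float32)
  example_masks = np.zeros([2, 4, 4], dtype=np.uint8)
  example_masks[0, 0:1, 0:1] = 1  # mask pixels for the first box
  example_masks[1, 1:3, 1:3] = 1  # mask pixels for the second box
  box_mask_list = BoxMaskList(box_data=example_boxes, mask_data=example_masks)
  print(box_mask_list.num_boxes())        # -> 2
  print(box_mask_list.get_masks().shape)  # -> (2, 4, 4)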
| 2,541 | 36.940299 | 80 | py |
models | models-master/research/object_detection/utils/learning_schedules.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of common learning rate schedules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
def _learning_rate_return_value(eager_decay_rate):
"""Helper function to return proper learning rate based on tf version."""
if tf.executing_eagerly():
return eager_decay_rate
else:
return eager_decay_rate()
def exponential_decay_with_burnin(global_step,
learning_rate_base,
learning_rate_decay_steps,
learning_rate_decay_factor,
burnin_learning_rate=0.0,
burnin_steps=0,
min_learning_rate=0.0,
staircase=True):
"""Exponential decay schedule with burn-in period.
In this schedule, learning rate is fixed at burnin_learning_rate
for a fixed period, before transitioning to a regular exponential
decay schedule.
Args:
global_step: int tensor representing global step.
learning_rate_base: base learning rate.
learning_rate_decay_steps: steps to take between decaying the learning rate.
Note that this includes the number of burn-in steps.
learning_rate_decay_factor: multiplicative factor by which to decay
learning rate.
burnin_learning_rate: initial learning rate during burn-in period. If
0.0 (which is the default), then the burn-in learning rate is simply
set to learning_rate_base.
burnin_steps: number of steps to use burnin learning rate.
min_learning_rate: the minimum learning rate.
    staircase: whether to use staircase decay.
Returns:
If executing eagerly:
returns a no-arg callable that outputs the (scalar)
float tensor learning rate given the current value of global_step.
If in a graph:
immediately returns a (scalar) float tensor representing learning rate.
"""
if burnin_learning_rate == 0:
burnin_learning_rate = learning_rate_base
def eager_decay_rate():
"""Callable to compute the learning rate."""
post_burnin_learning_rate = tf.train.exponential_decay(
learning_rate_base,
global_step - burnin_steps,
learning_rate_decay_steps,
learning_rate_decay_factor,
staircase=staircase)
if callable(post_burnin_learning_rate):
post_burnin_learning_rate = post_burnin_learning_rate()
return tf.maximum(tf.where(
tf.less(tf.cast(global_step, tf.int32), tf.constant(burnin_steps)),
tf.constant(burnin_learning_rate),
post_burnin_learning_rate), min_learning_rate, name='learning_rate')
return _learning_rate_return_value(eager_decay_rate)
def exponential_decay_with_warmup(global_step,
learning_rate_base,
learning_rate_decay_steps,
learning_rate_decay_factor,
warmup_learning_rate=0.0,
warmup_steps=0,
min_learning_rate=0.0,
staircase=True):
"""Exponential decay schedule with warm up period.
Args:
global_step: int tensor representing global step.
learning_rate_base: base learning rate.
learning_rate_decay_steps: steps to take between decaying the learning rate.
Note that this includes the number of burn-in steps.
learning_rate_decay_factor: multiplicative factor by which to decay learning
rate.
warmup_learning_rate: initial learning rate during warmup period.
warmup_steps: number of steps to use warmup learning rate.
min_learning_rate: the minimum learning rate.
    staircase: whether to use staircase decay.
Returns:
If executing eagerly:
returns a no-arg callable that outputs the (scalar)
float tensor learning rate given the current value of global_step.
If in a graph:
immediately returns a (scalar) float tensor representing learning rate.
"""
def eager_decay_rate():
"""Callable to compute the learning rate."""
post_warmup_learning_rate = tf.train.exponential_decay(
learning_rate_base,
global_step - warmup_steps,
learning_rate_decay_steps,
learning_rate_decay_factor,
staircase=staircase)
if callable(post_warmup_learning_rate):
post_warmup_learning_rate = post_warmup_learning_rate()
if learning_rate_base < warmup_learning_rate:
raise ValueError('learning_rate_base must be larger or equal to '
'warmup_learning_rate.')
slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
warmup_rate = slope * tf.cast(global_step,
tf.float32) + warmup_learning_rate
learning_rate = tf.where(
tf.less(tf.cast(global_step, tf.int32), tf.constant(warmup_steps)),
warmup_rate,
tf.maximum(post_warmup_learning_rate, min_learning_rate),
name='learning_rate')
return learning_rate
return _learning_rate_return_value(eager_decay_rate)
def cosine_decay_with_warmup(global_step,
learning_rate_base,
total_steps,
warmup_learning_rate=0.0,
warmup_steps=0,
hold_base_rate_steps=0):
"""Cosine decay schedule with warm up period.
Cosine annealing learning rate as described in:
Loshchilov and Hutter, SGDR: Stochastic Gradient Descent with Warm Restarts.
ICLR 2017. https://arxiv.org/abs/1608.03983
In this schedule, the learning rate grows linearly from warmup_learning_rate
to learning_rate_base for warmup_steps, then transitions to a cosine decay
schedule.
Args:
global_step: int64 (scalar) tensor representing global step.
learning_rate_base: base learning rate.
total_steps: total number of training steps.
warmup_learning_rate: initial learning rate for warm up.
warmup_steps: number of warmup steps.
hold_base_rate_steps: Optional number of steps to hold base learning rate
before decaying.
Returns:
If executing eagerly:
returns a no-arg callable that outputs the (scalar)
float tensor learning rate given the current value of global_step.
If in a graph:
immediately returns a (scalar) float tensor representing learning rate.
Raises:
ValueError: if warmup_learning_rate is larger than learning_rate_base,
or if warmup_steps is larger than total_steps.
"""
if total_steps < warmup_steps:
raise ValueError('total_steps must be larger or equal to '
'warmup_steps.')
def eager_decay_rate():
"""Callable to compute the learning rate."""
learning_rate = 0.5 * learning_rate_base * (1 + tf.cos(
np.pi *
(tf.cast(global_step, tf.float32) - warmup_steps - hold_base_rate_steps
) / float(total_steps - warmup_steps - hold_base_rate_steps)))
if hold_base_rate_steps > 0:
learning_rate = tf.where(
global_step > warmup_steps + hold_base_rate_steps,
learning_rate, learning_rate_base)
if warmup_steps > 0:
if learning_rate_base < warmup_learning_rate:
raise ValueError('learning_rate_base must be larger or equal to '
'warmup_learning_rate.')
slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
warmup_rate = slope * tf.cast(global_step,
tf.float32) + warmup_learning_rate
learning_rate = tf.where(global_step < warmup_steps, warmup_rate,
learning_rate)
return tf.where(global_step > total_steps, 0.0, learning_rate,
name='learning_rate')
return _learning_rate_return_value(eager_decay_rate)
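# A hedged worked example of the cosine schedule above (illustrative numbers
# only): with learning_rate_base=0.01, total_steps=100, warmup_steps=0 and
# hold_base_rate_steps=0, the rate is 0.01 at step 0, 0.005 at step 50
# (since cos(pi * 50 / 100) == 0), and 0.0 once global_step exceeds
# total_steps.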
def manual_stepping(global_step, boundaries, rates, warmup=False):
"""Manually stepped learning rate schedule.
This function provides fine grained control over learning rates. One must
specify a sequence of learning rates as well as a set of integer steps
at which the current learning rate must transition to the next. For example,
if boundaries = [5, 10] and rates = [.1, .01, .001], then the learning
rate returned by this function is .1 for global_step=0,...,4, .01 for
global_step=5...9, and .001 for global_step=10 and onward.
Args:
global_step: int64 (scalar) tensor representing global step.
boundaries: a list of global steps at which to switch learning
rates. This list is assumed to consist of increasing positive integers.
rates: a list of (float) learning rates corresponding to intervals between
the boundaries. The length of this list must be exactly
len(boundaries) + 1.
warmup: Whether to linearly interpolate learning rate for steps in
[0, boundaries[0]].
Returns:
If executing eagerly:
returns a no-arg callable that outputs the (scalar)
float tensor learning rate given the current value of global_step.
If in a graph:
immediately returns a (scalar) float tensor representing learning rate.
Raises:
ValueError: if one of the following checks fails:
1. boundaries is a strictly increasing list of positive integers
2. len(rates) == len(boundaries) + 1
3. boundaries[0] != 0
"""
if any([b < 0 for b in boundaries]) or any(
[not isinstance(b, int) for b in boundaries]):
raise ValueError('boundaries must be a list of positive integers')
if any([bnext <= b for bnext, b in zip(boundaries[1:], boundaries[:-1])]):
raise ValueError('Entries in boundaries must be strictly increasing.')
if any([not isinstance(r, float) for r in rates]):
raise ValueError('Learning rates must be floats')
if len(rates) != len(boundaries) + 1:
raise ValueError('Number of provided learning rates must exceed '
'number of boundary points by exactly 1.')
if boundaries and boundaries[0] == 0:
raise ValueError('First step cannot be zero.')
if warmup and boundaries:
slope = (rates[1] - rates[0]) * 1.0 / boundaries[0]
warmup_steps = list(range(boundaries[0]))
warmup_rates = [rates[0] + slope * step for step in warmup_steps]
boundaries = warmup_steps + boundaries
rates = warmup_rates + rates[1:]
else:
boundaries = [0] + boundaries
num_boundaries = len(boundaries)
def eager_decay_rate():
"""Callable to compute the learning rate."""
rate_index = tf.reduce_max(tf.where(
tf.greater_equal(global_step, boundaries),
list(range(num_boundaries)),
[0] * num_boundaries))
return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries),
name='learning_rate')
return _learning_rate_return_value(eager_decay_rate)
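# --- Hedged usage sketch (not part of the original module) ---
# Mirrors the docstring example of manual_stepping: with boundaries=[5, 10] and
# rates=[.1, .01, .001] the schedule yields .1 for steps 0..4, .01 for steps
# 5..9 and .001 afterwards. The step values below are illustrative only; the
# callable check covers eager execution, where a no-arg callable is returned,
# while graph mode returns a tensor directly.
if __name__ == '__main__':
  for step_value in [0, 4, 5, 9, 10, 20]:
    step = tf.constant(step_value, dtype=tf.int64)
    learning_rate = manual_stepping(
        step, boundaries=[5, 10], rates=[0.1, 0.01, 0.001])
    if callable(learning_rate):
      learning_rate = learning_rate()
    print(step_value, learning_rate)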
| 11,693 | 41.216606 | 80 | py |
models | models-master/research/object_detection/utils/np_mask_ops.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for [N, height, width] numpy arrays representing masks.
Example mask operations that are supported:
* Areas: compute mask areas
* IOU: pairwise intersection-over-union scores
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
EPSILON = 1e-7
def area(masks):
"""Computes area of masks.
Args:
masks: Numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
    a numpy array with shape [N] representing mask areas.
Raises:
ValueError: If masks.dtype is not np.uint8
"""
if masks.dtype != np.uint8:
raise ValueError('Masks type should be np.uint8')
return np.sum(masks, axis=(1, 2), dtype=np.float32)
def intersection(masks1, masks2):
"""Compute pairwise intersection areas between masks.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
    a numpy array with shape [N, M] representing pairwise intersection area.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
n = masks1.shape[0]
m = masks2.shape[0]
answer = np.zeros([n, m], dtype=np.float32)
for i in np.arange(n):
for j in np.arange(m):
answer[i, j] = np.sum(np.minimum(masks1[i], masks2[j]), dtype=np.float32)
return answer
def iou(masks1, masks2):
"""Computes pairwise intersection-over-union between mask collections.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
    masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
intersect = intersection(masks1, masks2)
area1 = area(masks1)
area2 = area(masks2)
union = np.expand_dims(area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / np.maximum(union, EPSILON)
def ioa(masks1, masks2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two masks, mask1 and mask2 is defined as
their intersection area over mask2's area. Note that ioa is not symmetric,
that is, IOA(mask1, mask2) != IOA(mask2, mask1).
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
    masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
intersect = intersection(masks1, masks2)
areas = np.expand_dims(area(masks2), axis=0)
return intersect / (areas + EPSILON)
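# --- Hedged usage sketch (not part of the original module) ---
# Two made-up 4x4 masks that overlap in a single pixel: each mask covers 4
# pixels, the intersection is 1 pixel, so IOU = 1 / (4 + 4 - 1) = 1/7 and
# IOA(masks_a, masks_b) = 1 / 4.
if __name__ == '__main__':
  masks_a = np.zeros([1, 4, 4], dtype=np.uint8)
  masks_a[0, 0:2, 0:2] = 1
  masks_b = np.zeros([1, 4, 4], dtype=np.uint8)
  masks_b[0, 1:3, 1:3] = 1
  print(area(masks_a))                   # -> [4.]
  print(intersection(masks_a, masks_b))  # -> [[1.]]
  print(iou(masks_a, masks_b))           # -> [[0.142857...]]
  print(ioa(masks_a, masks_b))           # -> [[~0.25]]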
| 4,325 | 33.608 | 80 | py |
models | models-master/research/object_detection/utils/vrd_evaluation.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluator class for Visual Relations Detection.
VRDDetectionEvaluator is a class which manages ground truth information of a
visual relations detection (vrd) dataset, and computes frequently used detection
metrics such as Precision, Recall, Recall@k, of the provided vrd detection
results.
It supports the following operations:
1) Adding ground truth information of images sequentially.
2) Adding detection results of images sequentially.
3) Evaluating detection metrics on already inserted detection results.
Note1: groundtruth should be inserted before evaluation.
Note2: This module operates on numpy boxes and box lists.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
import collections
import logging
import numpy as np
import six
from six.moves import range
from object_detection.core import standard_fields
from object_detection.utils import metrics
from object_detection.utils import object_detection_evaluation
from object_detection.utils import per_image_vrd_evaluation
# Below standard input numpy datatypes are defined:
# box_data_type - datatype of the groundtruth visual relations box annotations;
# this datatype consists of two named boxes: subject bounding box and object
# bounding box. Each box is of the format [y_min, x_min, y_max, x_max], each
# coordinate being of type float32.
# label_data_type - corresponding datatype of the visual relations label
# annotations; it consists of three numerical class labels: subject class label,
# object class label and relation class label, each class label being of type
# int32.
vrd_box_data_type = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))])
single_box_data_type = np.dtype([('box', 'f4', (4,))])
label_data_type = np.dtype([('subject', 'i4'), ('object', 'i4'), ('relation',
'i4')])
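# A hedged construction sketch (values are illustrative only): one groundtruth
# relation tuple packed into the structured dtypes above, i.e. a subject box,
# an object box, and the matching (subject, object, relation) class labels.
#
#   gt_boxes = np.array([([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_box_data_type)
#   gt_labels = np.array([(1, 2, 3)], dtype=label_data_type)
#   gt_boxes['subject']    # -> array([[0., 0., 1., 1.]], dtype=float32)
#   gt_labels['relation']  # -> array([3], dtype=int32)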
class VRDDetectionEvaluator(object_detection_evaluation.DetectionEvaluator):
"""A class to evaluate VRD detections.
This class serves as a base class for VRD evaluation in two settings:
- phrase detection
- relation detection.
"""
def __init__(self, matching_iou_threshold=0.5, metric_prefix=None):
"""Constructor.
Args:
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
metric_prefix: (optional) string prefix for metric name; if None, no
prefix is used.
"""
super(VRDDetectionEvaluator, self).__init__([])
self._matching_iou_threshold = matching_iou_threshold
self._evaluation = _VRDDetectionEvaluation(
matching_iou_threshold=self._matching_iou_threshold)
self._image_ids = set([])
self._metric_prefix = (metric_prefix + '_') if metric_prefix else ''
self._evaluatable_labels = {}
self._negative_labels = {}
@abstractmethod
def _process_groundtruth_boxes(self, groundtruth_box_tuples):
"""Pre-processes boxes before adding them to the VRDDetectionEvaluation.
Phrase detection and Relation detection subclasses re-implement this method
depending on the task.
Args:
groundtruth_box_tuples: A numpy array of structures with the shape
[M, 1], each structure containing the same number of named bounding
boxes. Each box is of the format [y_min, x_min, y_max, x_max] (see
datatype vrd_box_data_type, single_box_data_type above).
"""
raise NotImplementedError(
        '_process_groundtruth_boxes method should be implemented in subclasses '
        'of VRDDetectionEvaluator.')
@abstractmethod
def _process_detection_boxes(self, detections_box_tuples):
"""Pre-processes boxes before adding them to the VRDDetectionEvaluation.
Phrase detection and Relation detection subclasses re-implement this method
depending on the task.
Args:
detections_box_tuples: A numpy array of structures with the shape
[M, 1], each structure containing the same number of named bounding
boxes. Each box is of the format [y_min, x_min, y_max, x_max] (see
datatype vrd_box_data_type, single_box_data_type above).
"""
raise NotImplementedError(
        '_process_detection_boxes method should be implemented in subclasses '
        'of VRDDetectionEvaluator.')
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: A numpy array
of structures with the shape [M, 1], representing M tuples, each tuple
containing the same number of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max] (see
datatype vrd_box_data_type, single_box_data_type above).
standard_fields.InputDataFields.groundtruth_classes: A numpy array of
structures shape [M, 1], representing the class labels of the
corresponding bounding boxes and possibly additional classes (see
datatype label_data_type above).
standard_fields.InputDataFields.groundtruth_image_classes: numpy array
of shape [K] containing verified labels.
Raises:
ValueError: On adding groundtruth for an image more than once.
"""
if image_id in self._image_ids:
raise ValueError('Image with id {} already added.'.format(image_id))
groundtruth_class_tuples = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes])
groundtruth_box_tuples = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_boxes])
self._evaluation.add_single_ground_truth_image_info(
image_key=image_id,
groundtruth_box_tuples=self._process_groundtruth_boxes(
groundtruth_box_tuples),
groundtruth_class_tuples=groundtruth_class_tuples)
self._image_ids.update([image_id])
all_classes = []
for field in groundtruth_box_tuples.dtype.fields:
all_classes.append(groundtruth_class_tuples[field])
    groundtruth_positive_classes = np.unique(np.concatenate(all_classes))
    verified_labels = groundtruth_dict.get(
        standard_fields.InputDataFields.groundtruth_image_classes,
        np.array([], dtype=int))
    self._evaluatable_labels[image_id] = np.unique(
        np.concatenate((verified_labels, groundtruth_positive_classes)))
    self._negative_labels[image_id] = np.setdiff1d(verified_labels,
                                                   groundtruth_positive_classes)
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: A numpy array of
structures with shape [N, 1], representing N tuples, each tuple
containing the same number of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max] (as an example
see datatype vrd_box_data_type, single_box_data_type above).
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [N] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: A numpy array
of structures shape [N, 1], representing the class labels of the
corresponding bounding boxes and possibly additional classes (see
datatype label_data_type above).
"""
if image_id not in self._image_ids:
logging.warning('No groundtruth for the image with id %s.', image_id)
      # Groundtruth is expected to be inserted first. If it is missing, the
      # image is registered with empty label sets so that its detections are
      # effectively ignored.
self._image_ids.update([image_id])
self._negative_labels[image_id] = np.array([])
self._evaluatable_labels[image_id] = np.array([])
num_detections = detections_dict[
standard_fields.DetectionResultFields.detection_boxes].shape[0]
detection_class_tuples = detections_dict[
standard_fields.DetectionResultFields.detection_classes]
detection_box_tuples = detections_dict[
standard_fields.DetectionResultFields.detection_boxes]
negative_selector = np.zeros(num_detections, dtype=bool)
selector = np.ones(num_detections, dtype=bool)
# Only check boxable labels
for field in detection_box_tuples.dtype.fields:
      # Verify if one of the labels is negative (this is a sure false positive).
negative_selector |= np.isin(detection_class_tuples[field],
self._negative_labels[image_id])
# Verify if all labels are verified
selector &= np.isin(detection_class_tuples[field],
self._evaluatable_labels[image_id])
selector |= negative_selector
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_box_tuples=self._process_detection_boxes(
detection_box_tuples[selector]),
detected_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores][selector],
detected_class_tuples=detection_class_tuples[selector])
def evaluate(self, relationships=None):
"""Compute evaluation result.
Args:
relationships: A dictionary of numerical label-text label mapping; if
specified, returns per-relationship AP.
Returns:
A dictionary of metrics with the following fields -
summary_metrics:
'weightedAP@<matching_iou_threshold>IOU' : weighted average precision
at the specified IOU threshold.
'AP@<matching_iou_threshold>IOU/<relationship>' : AP per relationship.
'mAP@<matching_iou_threshold>IOU': mean average precision at the
specified IOU threshold.
'Recall@50@<matching_iou_threshold>IOU': recall@50 at the specified IOU
threshold.
'Recall@100@<matching_iou_threshold>IOU': recall@100 at the specified
IOU threshold.
if relationships is specified, returns <relationship> in AP metrics as
readable names, otherwise the names correspond to class numbers.
"""
(weighted_average_precision, mean_average_precision, average_precisions, _,
_, recall_50, recall_100, _, _) = (
self._evaluation.evaluate())
vrd_metrics = {
(self._metric_prefix + 'weightedAP@{}IOU'.format(
self._matching_iou_threshold)):
weighted_average_precision,
self._metric_prefix + 'mAP@{}IOU'.format(self._matching_iou_threshold):
mean_average_precision,
self._metric_prefix + 'Recall@50@{}IOU'.format(
self._matching_iou_threshold):
recall_50,
self._metric_prefix + 'Recall@100@{}IOU'.format(
self._matching_iou_threshold):
recall_100,
}
if relationships:
for key, average_precision in six.iteritems(average_precisions):
vrd_metrics[self._metric_prefix + 'AP@{}IOU/{}'.format(
self._matching_iou_threshold,
relationships[key])] = average_precision
else:
for key, average_precision in six.iteritems(average_precisions):
vrd_metrics[self._metric_prefix + 'AP@{}IOU/{}'.format(
self._matching_iou_threshold, key)] = average_precision
return vrd_metrics
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._evaluation = _VRDDetectionEvaluation(
matching_iou_threshold=self._matching_iou_threshold)
self._image_ids.clear()
self._negative_labels.clear()
self._evaluatable_labels.clear()
class VRDRelationDetectionEvaluator(VRDDetectionEvaluator):
"""A class to evaluate VRD detections in relations setting.
  Expected groundtruth box datatype is vrd_box_data_type, expected groundtruth
  labels datatype is label_data_type.
  Expected detection box datatype is vrd_box_data_type, expected detection
  labels datatype is label_data_type.
"""
def __init__(self, matching_iou_threshold=0.5):
super(VRDRelationDetectionEvaluator, self).__init__(
matching_iou_threshold=matching_iou_threshold,
metric_prefix='VRDMetric_Relationships')
def _process_groundtruth_boxes(self, groundtruth_box_tuples):
"""Pre-processes boxes before adding them to the VRDDetectionEvaluation.
Args:
groundtruth_box_tuples: A numpy array of structures with the shape
[M, 1], each structure containing the same number of named bounding
boxes. Each box is of the format [y_min, x_min, y_max, x_max].
Returns:
Unchanged input.
"""
return groundtruth_box_tuples
def _process_detection_boxes(self, detections_box_tuples):
"""Pre-processes boxes before adding them to the VRDDetectionEvaluation.
Phrase detection and Relation detection subclasses re-implement this method
depending on the task.
Args:
detections_box_tuples: A numpy array of structures with the shape
[M, 1], each structure containing the same number of named bounding
boxes. Each box is of the format [y_min, x_min, y_max, x_max] (see
datatype vrd_box_data_type, single_box_data_type above).
Returns:
Unchanged input.
"""
return detections_box_tuples
class VRDPhraseDetectionEvaluator(VRDDetectionEvaluator):
"""A class to evaluate VRD detections in phrase setting.
  Expected groundtruth box datatype is vrd_box_data_type, expected groundtruth
labels datatype is label_data_type.
Expected detection box datatype is single_box_data_type, expected detection
labels datatype is label_data_type.
"""
def __init__(self, matching_iou_threshold=0.5):
super(VRDPhraseDetectionEvaluator, self).__init__(
matching_iou_threshold=matching_iou_threshold,
metric_prefix='VRDMetric_Phrases')
def _process_groundtruth_boxes(self, groundtruth_box_tuples):
"""Pre-processes boxes before adding them to the VRDDetectionEvaluation.
In case of phrase evaluation task, evaluation expects exactly one bounding
box containing all objects in the phrase. This bounding box is computed
as an enclosing box of all groundtruth boxes of a phrase.
Args:
groundtruth_box_tuples: A numpy array of structures with the shape
[M, 1], each structure containing the same number of named bounding
boxes. Each box is of the format [y_min, x_min, y_max, x_max]. See
vrd_box_data_type for an example of structure.
Returns:
result: A numpy array of structures with the shape [M, 1], each
structure containing exactly one named bounding box. i-th output
structure corresponds to the result of processing i-th input structure,
where the named bounding box is computed as an enclosing bounding box
of all bounding boxes of the i-th input structure.
"""
first_box_key = next(six.iterkeys(groundtruth_box_tuples.dtype.fields))
miny = groundtruth_box_tuples[first_box_key][:, 0]
minx = groundtruth_box_tuples[first_box_key][:, 1]
maxy = groundtruth_box_tuples[first_box_key][:, 2]
maxx = groundtruth_box_tuples[first_box_key][:, 3]
for fields in groundtruth_box_tuples.dtype.fields:
miny = np.minimum(groundtruth_box_tuples[fields][:, 0], miny)
minx = np.minimum(groundtruth_box_tuples[fields][:, 1], minx)
maxy = np.maximum(groundtruth_box_tuples[fields][:, 2], maxy)
maxx = np.maximum(groundtruth_box_tuples[fields][:, 3], maxx)
data_result = []
for i in range(groundtruth_box_tuples.shape[0]):
data_result.append(([miny[i], minx[i], maxy[i], maxx[i]],))
result = np.array(data_result, dtype=[('box', 'f4', (4,))])
return result
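  # Hedged illustration of the enclosing-box computation above (made-up
  # coordinates): an input tuple (subject=[0, 0, 1, 1], object=[1, 1, 2, 2])
  # is collapsed into the single named box [0, 0, 2, 2].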
def _process_detection_boxes(self, detections_box_tuples):
"""Pre-processes boxes before adding them to the VRDDetectionEvaluation.
In case of phrase evaluation task, evaluation expects exactly one bounding
box containing all objects in the phrase. This bounding box is computed
as an enclosing box of all groundtruth boxes of a phrase.
Args:
detections_box_tuples: A numpy array of structures with the shape
[M, 1], each structure containing the same number of named bounding
boxes. Each box is of the format [y_min, x_min, y_max, x_max]. See
vrd_box_data_type for an example of this structure.
Returns:
result: A numpy array of structures with the shape [M, 1], each
structure containing exactly one named bounding box. i-th output
structure corresponds to the result of processing i-th input structure,
where the named bounding box is computed as an enclosing bounding box
of all bounding boxes of the i-th input structure.
"""
first_box_key = next(six.iterkeys(detections_box_tuples.dtype.fields))
miny = detections_box_tuples[first_box_key][:, 0]
minx = detections_box_tuples[first_box_key][:, 1]
maxy = detections_box_tuples[first_box_key][:, 2]
maxx = detections_box_tuples[first_box_key][:, 3]
for fields in detections_box_tuples.dtype.fields:
miny = np.minimum(detections_box_tuples[fields][:, 0], miny)
minx = np.minimum(detections_box_tuples[fields][:, 1], minx)
maxy = np.maximum(detections_box_tuples[fields][:, 2], maxy)
maxx = np.maximum(detections_box_tuples[fields][:, 3], maxx)
data_result = []
for i in range(detections_box_tuples.shape[0]):
data_result.append(([miny[i], minx[i], maxy[i], maxx[i]],))
result = np.array(data_result, dtype=[('box', 'f4', (4,))])
return result
VRDDetectionEvalMetrics = collections.namedtuple('VRDDetectionEvalMetrics', [
'weighted_average_precision', 'mean_average_precision',
'average_precisions', 'precisions', 'recalls', 'recall_50', 'recall_100',
'median_rank_50', 'median_rank_100'
])
class _VRDDetectionEvaluation(object):
"""Performs metric computation for the VRD task. This class is internal.
"""
def __init__(self, matching_iou_threshold=0.5):
"""Constructor.
Args:
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
"""
self._per_image_eval = per_image_vrd_evaluation.PerImageVRDEvaluation(
matching_iou_threshold=matching_iou_threshold)
self._groundtruth_box_tuples = {}
self._groundtruth_class_tuples = {}
self._num_gt_instances = 0
self._num_gt_imgs = 0
self._num_gt_instances_per_relationship = {}
self.clear_detections()
def clear_detections(self):
"""Clears detections."""
self._detection_keys = set()
self._scores = []
self._relation_field_values = []
self._tp_fp_labels = []
self._average_precisions = {}
self._precisions = []
self._recalls = []
def add_single_ground_truth_image_info(
self, image_key, groundtruth_box_tuples, groundtruth_class_tuples):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
groundtruth_box_tuples: A numpy array of structures with the shape
[M, 1], representing M tuples, each tuple containing the same number
of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max].
groundtruth_class_tuples: A numpy array of structures shape [M, 1],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
"""
if image_key in self._groundtruth_box_tuples:
logging.warning(
'image %s has already been added to the ground truth database.',
image_key)
return
self._groundtruth_box_tuples[image_key] = groundtruth_box_tuples
self._groundtruth_class_tuples[image_key] = groundtruth_class_tuples
self._update_groundtruth_statistics(groundtruth_class_tuples)
def add_single_detected_image_info(self, image_key, detected_box_tuples,
detected_scores, detected_class_tuples):
"""Adds detections for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
detected_box_tuples: A numpy array of structures with shape [N, 1],
representing N tuples, each tuple containing the same number of named
bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max].
detected_scores: A float numpy array of shape [N, 1], representing
the confidence scores of the detected N object instances.
detected_class_tuples: A numpy array of structures shape [N, 1],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
"""
self._detection_keys.add(image_key)
if image_key in self._groundtruth_box_tuples:
groundtruth_box_tuples = self._groundtruth_box_tuples[image_key]
groundtruth_class_tuples = self._groundtruth_class_tuples[image_key]
else:
groundtruth_box_tuples = np.empty(
shape=[0, 4], dtype=detected_box_tuples.dtype)
groundtruth_class_tuples = np.array([], dtype=detected_class_tuples.dtype)
scores, tp_fp_labels, mapping = (
self._per_image_eval.compute_detection_tp_fp(
detected_box_tuples=detected_box_tuples,
detected_scores=detected_scores,
detected_class_tuples=detected_class_tuples,
groundtruth_box_tuples=groundtruth_box_tuples,
groundtruth_class_tuples=groundtruth_class_tuples))
self._scores += [scores]
self._tp_fp_labels += [tp_fp_labels]
self._relation_field_values += [detected_class_tuples[mapping]['relation']]
def _update_groundtruth_statistics(self, groundtruth_class_tuples):
"""Updates grouth truth statistics.
Args:
groundtruth_class_tuples: A numpy array of structures shape [M, 1],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
"""
self._num_gt_instances += groundtruth_class_tuples.shape[0]
self._num_gt_imgs += 1
for relation_field_value in np.unique(groundtruth_class_tuples['relation']):
if relation_field_value not in self._num_gt_instances_per_relationship:
self._num_gt_instances_per_relationship[relation_field_value] = 0
self._num_gt_instances_per_relationship[relation_field_value] += np.sum(
groundtruth_class_tuples['relation'] == relation_field_value)
def evaluate(self):
"""Computes evaluation result.
Returns:
      A VRDDetectionEvalMetrics named tuple with the following fields -
        weighted_average_precision: average precision computed over all
          relationship classes jointly.
        mean_average_precision: mean of the per-relationship average precisions.
        average_precisions: a dict mapping each relationship label to its
          average precision.
        precisions: an array of precisions.
        recalls: an array of recalls.
        recall_50: recall computed on the 50 top-scoring samples.
        recall_100: recall computed on the 100 top-scoring samples.
        median_rank_50: median rank computed on the 50 top-scoring samples.
        median_rank_100: median rank computed on the 100 top-scoring samples.
"""
if self._num_gt_instances == 0:
logging.warning('No ground truth instances')
if not self._scores:
scores = np.array([], dtype=float)
tp_fp_labels = np.array([], dtype=bool)
else:
scores = np.concatenate(self._scores)
tp_fp_labels = np.concatenate(self._tp_fp_labels)
relation_field_values = np.concatenate(self._relation_field_values)
for relation_field_value, _ in (six.iteritems(
self._num_gt_instances_per_relationship)):
precisions, recalls = metrics.compute_precision_recall(
scores[relation_field_values == relation_field_value],
tp_fp_labels[relation_field_values == relation_field_value],
self._num_gt_instances_per_relationship[relation_field_value])
self._average_precisions[
relation_field_value] = metrics.compute_average_precision(
precisions, recalls)
self._mean_average_precision = np.mean(
list(self._average_precisions.values()))
self._precisions, self._recalls = metrics.compute_precision_recall(
scores, tp_fp_labels, self._num_gt_instances)
self._weighted_average_precision = metrics.compute_average_precision(
self._precisions, self._recalls)
self._recall_50 = (
metrics.compute_recall_at_k(self._tp_fp_labels, self._num_gt_instances,
50))
self._median_rank_50 = (
metrics.compute_median_rank_at_k(self._tp_fp_labels, 50))
self._recall_100 = (
metrics.compute_recall_at_k(self._tp_fp_labels, self._num_gt_instances,
100))
self._median_rank_100 = (
metrics.compute_median_rank_at_k(self._tp_fp_labels, 100))
return VRDDetectionEvalMetrics(
self._weighted_average_precision, self._mean_average_precision,
self._average_precisions, self._precisions, self._recalls,
self._recall_50, self._recall_100, self._median_rank_50,
self._median_rank_100)
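# --- Hedged end-to-end sketch (not part of the original module) ---
# Illustrative values only: one groundtruth relation and one perfectly matching
# detection are fed to VRDRelationDetectionEvaluator through the
# standard_fields dictionaries, then the aggregated metrics dict is printed.
if __name__ == '__main__':
  evaluator = VRDRelationDetectionEvaluator(matching_iou_threshold=0.5)
  gt_boxes = np.array([([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_box_data_type)
  gt_labels = np.array([(1, 2, 3)], dtype=label_data_type)
  evaluator.add_single_ground_truth_image_info('img1', {
      standard_fields.InputDataFields.groundtruth_boxes: gt_boxes,
      standard_fields.InputDataFields.groundtruth_classes: gt_labels,
  })
  det_boxes = np.array([([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_box_data_type)
  det_labels = np.array([(1, 2, 3)], dtype=label_data_type)
  det_scores = np.array([0.9], dtype=float)
  evaluator.add_single_detected_image_info('img1', {
      standard_fields.DetectionResultFields.detection_boxes: det_boxes,
      standard_fields.DetectionResultFields.detection_scores: det_scores,
      standard_fields.DetectionResultFields.detection_classes: det_labels,
  })
  for metric_name, metric_value in sorted(evaluator.evaluate().items()):
    print(metric_name, metric_value)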
| 26,028 | 43.191851 | 80 | py |
models | models-master/research/object_detection/utils/config_util.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for reading and updating configuration files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import text_format
import tensorflow.compat.v1 as tf
from tensorflow.python.lib.io import file_io
from object_detection.protos import eval_pb2
from object_detection.protos import graph_rewriter_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
def get_image_resizer_config(model_config):
"""Returns the image resizer config from a model config.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
An image_resizer_pb2.ImageResizer.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "image_resizer"):
return getattr(meta_architecture_config, "image_resizer")
else:
raise ValueError("{} has no image_reszier_config".format(
meta_architecture))
def get_spatial_image_size(image_resizer_config):
"""Returns expected spatial size of the output image from a given config.
Args:
image_resizer_config: An image_resizer_pb2.ImageResizer.
Returns:
A list of two integers of the form [height, width]. `height` and `width` are
    set to -1 if they cannot be determined during graph construction.
Raises:
ValueError: If the model type is not recognized.
"""
if image_resizer_config.HasField("fixed_shape_resizer"):
return [
image_resizer_config.fixed_shape_resizer.height,
image_resizer_config.fixed_shape_resizer.width
]
if image_resizer_config.HasField("keep_aspect_ratio_resizer"):
if image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension:
return [image_resizer_config.keep_aspect_ratio_resizer.max_dimension] * 2
else:
return [-1, -1]
if image_resizer_config.HasField(
"identity_resizer") or image_resizer_config.HasField(
"conditional_shape_resizer"):
return [-1, -1]
raise ValueError("Unknown image resizer type.")
def get_max_num_context_features(model_config):
"""Returns maximum number of context features from a given config.
Args:
model_config: A model config file.
Returns:
An integer specifying the max number of context features if the model
config contains context_config, None otherwise
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "context_config"):
return meta_architecture_config.context_config.max_num_context_features
def get_context_feature_length(model_config):
"""Returns context feature length from a given config.
Args:
model_config: A model config file.
Returns:
An integer specifying the fixed length of each feature in context_features.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "context_config"):
return meta_architecture_config.context_config.context_feature_length
def get_configs_from_pipeline_file(pipeline_config_path, config_override=None):
"""Reads config from a file containing pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config_path: Path to pipeline_pb2.TrainEvalPipelineConfig text
proto.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override pipeline_config_path.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
    `train_input_config`, `eval_config`, `eval_input_config`. Values are the
corresponding config objects.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(pipeline_config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline_config)
if config_override:
text_format.Merge(config_override, pipeline_config)
return create_configs_from_pipeline_proto(pipeline_config)
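# A hedged usage sketch (the paths below are hypothetical): load a pipeline
# config, inspect it, and write a modified copy back to disk using the helpers
# defined in this module.
#
#   configs = get_configs_from_pipeline_file('/tmp/pipeline.config')
#   num_classes = get_number_of_classes(configs['model'])
#   pipeline_proto = create_pipeline_proto_from_configs(configs)
#   save_pipeline_config(pipeline_proto, '/tmp/exported_model_dir')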
def clear_fine_tune_checkpoint(pipeline_config_path,
new_pipeline_config_path):
"""Clears fine_tune_checkpoint and writes a new pipeline config file."""
configs = get_configs_from_pipeline_file(pipeline_config_path)
configs["train_config"].fine_tune_checkpoint = ""
configs["train_config"].load_all_detection_checkpoint_vars = False
pipeline_proto = create_pipeline_proto_from_configs(configs)
with tf.gfile.Open(new_pipeline_config_path, "wb") as f:
f.write(text_format.MessageToString(pipeline_proto))
def update_fine_tune_checkpoint_type(train_config):
"""Set `fine_tune_checkpoint_type` using `from_detection_checkpoint`.
`train_config.from_detection_checkpoint` field is deprecated. For backward
compatibility, this function sets `train_config.fine_tune_checkpoint_type`
based on `train_config.from_detection_checkpoint`.
Args:
train_config: train_pb2.TrainConfig proto object.
"""
if not train_config.fine_tune_checkpoint_type:
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = "detection"
else:
train_config.fine_tune_checkpoint_type = "classification"
def create_configs_from_pipeline_proto(pipeline_config):
"""Creates a configs dictionary from pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config: pipeline_pb2.TrainEvalPipelineConfig proto object.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
    `train_input_config`, `eval_config`, `eval_input_configs`. Values are
the corresponding config objects or list of config objects (only for
eval_input_configs).
"""
configs = {}
configs["model"] = pipeline_config.model
configs["train_config"] = pipeline_config.train_config
configs["train_input_config"] = pipeline_config.train_input_reader
configs["eval_config"] = pipeline_config.eval_config
configs["eval_input_configs"] = pipeline_config.eval_input_reader
# Keeps eval_input_config only for backwards compatibility. All clients should
# read eval_input_configs instead.
if configs["eval_input_configs"]:
configs["eval_input_config"] = configs["eval_input_configs"][0]
if pipeline_config.HasField("graph_rewriter"):
configs["graph_rewriter_config"] = pipeline_config.graph_rewriter
return configs
def get_graph_rewriter_config_from_file(graph_rewriter_config_file):
"""Parses config for graph rewriter.
Args:
graph_rewriter_config_file: file path to the graph rewriter config.
Returns:
graph_rewriter_pb2.GraphRewriter proto
"""
graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
with tf.gfile.GFile(graph_rewriter_config_file, "r") as f:
text_format.Merge(f.read(), graph_rewriter_config)
return graph_rewriter_config
def create_pipeline_proto_from_configs(configs):
"""Creates a pipeline_pb2.TrainEvalPipelineConfig from configs dictionary.
This function performs the inverse operation of
create_configs_from_pipeline_proto().
Args:
configs: Dictionary of configs. See get_configs_from_pipeline_file().
Returns:
A fully populated pipeline_pb2.TrainEvalPipelineConfig.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.CopyFrom(configs["model"])
pipeline_config.train_config.CopyFrom(configs["train_config"])
pipeline_config.train_input_reader.CopyFrom(configs["train_input_config"])
pipeline_config.eval_config.CopyFrom(configs["eval_config"])
pipeline_config.eval_input_reader.extend(configs["eval_input_configs"])
if "graph_rewriter_config" in configs:
pipeline_config.graph_rewriter.CopyFrom(configs["graph_rewriter_config"])
return pipeline_config
def save_pipeline_config(pipeline_config, directory):
"""Saves a pipeline config text file to disk.
Args:
pipeline_config: A pipeline_pb2.TrainEvalPipelineConfig.
directory: The model directory into which the pipeline config file will be
saved.
"""
if not file_io.file_exists(directory):
file_io.recursive_create_dir(directory)
pipeline_config_path = os.path.join(directory, "pipeline.config")
config_text = text_format.MessageToString(pipeline_config)
with tf.gfile.Open(pipeline_config_path, "wb") as f:
tf.logging.info("Writing pipeline config file to %s",
pipeline_config_path)
f.write(config_text)
def get_configs_from_multiple_files(model_config_path="",
train_config_path="",
train_input_config_path="",
eval_config_path="",
eval_input_config_path="",
graph_rewriter_config_path=""):
"""Reads training configuration from multiple config files.
Args:
model_config_path: Path to model_pb2.DetectionModel.
train_config_path: Path to train_pb2.TrainConfig.
train_input_config_path: Path to input_reader_pb2.InputReader.
eval_config_path: Path to eval_pb2.EvalConfig.
eval_input_config_path: Path to input_reader_pb2.InputReader.
graph_rewriter_config_path: Path to graph_rewriter_pb2.GraphRewriter.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Key/Values are
returned only for valid (non-empty) strings.
"""
configs = {}
if model_config_path:
model_config = model_pb2.DetectionModel()
with tf.gfile.GFile(model_config_path, "r") as f:
text_format.Merge(f.read(), model_config)
configs["model"] = model_config
if train_config_path:
train_config = train_pb2.TrainConfig()
with tf.gfile.GFile(train_config_path, "r") as f:
text_format.Merge(f.read(), train_config)
configs["train_config"] = train_config
if train_input_config_path:
train_input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(train_input_config_path, "r") as f:
text_format.Merge(f.read(), train_input_config)
configs["train_input_config"] = train_input_config
if eval_config_path:
eval_config = eval_pb2.EvalConfig()
with tf.gfile.GFile(eval_config_path, "r") as f:
text_format.Merge(f.read(), eval_config)
configs["eval_config"] = eval_config
if eval_input_config_path:
eval_input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(eval_input_config_path, "r") as f:
text_format.Merge(f.read(), eval_input_config)
configs["eval_input_configs"] = [eval_input_config]
if graph_rewriter_config_path:
configs["graph_rewriter_config"] = get_graph_rewriter_config_from_file(
graph_rewriter_config_path)
return configs
def get_number_of_classes(model_config):
"""Returns the number of classes for a detection model.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
Number of classes.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "num_classes"):
return meta_architecture_config.num_classes
else:
raise ValueError("{} does not have num_classes.".format(meta_architecture))
def get_optimizer_type(train_config):
"""Returns the optimizer type for training.
Args:
train_config: A train_pb2.TrainConfig.
Returns:
The type of the optimizer
"""
return train_config.optimizer.WhichOneof("optimizer")
def get_learning_rate_type(optimizer_config):
"""Returns the learning rate type for training.
Args:
optimizer_config: An optimizer_pb2.Optimizer.
Returns:
The type of the learning rate.
"""
return optimizer_config.learning_rate.WhichOneof("learning_rate")
def _is_generic_key(key):
"""Determines whether the key starts with a generic config dictionary key."""
for prefix in [
"graph_rewriter_config",
"model",
"train_input_config",
"train_config",
"eval_config"]:
if key.startswith(prefix + "."):
return True
return False
def _check_and_convert_legacy_input_config_key(key):
"""Checks key and converts legacy input config update to specific update.
Args:
key: string indicates the target of update operation.
Returns:
is_valid_input_config_key: A boolean indicating whether the input key is to
update input config(s).
key_name: 'eval_input_configs' or 'train_input_config' string if
is_valid_input_config_key is true. None if is_valid_input_config_key is
false.
input_name: always returns None since legacy input config key never
specifies the target input config. Keeping this output only to match the
output form defined for input config update.
field_name: the field name in input config. `key` itself if
is_valid_input_config_key is false.
"""
key_name = None
input_name = None
field_name = key
is_valid_input_config_key = True
if field_name == "train_shuffle":
key_name = "train_input_config"
field_name = "shuffle"
elif field_name == "eval_shuffle":
key_name = "eval_input_configs"
field_name = "shuffle"
elif field_name == "train_input_path":
key_name = "train_input_config"
field_name = "input_path"
elif field_name == "eval_input_path":
key_name = "eval_input_configs"
field_name = "input_path"
elif field_name == "append_train_input_path":
key_name = "train_input_config"
field_name = "input_path"
elif field_name == "append_eval_input_path":
key_name = "eval_input_configs"
field_name = "input_path"
else:
is_valid_input_config_key = False
return is_valid_input_config_key, key_name, input_name, field_name
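# For example, the legacy key "train_input_path" converts to
# (True, "train_input_config", None, "input_path"), while an unrecognized key
# such as "learning_rate" falls through as (False, None, None, "learning_rate").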
def check_and_parse_input_config_key(configs, key):
"""Checks key and returns specific fields if key is valid input config update.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key: string indicates the target of update operation.
Returns:
    is_valid_input_config_key: A boolean indicating whether the input key is to
      update input config(s).
key_name: 'eval_input_configs' or 'train_input_config' string if
is_valid_input_config_key is true. None if is_valid_input_config_key is
false.
input_name: the name of the input config to be updated. None if
is_valid_input_config_key is false.
field_name: the field name in input config. `key` itself if
is_valid_input_config_key is false.
Raises:
ValueError: when the input key format doesn't match any known formats.
ValueError: if key_name doesn't match 'eval_input_configs' or
'train_input_config'.
ValueError: if input_name doesn't match any name in train or eval input
configs.
ValueError: if field_name doesn't match any supported fields.
"""
key_name = None
input_name = None
field_name = None
fields = key.split(":")
if len(fields) == 1:
field_name = key
return _check_and_convert_legacy_input_config_key(key)
elif len(fields) == 3:
key_name = fields[0]
input_name = fields[1]
field_name = fields[2]
else:
raise ValueError("Invalid key format when overriding configs.")
# Checks if key_name is valid for specific update.
if key_name not in ["eval_input_configs", "train_input_config"]:
raise ValueError("Invalid key_name when overriding input config.")
# Checks if input_name is valid for specific update. For train input config it
# should match configs[key_name].name, for eval input configs it should match
# the name field of one of the eval_input_configs.
if isinstance(configs[key_name], input_reader_pb2.InputReader):
is_valid_input_name = configs[key_name].name == input_name
else:
is_valid_input_name = input_name in [
eval_input_config.name for eval_input_config in configs[key_name]
]
if not is_valid_input_name:
raise ValueError("Invalid input_name when overriding input config.")
# Checks if field_name is valid for specific update.
if field_name not in [
"input_path", "label_map_path", "shuffle", "mask_type",
"sample_1_of_n_examples"
]:
raise ValueError("Invalid field_name when overriding input config.")
return True, key_name, input_name, field_name
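# For example, given eval_input_configs containing an entry named "eval_coco"
# (a hypothetical name), the key "eval_input_configs:eval_coco:input_path"
# parses to (True, "eval_input_configs", "eval_coco", "input_path").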
def merge_external_params_with_configs(configs, hparams=None, kwargs_dict=None):
"""Updates `configs` dictionary based on supplied parameters.
This utility is for modifying specific fields in the object detection configs.
Say that one would like to experiment with different learning rates, momentum
values, or batch sizes. Rather than creating a new config text file for each
experiment, one can use a single base config file, and update particular
values.
There are two types of field overrides:
1. Strategy-based overrides, which update multiple relevant configuration
options. For example, updating `learning_rate` will update both the warmup and
final learning rates.
In this case key can be one of the following formats:
1. legacy update: single string that indicates the attribute to be
updated. E.g. 'label_map_path', 'eval_input_path', 'shuffle'.
Note that when updating fields (e.g. eval_input_path, eval_shuffle) in
eval_input_configs, the override will only be applied when
eval_input_configs has exactly 1 element.
2. specific update: colon separated string that indicates which field in
which input_config to update. It should have 3 fields:
- key_name: Name of the input config we should update, either
'train_input_config' or 'eval_input_configs'
- input_name: a 'name' that can be used to identify elements, especially
when configs[key_name] is a repeated field.
- field_name: name of the field that you want to override.
For example, given configs dict as below:
configs = {
'model': {...}
'train_config': {...}
'train_input_config': {...}
'eval_config': {...}
'eval_input_configs': [{ name:"eval_coco", ...},
{ name:"eval_voc", ... }]
}
Assume we want to update the input_path of the eval_input_config
whose name is 'eval_coco'. The `key` would then be:
'eval_input_configs:eval_coco:input_path'
  2. Generic key/value overrides, which update a specific parameter based on
  namespaced configuration keys. For example,
`model.ssd.loss.hard_example_miner.max_negatives_per_positive` will update the
hard example miner configuration for an SSD model config. Generic overrides
are automatically detected based on the namespaced keys.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
hparams: A `HParams`.
kwargs_dict: Extra keyword arguments that are treated the same way as
attribute/value pairs in `hparams`. Note that hyperparameters with the
same names will override keyword arguments.
Returns:
`configs` dictionary.
Raises:
ValueError: when the key string doesn't match any of its allowed formats.
"""
if kwargs_dict is None:
kwargs_dict = {}
if hparams:
kwargs_dict.update(hparams.values())
for key, value in kwargs_dict.items():
tf.logging.info("Maybe overwriting %s: %s", key, value)
# pylint: disable=g-explicit-bool-comparison
if value == "" or value is None:
continue
# pylint: enable=g-explicit-bool-comparison
elif _maybe_update_config_with_key_value(configs, key, value):
continue
elif _is_generic_key(key):
_update_generic(configs, key, value)
else:
tf.logging.info("Ignoring config override key: %s", key)
return configs
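# Minimal usage sketch (the path, input config name and values are
# hypothetical):
#
#   configs = get_configs_from_pipeline_file("path/to/pipeline.config")
#   configs = merge_external_params_with_configs(
#       configs,
#       kwargs_dict={
#           "batch_size": 64,
#           "eval_input_configs:eval_coco:input_path": "path/to/eval.record",
#       })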
def _maybe_update_config_with_key_value(configs, key, value):
"""Checks key type and updates `configs` with the key value pair accordingly.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key: String indicates the field(s) to be updated.
value: Value used to override existing field value.
Returns:
A boolean value that indicates whether the override succeeds.
Raises:
ValueError: when the key string doesn't match any of the formats above.
"""
is_valid_input_config_key, key_name, input_name, field_name = (
check_and_parse_input_config_key(configs, key))
if is_valid_input_config_key:
update_input_reader_config(
configs,
key_name=key_name,
input_name=input_name,
field_name=field_name,
value=value)
elif field_name == "learning_rate":
_update_initial_learning_rate(configs, value)
elif field_name == "batch_size":
_update_batch_size(configs, value)
elif field_name == "momentum_optimizer_value":
_update_momentum_optimizer_value(configs, value)
elif field_name == "classification_localization_weight_ratio":
# Localization weight is fixed to 1.0.
_update_classification_localization_weight_ratio(configs, value)
elif field_name == "focal_loss_gamma":
_update_focal_loss_gamma(configs, value)
elif field_name == "focal_loss_alpha":
_update_focal_loss_alpha(configs, value)
elif field_name == "train_steps":
_update_train_steps(configs, value)
elif field_name == "label_map_path":
_update_label_map_path(configs, value)
elif field_name == "mask_type":
_update_mask_type(configs, value)
elif field_name == "sample_1_of_n_eval_examples":
_update_all_eval_input_configs(configs, "sample_1_of_n_examples", value)
elif field_name == "eval_num_epochs":
_update_all_eval_input_configs(configs, "num_epochs", value)
elif field_name == "eval_with_moving_averages":
_update_use_moving_averages(configs, value)
elif field_name == "retain_original_images_in_eval":
_update_retain_original_images(configs["eval_config"], value)
elif field_name == "use_bfloat16":
_update_use_bfloat16(configs, value)
elif field_name == "retain_original_image_additional_channels_in_eval":
_update_retain_original_image_additional_channels(configs["eval_config"],
value)
elif field_name == "num_classes":
_update_num_classes(configs["model"], value)
elif field_name == "sample_from_datasets_weights":
_update_sample_from_datasets_weights(configs["train_input_config"], value)
elif field_name == "peak_max_pool_kernel_size":
_update_peak_max_pool_kernel_size(configs["model"], value)
elif field_name == "candidate_search_scale":
_update_candidate_search_scale(configs["model"], value)
elif field_name == "candidate_ranking_mode":
_update_candidate_ranking_mode(configs["model"], value)
elif field_name == "score_distance_offset":
_update_score_distance_offset(configs["model"], value)
elif field_name == "box_scale":
_update_box_scale(configs["model"], value)
elif field_name == "keypoint_candidate_score_threshold":
_update_keypoint_candidate_score_threshold(configs["model"], value)
elif field_name == "rescore_instances":
_update_rescore_instances(configs["model"], value)
elif field_name == "unmatched_keypoint_score":
_update_unmatched_keypoint_score(configs["model"], value)
elif field_name == "score_distance_multiplier":
_update_score_distance_multiplier(configs["model"], value)
elif field_name == "std_dev_multiplier":
_update_std_dev_multiplier(configs["model"], value)
elif field_name == "rescoring_threshold":
_update_rescoring_threshold(configs["model"], value)
else:
return False
return True
def _update_tf_record_input_path(input_config, input_path):
"""Updates input configuration to reflect a new input path.
The input_config object is updated in place, and hence not returned.
Args:
    input_config: An input_reader_pb2.InputReader.
input_path: A path to data or list of paths.
Raises:
TypeError: if input reader type is not `tf_record_input_reader`.
"""
input_reader_type = input_config.WhichOneof("input_reader")
if input_reader_type == "tf_record_input_reader":
input_config.tf_record_input_reader.ClearField("input_path")
if isinstance(input_path, list):
input_config.tf_record_input_reader.input_path.extend(input_path)
else:
input_config.tf_record_input_reader.input_path.append(input_path)
else:
raise TypeError("Input reader type must be `tf_record_input_reader`.")
def update_input_reader_config(configs,
key_name=None,
input_name=None,
field_name=None,
value=None,
path_updater=_update_tf_record_input_path):
"""Updates specified input reader config field.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key_name: Name of the input config we should update, either
'train_input_config' or 'eval_input_configs'
    input_name: String name used to identify the input config to update. Should
      be either None or the value of the 'name' field in one of the input
      reader configs.
field_name: Field name in input_reader_pb2.InputReader.
value: Value used to override existing field value.
path_updater: helper function used to update the input path. Only used when
field_name is "input_path".
Raises:
ValueError: when input field_name is None.
    ValueError: when input_name is None and the number of eval_input_readers
      does not equal 1.
"""
if isinstance(configs[key_name], input_reader_pb2.InputReader):
# Updates singular input_config object.
target_input_config = configs[key_name]
if field_name == "input_path":
path_updater(input_config=target_input_config, input_path=value)
else:
setattr(target_input_config, field_name, value)
elif input_name is None and len(configs[key_name]) == 1:
# Updates first (and the only) object of input_config list.
target_input_config = configs[key_name][0]
if field_name == "input_path":
path_updater(input_config=target_input_config, input_path=value)
else:
setattr(target_input_config, field_name, value)
elif input_name is not None and len(configs[key_name]):
# Updates input_config whose name matches input_name.
update_count = 0
for input_config in configs[key_name]:
if input_config.name == input_name:
setattr(input_config, field_name, value)
update_count = update_count + 1
if not update_count:
raise ValueError(
"Input name {} not found when overriding.".format(input_name))
elif update_count > 1:
raise ValueError("Duplicate input name found when overriding.")
else:
key_name = "None" if key_name is None else key_name
input_name = "None" if input_name is None else input_name
field_name = "None" if field_name is None else field_name
raise ValueError("Unknown input config overriding: "
"key_name:{}, input_name:{}, field_name:{}.".format(
key_name, input_name, field_name))
def _update_initial_learning_rate(configs, learning_rate):
"""Updates `configs` to reflect the new initial learning rate.
This function updates the initial learning rate. For learning rate schedules,
  all other defined learning rates in the pipeline config are scaled to
  maintain their ratio to the initial learning rate.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
learning_rate: Initial learning rate for optimizer.
Raises:
TypeError: if optimizer type is not supported, or if learning rate type is
not supported.
"""
optimizer_type = get_optimizer_type(configs["train_config"])
if optimizer_type == "rms_prop_optimizer":
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
elif optimizer_type == "momentum_optimizer":
optimizer_config = configs["train_config"].optimizer.momentum_optimizer
elif optimizer_type == "adam_optimizer":
optimizer_config = configs["train_config"].optimizer.adam_optimizer
else:
raise TypeError("Optimizer %s is not supported." % optimizer_type)
learning_rate_type = get_learning_rate_type(optimizer_config)
if learning_rate_type == "constant_learning_rate":
constant_lr = optimizer_config.learning_rate.constant_learning_rate
constant_lr.learning_rate = learning_rate
elif learning_rate_type == "exponential_decay_learning_rate":
exponential_lr = (
optimizer_config.learning_rate.exponential_decay_learning_rate)
exponential_lr.initial_learning_rate = learning_rate
elif learning_rate_type == "manual_step_learning_rate":
manual_lr = optimizer_config.learning_rate.manual_step_learning_rate
original_learning_rate = manual_lr.initial_learning_rate
learning_rate_scaling = float(learning_rate) / original_learning_rate
manual_lr.initial_learning_rate = learning_rate
for schedule in manual_lr.schedule:
schedule.learning_rate *= learning_rate_scaling
elif learning_rate_type == "cosine_decay_learning_rate":
cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate
learning_rate_base = cosine_lr.learning_rate_base
warmup_learning_rate = cosine_lr.warmup_learning_rate
warmup_scale_factor = warmup_learning_rate / learning_rate_base
cosine_lr.learning_rate_base = learning_rate
cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate
else:
raise TypeError("Learning rate %s is not supported." % learning_rate_type)
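# Worked example: with a manual_step schedule whose initial learning rate is
# 0.01 and whose steps use 0.001, updating the initial learning rate to 0.02
# rescales every rate by 0.02 / 0.01 = 2.0, giving an initial rate of 0.02 and
# step rates of 0.002.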
def _update_batch_size(configs, batch_size):
"""Updates `configs` to reflect the new training batch size.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    batch_size: Batch size to use for training (ideally a power of 2). Inputs
      are rounded and capped to be 1 or greater.
"""
configs["train_config"].batch_size = max(1, int(round(batch_size)))
def _validate_message_has_field(message, field):
if not message.HasField(field):
raise ValueError("Expecting message to have field %s" % field)
def _update_generic(configs, key, value):
"""Update a pipeline configuration parameter based on a generic key/value.
Args:
configs: Dictionary of pipeline configuration protos.
key: A string key, dot-delimited to represent the argument key.
      e.g. "model.ssd.loss.hard_example_miner.max_negatives_per_positive"
value: A value to set the argument to. The type of the value must match the
type for the protocol buffer. Note that setting the wrong type will
result in a TypeError.
e.g. 42
Raises:
    ValueError: if the message key does not match the existing proto fields.
    TypeError: if the value type doesn't match the protobuf field type.
"""
fields = key.split(".")
first_field = fields.pop(0)
last_field = fields.pop()
message = configs[first_field]
for field in fields:
_validate_message_has_field(message, field)
message = getattr(message, field)
_validate_message_has_field(message, last_field)
setattr(message, last_field, value)
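# For example, the generic key
# "model.ssd.loss.hard_example_miner.max_negatives_per_positive" walks
# configs["model"] -> ssd -> loss -> hard_example_miner and sets
# max_negatives_per_positive, provided each intermediate field is present.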
def _update_momentum_optimizer_value(configs, momentum):
"""Updates `configs` to reflect the new momentum value.
  Momentum is only supported for RMSPropOptimizer and MomentumOptimizer; for any
  other optimizer, a TypeError is raised. The configs dictionary is updated in
place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
momentum: New momentum value. Values are clipped at 0.0 and 1.0.
Raises:
TypeError: If the optimizer type is not `rms_prop_optimizer` or
`momentum_optimizer`.
"""
optimizer_type = get_optimizer_type(configs["train_config"])
if optimizer_type == "rms_prop_optimizer":
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
elif optimizer_type == "momentum_optimizer":
optimizer_config = configs["train_config"].optimizer.momentum_optimizer
else:
raise TypeError("Optimizer type must be one of `rms_prop_optimizer` or "
"`momentum_optimizer`.")
optimizer_config.momentum_optimizer_value = min(max(0.0, momentum), 1.0)
def _update_classification_localization_weight_ratio(configs, ratio):
"""Updates the classification/localization weight loss ratio.
  Detection models usually define separate loss weights for classification and
  localization. This function updates the weights such that the ratio of the
  classification (and/or objectness) loss weight to the localization loss
  weight equals the provided ratio.
Arbitrarily, localization weight is set to 1.0.
Note that in the case of Faster R-CNN, this same ratio is applied to the first
stage objectness loss weight relative to localization loss weight.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
ratio: Desired ratio of classification (and/or objectness) loss weight to
localization loss weight.
"""
meta_architecture = configs["model"].WhichOneof("model")
if meta_architecture == "faster_rcnn":
model = configs["model"].faster_rcnn
model.first_stage_localization_loss_weight = 1.0
model.first_stage_objectness_loss_weight = ratio
model.second_stage_localization_loss_weight = 1.0
model.second_stage_classification_loss_weight = ratio
if meta_architecture == "ssd":
model = configs["model"].ssd
model.loss.localization_weight = 1.0
model.loss.classification_weight = ratio
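# Worked example: for an SSD config, a ratio of 2.0 results in
# loss.localization_weight = 1.0 and loss.classification_weight = 2.0.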
def _get_classification_loss(model_config):
"""Returns the classification loss for a model."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
model = model_config.faster_rcnn
classification_loss = model.second_stage_classification_loss
elif meta_architecture == "ssd":
model = model_config.ssd
classification_loss = model.loss.classification_loss
else:
raise TypeError("Did not recognize the model architecture.")
return classification_loss
def _update_focal_loss_gamma(configs, gamma):
"""Updates the gamma value for a sigmoid focal loss.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
gamma: Exponent term in focal loss.
Raises:
TypeError: If the classification loss is not `weighted_sigmoid_focal`.
"""
classification_loss = _get_classification_loss(configs["model"])
classification_loss_type = classification_loss.WhichOneof(
"classification_loss")
if classification_loss_type != "weighted_sigmoid_focal":
raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
classification_loss.weighted_sigmoid_focal.gamma = gamma
def _update_focal_loss_alpha(configs, alpha):
"""Updates the alpha value for a sigmoid focal loss.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
alpha: Class weight multiplier for sigmoid loss.
Raises:
TypeError: If the classification loss is not `weighted_sigmoid_focal`.
"""
classification_loss = _get_classification_loss(configs["model"])
classification_loss_type = classification_loss.WhichOneof(
"classification_loss")
if classification_loss_type != "weighted_sigmoid_focal":
raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
classification_loss.weighted_sigmoid_focal.alpha = alpha
def _update_train_steps(configs, train_steps):
"""Updates `configs` to reflect new number of training steps."""
configs["train_config"].num_steps = int(train_steps)
def _update_all_eval_input_configs(configs, field, value):
"""Updates the content of `field` with `value` for all eval input configs."""
for eval_input_config in configs["eval_input_configs"]:
setattr(eval_input_config, field, value)
def _update_label_map_path(configs, label_map_path):
"""Updates the label map path for both train and eval input readers.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
label_map_path: New path to `StringIntLabelMap` pbtxt file.
"""
configs["train_input_config"].label_map_path = label_map_path
_update_all_eval_input_configs(configs, "label_map_path", label_map_path)
def _update_mask_type(configs, mask_type):
"""Updates the mask type for both train and eval input readers.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
mask_type: A string name representing a value of
input_reader_pb2.InstanceMaskType
"""
configs["train_input_config"].mask_type = mask_type
_update_all_eval_input_configs(configs, "mask_type", mask_type)
def _update_use_moving_averages(configs, use_moving_averages):
"""Updates the eval config option to use or not use moving averages.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
use_moving_averages: Boolean indicating whether moving average variables
should be loaded during evaluation.
"""
configs["eval_config"].use_moving_averages = use_moving_averages
def _update_retain_original_images(eval_config, retain_original_images):
"""Updates eval config with option to retain original images.
The eval_config object is updated in place, and hence not returned.
Args:
    eval_config: An eval_pb2.EvalConfig.
retain_original_images: Boolean indicating whether to retain original images
in eval mode.
"""
eval_config.retain_original_images = retain_original_images
def _update_use_bfloat16(configs, use_bfloat16):
"""Updates `configs` to reflect the new setup on whether to use bfloat16.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
use_bfloat16: A bool, indicating whether to use bfloat16 for training.
"""
configs["train_config"].use_bfloat16 = use_bfloat16
def _update_retain_original_image_additional_channels(
eval_config,
retain_original_image_additional_channels):
"""Updates eval config to retain original image additional channels or not.
The eval_config object is updated in place, and hence not returned.
Args:
    eval_config: An eval_pb2.EvalConfig.
retain_original_image_additional_channels: Boolean indicating whether to
retain original image additional channels in eval mode.
"""
eval_config.retain_original_image_additional_channels = (
retain_original_image_additional_channels)
def remove_unnecessary_ema(variables_to_restore, no_ema_collection=None):
  """Remaps and removes EMA variables that are not created during training.
ExponentialMovingAverage.variables_to_restore() returns a map of EMA names
to tf variables to restore. E.g.:
{
conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
global_step: global_step
}
This function takes care of the extra ExponentialMovingAverage variables
that get created during eval but aren't available in the checkpoint, by
  remapping the key to the variable itself, and removing its EMA entry from
  the variables to restore. An example resulting dictionary would look like:
{
conv/batchnorm/gamma: conv/batchnorm/gamma,
conv_4/conv2d_params: conv_4/conv2d_params,
global_step: global_step
}
Args:
variables_to_restore: A dictionary created by ExponentialMovingAverage.
variables_to_restore().
    no_ema_collection: A list of name scope substrings used to match the
      variables whose EMA mappings should be eliminated.
Returns:
A variables_to_restore dictionary excluding the collection of unwanted
EMA mapping.
"""
if no_ema_collection is None:
return variables_to_restore
restore_map = {}
for key in variables_to_restore:
if ("ExponentialMovingAverage" in key
and any([name in key for name in no_ema_collection])):
new_key = key.replace("/ExponentialMovingAverage", "")
else:
new_key = key
restore_map[new_key] = variables_to_restore[key]
return restore_map
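# Minimal usage sketch (the name scope "FeatureExtractor" is a hypothetical
# example):
#
#   ema = tf.train.ExponentialMovingAverage(decay=0.999)
#   variables_to_restore = ema.variables_to_restore()
#   # Map FeatureExtractor variables back to their non-EMA names and drop
#   # their EMA entries; all other variables keep their EMA mapping.
#   variables_to_restore = remove_unnecessary_ema(
#       variables_to_restore, no_ema_collection=["FeatureExtractor"])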
def _update_num_classes(model_config, num_classes):
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
model_config.faster_rcnn.num_classes = num_classes
if meta_architecture == "ssd":
model_config.ssd.num_classes = num_classes
def _update_sample_from_datasets_weights(input_reader_config, weights):
  """Updates sample_from_datasets_weights with overrides."""
if len(weights) != len(input_reader_config.sample_from_datasets_weights):
raise ValueError(
"sample_from_datasets_weights override has a different number of values"
" ({}) than the configured dataset weights ({})."
.format(
            len(weights),
            len(input_reader_config.sample_from_datasets_weights)))
del input_reader_config.sample_from_datasets_weights[:]
input_reader_config.sample_from_datasets_weights.extend(weights)
def _update_peak_max_pool_kernel_size(model_config, kernel_size):
"""Updates the max pool kernel size (NMS) for keypoints in CenterNet."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.peak_max_pool_kernel_size = kernel_size
else:
tf.logging.warning("Ignoring config override key for "
"peak_max_pool_kernel_size since there are multiple "
"keypoint estimation tasks")
def _update_candidate_search_scale(model_config, search_scale):
"""Updates the keypoint candidate search scale in CenterNet."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.candidate_search_scale = search_scale
else:
tf.logging.warning("Ignoring config override key for "
"candidate_search_scale since there are multiple "
"keypoint estimation tasks")
def _update_candidate_ranking_mode(model_config, mode):
"""Updates how keypoints are snapped to candidates in CenterNet."""
if mode not in ("min_distance", "score_distance_ratio",
"score_scaled_distance_ratio", "gaussian_weighted"):
raise ValueError("Attempting to set the keypoint candidate ranking mode "
"to {}, but the only options are 'min_distance', "
"'score_distance_ratio', 'score_scaled_distance_ratio', "
"'gaussian_weighted'.".format(mode))
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.candidate_ranking_mode = mode
else:
tf.logging.warning("Ignoring config override key for "
"candidate_ranking_mode since there are multiple "
"keypoint estimation tasks")
def _update_score_distance_offset(model_config, offset):
"""Updates the keypoint candidate selection metric. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.score_distance_offset = offset
else:
tf.logging.warning("Ignoring config override key for "
"score_distance_offset since there are multiple "
"keypoint estimation tasks")
def _update_box_scale(model_config, box_scale):
"""Updates the keypoint candidate search region. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.box_scale = box_scale
else:
tf.logging.warning("Ignoring config override key for box_scale since "
"there are multiple keypoint estimation tasks")
def _update_keypoint_candidate_score_threshold(model_config, threshold):
"""Updates the keypoint candidate score threshold. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.keypoint_candidate_score_threshold = threshold
else:
tf.logging.warning("Ignoring config override key for "
"keypoint_candidate_score_threshold since there are "
"multiple keypoint estimation tasks")
def _update_rescore_instances(model_config, should_rescore):
"""Updates whether boxes should be rescored based on keypoint confidences."""
if isinstance(should_rescore, str):
    should_rescore = should_rescore == "True"
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.rescore_instances = should_rescore
else:
tf.logging.warning("Ignoring config override key for "
"rescore_instances since there are multiple keypoint "
"estimation tasks")
def _update_unmatched_keypoint_score(model_config, score):
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.unmatched_keypoint_score = score
else:
tf.logging.warning("Ignoring config override key for "
"unmatched_keypoint_score since there are multiple "
"keypoint estimation tasks")
def _update_score_distance_multiplier(model_config, score_distance_multiplier):
"""Updates the keypoint candidate selection metric. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.score_distance_multiplier = score_distance_multiplier
else:
tf.logging.warning("Ignoring config override key for "
"score_distance_multiplier since there are multiple "
"keypoint estimation tasks")
else:
raise ValueError(
"Unsupported meta_architecture type: %s" % meta_architecture)
def _update_std_dev_multiplier(model_config, std_dev_multiplier):
"""Updates the keypoint candidate selection metric. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.std_dev_multiplier = std_dev_multiplier
else:
tf.logging.warning("Ignoring config override key for "
"std_dev_multiplier since there are multiple "
"keypoint estimation tasks")
else:
raise ValueError(
"Unsupported meta_architecture type: %s" % meta_architecture)
def _update_rescoring_threshold(model_config, rescoring_threshold):
"""Updates the keypoint candidate selection metric. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.rescoring_threshold = rescoring_threshold
else:
tf.logging.warning("Ignoring config override key for "
"rescoring_threshold since there are multiple "
"keypoint estimation tasks")
else:
raise ValueError(
"Unsupported meta_architecture type: %s" % meta_architecture)
| 51,336 | 39.359277 | 80 | py |
models | models-master/research/object_detection/utils/model_util.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for manipulating Keras models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def extract_submodel(model, inputs, outputs, name=None):
"""Extracts a section of a Keras model into a new model.
This method walks an existing model from the specified outputs back to the
specified inputs in order to construct a new model containing only a portion
of the old model, while sharing the layers and weights with the original
model.
WARNING: This method does not work for submodels containing layers that have
been used multiple times in the original model, or in other models beyond
the original model. (E.g. does not work for submodels that contain layers that
use shared weights). This also means that multiple overlapping submodels
cannot be extracted from the same model.
  It also relies on recursion and will hit Python's recursion limit for large
submodels.
Args:
model: The existing Keras model this method extracts a submodel from.
    inputs: The layer inputs in the existing model that start the submodel.
    outputs: The layer outputs in the existing model that should be output by
      the submodel.
    name: The name for the extracted model.
  Returns:
    The extracted submodel specified by the given inputs and outputs.
"""
output_to_layer = {}
output_to_layer_input = {}
for layer in model.layers:
layer_output = layer.output
layer_inputs = layer.input
output_to_layer[layer_output.experimental_ref()] = layer
output_to_layer_input[layer_output.experimental_ref()] = layer_inputs
model_inputs_dict = {}
memoized_results = {}
  # Relies on recursion, which has a fairly low default limit in Python.
def _recurse_in_model(tensor):
"""Walk the existing model recursively to copy a submodel."""
if tensor.experimental_ref() in memoized_results:
return memoized_results[tensor.experimental_ref()]
if (tensor.experimental_ref() == inputs.experimental_ref()) or (
isinstance(inputs, list) and tensor in inputs):
if tensor.experimental_ref() not in model_inputs_dict:
model_inputs_dict[tensor.experimental_ref()] = tf.keras.layers.Input(
tensor=tensor)
out = model_inputs_dict[tensor.experimental_ref()]
else:
cur_inputs = output_to_layer_input[tensor.experimental_ref()]
cur_layer = output_to_layer[tensor.experimental_ref()]
if isinstance(cur_inputs, list):
out = cur_layer([_recurse_in_model(inp) for inp in cur_inputs])
else:
out = cur_layer(_recurse_in_model(cur_inputs))
memoized_results[tensor.experimental_ref()] = out
return out
if isinstance(outputs, list):
model_outputs = [_recurse_in_model(tensor) for tensor in outputs]
else:
model_outputs = _recurse_in_model(outputs)
if isinstance(inputs, list):
model_inputs = [
model_inputs_dict[tensor.experimental_ref()] for tensor in inputs
]
else:
model_inputs = model_inputs_dict[inputs.experimental_ref()]
return tf.keras.Model(inputs=model_inputs, outputs=model_outputs, name=name)
| 3,848 | 38.680412 | 80 | py |
models | models-master/research/object_detection/utils/context_manager.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python context management helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class IdentityContextManager(object):
"""Returns an identity context manager that does nothing.
This is helpful in setting up conditional `with` statement as below:
with slim.arg_scope(x) if use_slim_scope else IdentityContextManager():
do_stuff()
"""
def __enter__(self):
return None
def __exit__(self, exec_type, exec_value, traceback):
del exec_type
del exec_value
del traceback
return False
| 1,277 | 30.95 | 80 | py |
models | models-master/research/object_detection/utils/model_util_tf2_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utility functions for manipulating Keras models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import tensorflow.compat.v1 as tf
from object_detection.utils import model_util
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ExtractSubmodelUtilTest(tf.test.TestCase):
def test_simple_model(self):
inputs = tf.keras.Input(shape=(256,)) # Returns a placeholder tensor
# A layer instance is callable on a tensor, and returns a tensor.
x = tf.keras.layers.Dense(128, activation='relu', name='a')(inputs)
x = tf.keras.layers.Dense(64, activation='relu', name='b')(x)
x = tf.keras.layers.Dense(32, activation='relu', name='c')(x)
x = tf.keras.layers.Dense(16, activation='relu', name='d')(x)
x = tf.keras.layers.Dense(8, activation='relu', name='e')(x)
predictions = tf.keras.layers.Dense(10, activation='softmax')(x)
model = tf.keras.Model(inputs=inputs, outputs=predictions)
new_in = model.get_layer(
name='b').input
new_out = model.get_layer(
name='d').output
new_model = model_util.extract_submodel(
model=model,
inputs=new_in,
outputs=new_out)
batch_size = 3
ones = tf.ones((batch_size, 128))
final_out = new_model(ones)
self.assertAllEqual(final_out.shape, (batch_size, 16))
if __name__ == '__main__':
tf.test.main()
| 2,187 | 34.290323 | 80 | py |
models | models-master/research/object_detection/utils/np_box_ops_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.np_box_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_box_ops
class BoxOpsTests(tf.test.TestCase):
def setUp(self):
boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=float)
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxes1 = boxes1
self.boxes2 = boxes2
def testArea(self):
areas = np_box_ops.area(self.boxes1)
expected_areas = np.array([6.0, 5.0], dtype=float)
self.assertAllClose(expected_areas, areas)
def testIntersection(self):
intersection = np_box_ops.intersection(self.boxes1, self.boxes2)
expected_intersection = np.array([[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]],
dtype=float)
self.assertAllClose(intersection, expected_intersection)
def testIOU(self):
iou = np_box_ops.iou(self.boxes1, self.boxes2)
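    # First box pair: areas are 3*2=6 and 3*4=12 and the intersection is
    # 2*1=2, so IoU = 2 / (6 + 12 - 2) = 2/16.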
expected_iou = np.array([[2.0 / 16.0, 0.0, 6.0 / 400.0],
[1.0 / 16.0, 0.0, 5.0 / 400.0]],
dtype=float)
self.assertAllClose(iou, expected_iou)
def testIOA(self):
boxes1 = np.array([[0.25, 0.25, 0.75, 0.75],
[0.0, 0.0, 0.5, 0.75]],
dtype=np.float32)
boxes2 = np.array([[0.5, 0.25, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]],
dtype=np.float32)
ioa21 = np_box_ops.ioa(boxes2, boxes1)
expected_ioa21 = np.array([[0.5, 0.0],
[1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
if __name__ == '__main__':
tf.test.main()
| 2,604 | 34.684932 | 80 | py |
models | models-master/research/object_detection/utils/spatial_transform_ops_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.spatial_transform_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.utils import spatial_transform_ops as spatial_ops
from object_detection.utils import test_case
class BoxGridCoordinateTest(test_case.TestCase):
def test_4x4_grid(self):
boxes = np.array([[[0., 0., 6., 6.]]], dtype=np.float32)
def graph_fn(boxes):
return spatial_ops.box_grid_coordinate_vectors(boxes, size_y=4, size_x=4)
grid_y, grid_x = self.execute(graph_fn, [boxes])
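    # The box spans [0, 6] in y and x, so a 4x4 grid has cells of size
    # 6 / 4 = 1.5 and the sample points are the cell centers at
    # 0.75, 2.25, 3.75 and 5.25.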
expected_grid_y = np.array([[[0.75, 2.25, 3.75, 5.25]]])
expected_grid_x = np.array([[[0.75, 2.25, 3.75, 5.25]]])
self.assertAllClose(expected_grid_y, grid_y)
self.assertAllClose(expected_grid_x, grid_x)
def test_2x2_grid(self):
def graph_fn(boxes):
return spatial_ops.box_grid_coordinate_vectors(boxes, size_x=2, size_y=2)
boxes = np.array([[[0., 0., 6., 3.],
[0., 0., 3., 6.]]], dtype=np.float32)
grid_y, grid_x = self.execute(graph_fn, [boxes])
expected_grid_y = np.array([[[1.5, 4.5],
[0.75, 2.25]]])
expected_grid_x = np.array([[[0.75, 2.25],
[1.5, 4.5]]])
self.assertAllClose(expected_grid_y, grid_y)
self.assertAllClose(expected_grid_x, grid_x)
def test_2x4_grid(self):
boxes = np.array([[[0., 0., 6., 6.]]], dtype=np.float32)
def graph_fn(boxes):
return spatial_ops.box_grid_coordinate_vectors(boxes, size_y=2, size_x=4)
grid_y, grid_x = self.execute(graph_fn, [boxes])
expected_grid_y = np.array([[[1.5, 4.5]]])
expected_grid_x = np.array([[[0.75, 2.25, 3.75, 5.25]]])
self.assertAllClose(expected_grid_y, grid_y)
self.assertAllClose(expected_grid_x, grid_x)
def test_2x4_grid_with_aligned_corner(self):
boxes = np.array([[[0., 0., 6., 6.]]], dtype=np.float32)
def graph_fn(boxes):
return spatial_ops.box_grid_coordinate_vectors(boxes, size_y=2, size_x=4,
align_corners=True)
grid_y, grid_x = self.execute(graph_fn, [boxes])
expected_grid_y = np.array([[[0, 6]]])
expected_grid_x = np.array([[[0, 2, 4, 6]]])
self.assertAllClose(expected_grid_y, grid_y)
self.assertAllClose(expected_grid_x, grid_x)
def test_offgrid_boxes(self):
boxes = np.array([[[1.2, 2.3, 7.2, 8.3]]], dtype=np.float32)
def graph_fn(boxes):
return spatial_ops.box_grid_coordinate_vectors(boxes, size_y=4, size_x=4)
grid_y, grid_x = self.execute(graph_fn, [boxes])
expected_grid_y = np.array([[[0.75, 2.25, 3.75, 5.25]]]) + 1.2
expected_grid_x = np.array([[[0.75, 2.25, 3.75, 5.25]]]) + 2.3
self.assertAllClose(expected_grid_y, grid_y)
self.assertAllClose(expected_grid_x, grid_x)
class FeatureGridCoordinateTest(test_case.TestCase):
def test_snap_box_points_to_nearest_4_pixels(self):
box_grid_y = np.array([[[1.5, 4.6]]], dtype=np.float32)
box_grid_x = np.array([[[2.4, 5.3]]], dtype=np.float32)
def graph_fn(box_grid_y, box_grid_x):
return spatial_ops.feature_grid_coordinate_vectors(box_grid_y, box_grid_x)
(feature_grid_y0,
feature_grid_x0, feature_grid_y1, feature_grid_x1) = self.execute(
graph_fn, [box_grid_y, box_grid_x])
expected_grid_y0 = np.array([[[1, 4]]])
expected_grid_y1 = np.array([[[2, 5]]])
expected_grid_x0 = np.array([[[2, 5]]])
expected_grid_x1 = np.array([[[3, 6]]])
self.assertAllEqual(expected_grid_y0, feature_grid_y0)
self.assertAllEqual(expected_grid_y1, feature_grid_y1)
self.assertAllEqual(expected_grid_x0, feature_grid_x0)
self.assertAllEqual(expected_grid_x1, feature_grid_x1)
def test_snap_box_points_outside_pixel_grid_to_nearest_neighbor(self):
box_grid_y = np.array([[[0.33, 1., 1.66]]], dtype=np.float32)
box_grid_x = np.array([[[-0.5, 1., 1.66]]], dtype=np.float32)
def graph_fn(box_grid_y, box_grid_x):
return spatial_ops.feature_grid_coordinate_vectors(box_grid_y, box_grid_x)
(feature_grid_y0,
feature_grid_x0, feature_grid_y1, feature_grid_x1) = self.execute(
graph_fn, [box_grid_y, box_grid_x])
expected_grid_y0 = np.array([[[0, 1, 1]]])
expected_grid_y1 = np.array([[[1, 2, 2]]])
expected_grid_x0 = np.array([[[-1, 1, 1]]])
expected_grid_x1 = np.array([[[0, 2, 2]]])
self.assertAllEqual(expected_grid_y0, feature_grid_y0)
self.assertAllEqual(expected_grid_y1, feature_grid_y1)
self.assertAllEqual(expected_grid_x0, feature_grid_x0)
self.assertAllEqual(expected_grid_x1, feature_grid_x1)
class RavelIndicesTest(test_case.TestCase):
def test_feature_point_indices(self):
feature_grid_y = np.array([[[1, 2, 4, 5],
[2, 3, 4, 5]]], dtype=np.int32)
feature_grid_x = np.array([[[1, 3, 4],
[2, 3, 4]]], dtype=np.int32)
num_feature_levels = 2
feature_height = 6
feature_width = 5
box_levels = np.array([[0, 1]], dtype=np.int32)
def graph_fn(feature_grid_y, feature_grid_x, box_levels):
return spatial_ops.ravel_indices(feature_grid_y, feature_grid_x,
num_feature_levels, feature_height,
feature_width, box_levels)
indices = self.execute(graph_fn,
[feature_grid_y, feature_grid_x, box_levels])
expected_indices = np.array([[[[6, 8, 9],
[11, 13, 14],
[21, 23, 24],
[26, 28, 29]],
[[42, 43, 44],
[47, 48, 49],
[52, 53, 54],
[57, 58, 59]]]])
self.assertAllEqual(expected_indices.flatten(), indices)
class MultiLevelRoIAlignTest(test_case.TestCase):
def test_perfectly_aligned_cell_center_and_feature_pixels(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[2, 2])
image = np.arange(25).reshape(1, 5, 5, 1).astype(np.float32)
boxes = np.array([[[0, 0, 1.0, 1.0]]], dtype=np.float32)
box_levels = np.array([[0]], dtype=np.int32)
expected_output = [[[[[6], [8]],
[[16], [18]]]]]
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(crop_output, expected_output)
def test_interpolation_with_4_points_per_bin(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[1, 1],
num_samples_per_cell_y=2,
num_samples_per_cell_x=2)
image = np.array([[[[1], [2], [3], [4]],
[[5], [6], [7], [8]],
[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]],
dtype=np.float32)
boxes = np.array([[[1./3, 1./3, 2./3, 2./3]]], dtype=np.float32)
box_levels = np.array([[0]], dtype=np.int32)
expected_output = [[[[[(7.25 + 7.75 + 9.25 + 9.75) / 4]]]]]
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(expected_output, crop_output)
def test_1x1_crop_on_2x2_features(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[1, 1])
image = np.array([[[[1], [2]],
[[3], [4]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1]]], dtype=np.float32)
box_levels = np.array([[0]], dtype=np.int32)
expected_output = [[[[[2.5]]]]]
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(crop_output, expected_output)
def test_3x3_crops_on_2x2_features(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[3, 3])
image = np.array([[[[1], [2]],
[[3], [4]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1]]], dtype=np.float32)
box_levels = np.array([[0]], dtype=np.int32)
expected_output = [[[[[9./6], [11./6], [13./6]],
[[13./6], [15./6], [17./6]],
[[17./6], [19./6], [21./6]]]]]
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(crop_output, expected_output)
def test_2x2_crops_on_3x3_features(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[2, 2])
image = np.array([[[[1], [2], [3]],
[[4], [5], [6]],
[[7], [8], [9]]]],
dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1],
[0, 0, .5, .5]]],
dtype=np.float32)
box_levels = np.array([[0, 0]], dtype=np.int32)
expected_output = [[[[[3], [4]],
[[6], [7]]],
[[[2.], [2.5]],
[[3.5], [4.]]]]]
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(crop_output, expected_output)
def test_2x2_crop_on_4x4_features(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[2, 2])
image = np.array([[[[0], [1], [2], [3]],
[[4], [5], [6], [7]],
[[8], [9], [10], [11]],
[[12], [13], [14], [15]]]],
dtype=np.float32)
boxes = np.array([[[0, 0, 2./3, 2./3],
[0, 0, 2./3, 1.0]]],
dtype=np.float32)
box_levels = np.array([[0, 0]], dtype=np.int32)
expected_output = np.array([[[[[2.5], [3.5]],
[[6.5], [7.5]]],
[[[2.75], [4.25]],
[[6.75], [8.25]]]]])
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(expected_output, crop_output)
def test_extrapolate_3x3_crop_on_2x2_features(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[3, 3])
image = np.array([[[[1], [2]],
[[3], [4]]]], dtype=np.float32)
boxes = np.array([[[-1, -1, 2, 2]]], dtype=np.float32)
box_levels = np.array([[0]], dtype=np.int32)
expected_output = np.array([[[[[0.25], [0.75], [0.5]],
[[1.0], [2.5], [1.5]],
[[0.75], [1.75], [1]]]]])
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(expected_output, crop_output)
def test_extrapolate_with_non_zero_value(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[3, 3],
extrapolation_value=2.0)
image = np.array([[[[4], [4]],
[[4], [4]]]], dtype=np.float32)
boxes = np.array([[[-1, -1, 2, 2]]], dtype=np.float32)
box_levels = np.array([[0]], dtype=np.int32)
expected_output = np.array([[[[[2.5], [3.0], [2.5]],
[[3.0], [4.0], [3.0]],
[[2.5], [3.0], [2.5]]]]])
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(expected_output, crop_output)
def test_multilevel_roi_align(self):
image_size = 640
fpn_min_level = 2
fpn_max_level = 5
batch_size = 1
output_size = [2, 2]
num_filters = 1
features = []
for level in range(fpn_min_level, fpn_max_level + 1):
feat_size = int(image_size / 2**level)
features.append(
float(level) *
np.ones([batch_size, feat_size, feat_size, num_filters],
dtype=np.float32))
boxes = np.array(
[
[
[0, 0, 111, 111], # Level 2.
[0, 0, 113, 113], # Level 3.
[0, 0, 223, 223], # Level 3.
[0, 0, 225, 225], # Level 4.
[0, 0, 449, 449] # Level 5.
],
],
dtype=np.float32) / image_size
levels = np.array([[0, 1, 1, 2, 3]], dtype=np.int32)
def graph_fn(feature1, feature2, feature3, feature4, boxes, levels):
roi_features = spatial_ops.multilevel_roi_align(
[feature1, feature2, feature3, feature4],
boxes,
levels,
output_size)
return roi_features
roi_features = self.execute(graph_fn, features + [boxes, levels])
self.assertAllClose(roi_features[0][0], 2 * np.ones((2, 2, 1)))
self.assertAllClose(roi_features[0][1], 3 * np.ones((2, 2, 1)))
self.assertAllClose(roi_features[0][2], 3 * np.ones((2, 2, 1)))
self.assertAllClose(roi_features[0][3], 4 * np.ones((2, 2, 1)))
self.assertAllClose(roi_features[0][4], 5 * np.ones((2, 2, 1)))
def test_large_input(self):
if self.has_tpu():
input_size = 1408
min_level = 2
max_level = 6
batch_size = 2
num_boxes = 512
num_filters = 256
output_size = [7, 7]
features = []
for level in range(min_level, max_level + 1):
feat_size = int(input_size / 2**level)
features.append(
np.reshape(
np.arange(
batch_size * feat_size * feat_size * num_filters,
dtype=np.float32),
[batch_size, feat_size, feat_size, num_filters]))
boxes = np.array([
[[0, 0, 256, 256]]*num_boxes,
], dtype=np.float32) / input_size
boxes = np.tile(boxes, [batch_size, 1, 1])
levels = np.random.randint(5, size=[batch_size, num_boxes],
dtype=np.int32)
def crop_and_resize_fn():
tf_features = [
tf.constant(feature, dtype=tf.bfloat16) for feature in features
]
return spatial_ops.multilevel_roi_align(
tf_features, tf.constant(boxes), tf.constant(levels), output_size)
roi_features = self.execute_tpu(crop_and_resize_fn, [])
self.assertEqual(roi_features.shape,
(batch_size, num_boxes, output_size[0],
output_size[1], num_filters))
class MatMulCropAndResizeTest(test_case.TestCase):
def testMatMulCropAndResize2x2To1x1(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[1, 1])
image = np.array([[[[1], [2]], [[3], [4]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1]]], dtype=np.float32)
expected_output = [[[[[2.5]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMatMulCropAndResize2x2To1x1Flipped(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[1, 1])
image = np.array([[[[1], [2]], [[3], [4]]]], dtype=np.float32)
boxes = np.array([[[1, 1, 0, 0]]], dtype=np.float32)
expected_output = [[[[[2.5]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMatMulCropAndResize2x2To3x3(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[3, 3])
image = np.array([[[[1], [2]], [[3], [4]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1]]], dtype=np.float32)
expected_output = [[[[[1.0], [1.5], [2.0]],
[[2.0], [2.5], [3.0]],
[[3.0], [3.5], [4.0]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMatMulCropAndResize2x2To3x3Flipped(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[3, 3])
image = np.array([[[[1], [2]], [[3], [4]]]], dtype=np.float32)
boxes = np.array([[[1, 1, 0, 0]]], dtype=np.float32)
expected_output = [[[[[4.0], [3.5], [3.0]],
[[3.0], [2.5], [2.0]],
[[2.0], [1.5], [1.0]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMatMulCropAndResize3x3To2x2(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[2, 2])
image = np.array([[[[1], [2], [3]],
[[4], [5], [6]],
[[7], [8], [9]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1],
[0, 0, .5, .5]]], dtype=np.float32)
expected_output = [[[[[1], [3]], [[7], [9]]],
[[[1], [2]], [[4], [5]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMatMulCropAndResize3x3To2x2_2Channels(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[2, 2])
image = np.array([[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1],
[0, 0, .5, .5]]], dtype=np.float32)
expected_output = [[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]],
[[[1, 0], [2, 1]], [[4, 3], [5, 4]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testBatchMatMulCropAndResize3x3To2x2_2Channels(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[2, 2])
image = np.array([[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]],
[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1],
[0, 0, .5, .5]],
[[1, 1, 0, 0],
[.5, .5, 0, 0]]], dtype=np.float32)
expected_output = [[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]],
[[[1, 0], [2, 1]], [[4, 3], [5, 4]]]],
[[[[9, 8], [7, 6]], [[3, 2], [1, 0]]],
[[[5, 4], [4, 3]], [[2, 1], [1, 0]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMatMulCropAndResize3x3To2x2Flipped(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[2, 2])
image = np.array([[[[1], [2], [3]],
[[4], [5], [6]],
[[7], [8], [9]]]], dtype=np.float32)
boxes = np.array([[[1, 1, 0, 0],
[.5, .5, 0, 0]]], dtype=np.float32)
expected_output = [[[[[9], [7]], [[3], [1]]],
[[[5], [4]], [[2], [1]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMultilevelMatMulCropAndResize(self):
def graph_fn(image1, image2, boxes, box_levels):
return spatial_ops.multilevel_matmul_crop_and_resize([image1, image2],
boxes,
box_levels,
crop_size=[2, 2])
image = [np.array([[[[1, 0], [2, 0], [3, 0]],
[[4, 0], [5, 0], [6, 0]],
[[7, 0], [8, 0], [9, 0]]],
[[[1, 0], [2, 0], [3, 0]],
[[4, 0], [5, 0], [6, 0]],
[[7, 0], [8, 0], [9, 0]]]], dtype=np.float32),
np.array([[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]],
[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]]], dtype=np.float32)]
boxes = np.array([[[1, 1, 0, 0],
[.5, .5, 0, 0]],
[[0, 0, 1, 1],
[0, 0, .5, .5]]], dtype=np.float32)
box_levels = np.array([[0, 1], [1, 1]], dtype=np.int32)
expected_output = [[[[[9, 0], [7, 0]], [[3, 0], [1, 0]]],
[[[5, 4], [4, 3]], [[2, 1], [1, 0]]]],
[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]],
[[[1, 0], [2, 1]], [[4, 3], [5, 4]]]]]
crop_output = self.execute(graph_fn, image + [boxes, box_levels])
self.assertAllClose(crop_output, expected_output)
class NativeCropAndResizeTest(test_case.TestCase):
def testBatchCropAndResize3x3To2x2_2Channels(self):
def graph_fn(image, boxes):
return spatial_ops.native_crop_and_resize(image, boxes, crop_size=[2, 2])
image = np.array([[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]],
[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1],
[0, 0, .5, .5]],
[[1, 1, 0, 0],
[.5, .5, 0, 0]]], dtype=np.float32)
expected_output = [[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]],
[[[1, 0], [2, 1]], [[4, 3], [5, 4]]]],
[[[[9, 8], [7, 6]], [[3, 2], [1, 0]]],
[[[5, 4], [4, 3]], [[2, 1], [1, 0]]]]]
crop_output = self.execute_cpu(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMultilevelBatchCropAndResize3x3To2x2_2Channels(self):
def graph_fn(image1, image2, boxes, box_levels):
return spatial_ops.multilevel_native_crop_and_resize([image1, image2],
boxes,
box_levels,
crop_size=[2, 2])
image = [np.array([[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]],
[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]]], dtype=np.float32),
np.array([[[[1, 0], [2, 1]],
[[4, 3], [5, 4]]],
[[[1, 0], [2, 1]],
[[4, 3], [5, 4]]]], dtype=np.float32)]
boxes = np.array([[[0, 0, 1, 1],
[0, 0, .5, .5]],
[[1, 1, 0, 0],
[.5, .5, 0, 0]]], dtype=np.float32)
box_levels = np.array([[0, 1], [0, 0]], dtype=np.float32)
expected_output = [[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]],
[[[1, 0], [1.5, 0.5]], [[2.5, 1.5], [3, 2]]]],
[[[[9, 8], [7, 6]], [[3, 2], [1, 0]]],
[[[5, 4], [4, 3]], [[2, 1], [1, 0]]]]]
crop_output = self.execute_cpu(graph_fn, image + [boxes, box_levels])
self.assertAllClose(crop_output, expected_output)
if __name__ == '__main__':
tf.test.main()
| 25,710 | 41.567881 | 80 | py |
models | models-master/research/object_detection/utils/variables_helper.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for manipulating collections of variables during training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import re
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.python.ops import variables as tf_variables
# Maps checkpoint types to variable name prefixes that are no longer
# supported
DETECTION_FEATURE_EXTRACTOR_MSG = """\
The checkpoint type 'detection' is not supported when it contains variable
names with 'feature_extractor'. Please download the new checkpoint file
from model zoo.
"""
DEPRECATED_CHECKPOINT_MAP = {
'detection': ('feature_extractor', DETECTION_FEATURE_EXTRACTOR_MSG)
}
# TODO(derekjchow): Consider replacing with tf.contrib.filter_variables in
# tensorflow/contrib/framework/python/ops/variables.py
def filter_variables(variables, filter_regex_list, invert=False):
"""Filters out the variables matching the filter_regex.
  Filters out the variables whose names match any of the regular
  expressions in filter_regex_list and returns the remaining variables.
Optionally, if invert=True, the complement set is returned.
Args:
variables: a list of tensorflow variables.
filter_regex_list: a list of string regular expressions.
invert: (boolean). If True, returns the complement of the filter set; that
is, all variables matching filter_regex are kept and all others discarded.
Returns:
a list of filtered variables.
"""
kept_vars = []
variables_to_ignore_patterns = list([fre for fre in filter_regex_list if fre])
for var in variables:
add = True
for pattern in variables_to_ignore_patterns:
if re.match(pattern, var.op.name):
add = False
break
if add != invert:
kept_vars.append(var)
return kept_vars
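# --- Added usage sketch; not part of the original module. The variable names
# 'FeatureExtractor/conv' and 'BoxPredictor/weights' below are invented purely
# to illustrate the regex filtering above.
def _filter_variables_example():
  """Keeps only variables whose names do not match 'FeatureExtractor/.*'."""
  with tf.Graph().as_default():
    feature_var = tf.Variable(tf.zeros([3, 3]), name='FeatureExtractor/conv')
    predictor_var = tf.Variable(tf.zeros([4]), name='BoxPredictor/weights')
    kept = filter_variables([feature_var, predictor_var],
                            ['FeatureExtractor/.*'])
    # Only 'BoxPredictor/weights' survives; the feature extractor variable is
    # dropped because its name matches the regex.
    return [var.op.name for var in kept]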
def multiply_gradients_matching_regex(grads_and_vars, regex_list, multiplier):
"""Multiply gradients whose variable names match a regular expression.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
regex_list: A list of string regular expressions.
multiplier: A (float) multiplier to apply to each gradient matching the
regular expression.
Returns:
grads_and_vars: A list of gradient to variable pairs (tuples).
"""
variables = [pair[1] for pair in grads_and_vars]
matching_vars = filter_variables(variables, regex_list, invert=True)
for var in matching_vars:
logging.info('Applying multiplier %f to variable [%s]',
multiplier, var.op.name)
grad_multipliers = {var: float(multiplier) for var in matching_vars}
return slim.learning.multiply_gradients(grads_and_vars,
grad_multipliers)
def freeze_gradients_matching_regex(grads_and_vars, regex_list):
"""Freeze gradients whose variable names match a regular expression.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
regex_list: A list of string regular expressions.
Returns:
grads_and_vars: A list of gradient to variable pairs (tuples) that do not
contain the variables and gradients matching the regex.
"""
variables = [pair[1] for pair in grads_and_vars]
matching_vars = filter_variables(variables, regex_list, invert=True)
kept_grads_and_vars = [pair for pair in grads_and_vars
if pair[1] not in matching_vars]
for var in matching_vars:
logging.info('Freezing variable [%s]', var.op.name)
return kept_grads_and_vars
def get_variables_available_in_checkpoint(variables,
checkpoint_path,
include_global_step=True):
"""Returns the subset of variables available in the checkpoint.
Inspects given checkpoint and returns the subset of variables that are
available in it.
TODO(rathodv): force input and output to be a dictionary.
Args:
variables: a list or dictionary of variables to find in checkpoint.
checkpoint_path: path to the checkpoint to restore variables from.
include_global_step: whether to include `global_step` variable, if it
exists. Default True.
Returns:
A list or dictionary of variables.
Raises:
ValueError: if `variables` is not a list or dict.
"""
if isinstance(variables, list):
variable_names_map = {}
for variable in variables:
if isinstance(variable, tf_variables.PartitionedVariable):
name = variable.name
else:
name = variable.op.name
variable_names_map[name] = variable
elif isinstance(variables, dict):
variable_names_map = variables
else:
raise ValueError('`variables` is expected to be a list or dict.')
ckpt_reader = tf.train.NewCheckpointReader(checkpoint_path)
ckpt_vars_to_shape_map = ckpt_reader.get_variable_to_shape_map()
if not include_global_step:
ckpt_vars_to_shape_map.pop(tf.GraphKeys.GLOBAL_STEP, None)
vars_in_ckpt = {}
for variable_name, variable in sorted(variable_names_map.items()):
if variable_name in ckpt_vars_to_shape_map:
if ckpt_vars_to_shape_map[variable_name] == variable.shape.as_list():
vars_in_ckpt[variable_name] = variable
else:
logging.warning('Variable [%s] is available in checkpoint, but has an '
'incompatible shape with model variable. Checkpoint '
'shape: [%s], model variable shape: [%s]. This '
'variable will not be initialized from the checkpoint.',
variable_name, ckpt_vars_to_shape_map[variable_name],
variable.shape.as_list())
else:
logging.warning('Variable [%s] is not available in checkpoint',
variable_name)
if isinstance(variables, list):
return list(vars_in_ckpt.values())
return vars_in_ckpt
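# --- Added usage sketch; not part of the original module. `tmp_dir` and the
# variable names are made up for the example; the sketch writes a throwaway
# checkpoint and then asks which model variables it can restore.
def _variables_available_in_checkpoint_example(tmp_dir):
  """Saves one variable, then checks availability of two model variables."""
  checkpoint_path = os.path.join(tmp_dir, 'model.ckpt')
  with tf.Graph().as_default(), tf.Session() as sess:
    weights = tf.Variable(tf.zeros([3, 3]), name='conv/weights')
    sess.run(tf.global_variables_initializer())
    tf.train.Saver([weights]).save(sess, checkpoint_path)
  with tf.Graph().as_default():
    weights = tf.Variable(tf.zeros([3, 3]), name='conv/weights')
    bias = tf.Variable(tf.zeros([7]), name='conv/bias')  # Not in checkpoint.
    available = get_variables_available_in_checkpoint(
        [weights, bias], checkpoint_path, include_global_step=False)
    # Only 'conv/weights' is returned; 'conv/bias' is logged as unavailable.
    return [var.op.name for var in available]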
def get_global_variables_safely():
"""If not executing eagerly, returns tf.global_variables().
Raises a ValueError if eager execution is enabled,
because the variables are not tracked when executing eagerly.
If executing eagerly, use a Keras model's .variables property instead.
Returns:
The result of tf.global_variables()
"""
with tf.init_scope():
if tf.executing_eagerly():
raise ValueError("Global variables collection is not tracked when "
"executing eagerly. Use a Keras model's `.variables` "
"attribute instead.")
return tf.global_variables()
def ensure_checkpoint_supported(checkpoint_path, checkpoint_type, model_dir):
"""Ensures that the given checkpoint can be properly loaded.
Performs the following checks
1. Raises an error if checkpoint_path and model_dir are same.
2. Checks that checkpoint_path does not contain a deprecated checkpoint file
by inspecting its variables.
Args:
checkpoint_path: str, path to checkpoint.
checkpoint_type: str, denotes the type of checkpoint.
model_dir: The model directory to store intermediate training checkpoints.
Raises:
RuntimeError: If
      1. We detect a deprecated checkpoint file.
2. model_dir and checkpoint_path are in the same directory.
"""
variables = tf.train.list_variables(checkpoint_path)
if checkpoint_type in DEPRECATED_CHECKPOINT_MAP:
blocked_prefix, msg = DEPRECATED_CHECKPOINT_MAP[checkpoint_type]
for var_name, _ in variables:
if var_name.startswith(blocked_prefix):
tf.logging.error('Found variable name - %s with prefix %s', var_name,
blocked_prefix)
raise RuntimeError(msg)
checkpoint_path_dir = os.path.abspath(os.path.dirname(checkpoint_path))
model_dir = os.path.abspath(model_dir)
if model_dir == checkpoint_path_dir:
raise RuntimeError(
('Checkpoint dir ({}) and model_dir ({}) cannot be same.'.format(
checkpoint_path_dir, model_dir) +
(' Please set model_dir to a different path.')))
| 8,682 | 36.588745 | 80 | py |
models | models-master/research/object_detection/utils/per_image_vrd_evaluation.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluates Visual Relations Detection(VRD) result evaluation on an image.
Annotate each VRD result as true positives or false positive according to
a predefined IOU ratio. Multi-class detection is supported by default.
Based on the settings, per image evaluation is performed either on phrase
detection subtask or on relation detection subtask.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
class PerImageVRDEvaluation(object):
"""Evaluate vrd result of a single image."""
def __init__(self, matching_iou_threshold=0.5):
"""Initialized PerImageVRDEvaluation by evaluation parameters.
Args:
matching_iou_threshold: A ratio of area intersection to union, which is
the threshold to consider whether a detection is true positive or not;
in phrase detection subtask.
"""
self.matching_iou_threshold = matching_iou_threshold
def compute_detection_tp_fp(self, detected_box_tuples, detected_scores,
detected_class_tuples, groundtruth_box_tuples,
groundtruth_class_tuples):
"""Evaluates VRD as being tp, fp from a single image.
Args:
detected_box_tuples: A numpy array of structures with shape [N,],
representing N tuples, each tuple containing the same number of named
bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max].
detected_scores: A float numpy array of shape [N,], representing
the confidence scores of the detected N object instances.
detected_class_tuples: A numpy array of structures shape [N,],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
groundtruth_box_tuples: A float numpy array of structures with the shape
[M,], representing M tuples, each tuple containing the same number
of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max].
groundtruth_class_tuples: A numpy array of structures shape [M,],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
Returns:
scores: A single numpy array with shape [N,], representing N scores
        detected with object class, sorted in descending order.
tp_fp_labels: A single boolean numpy array of shape [N,], representing N
True/False positive label, one label per tuple. The labels are sorted
so that the order of the labels matches the order of the scores.
result_mapping: A numpy array with shape [N,] with original index of each
entry.
"""
scores, tp_fp_labels, result_mapping = self._compute_tp_fp(
detected_box_tuples=detected_box_tuples,
detected_scores=detected_scores,
detected_class_tuples=detected_class_tuples,
groundtruth_box_tuples=groundtruth_box_tuples,
groundtruth_class_tuples=groundtruth_class_tuples)
return scores, tp_fp_labels, result_mapping
def _compute_tp_fp(self, detected_box_tuples, detected_scores,
detected_class_tuples, groundtruth_box_tuples,
groundtruth_class_tuples):
"""Labels as true/false positives detection tuples across all classes.
Args:
detected_box_tuples: A numpy array of structures with shape [N,],
representing N tuples, each tuple containing the same number of named
bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N,], representing
the confidence scores of the detected N object instances.
detected_class_tuples: A numpy array of structures shape [N,],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
groundtruth_box_tuples: A float numpy array of structures with the shape
[M,], representing M tuples, each tuple containing the same number
of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max]
groundtruth_class_tuples: A numpy array of structures shape [M,],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
Returns:
scores: A single numpy array with shape [N,], representing N scores
        detected with object class, sorted in descending order.
tp_fp_labels: A single boolean numpy array of shape [N,], representing N
True/False positive label, one label per tuple. The labels are sorted
so that the order of the labels matches the order of the scores.
result_mapping: A numpy array with shape [N,] with original index of each
entry.
"""
unique_gt_tuples = np.unique(
np.concatenate((groundtruth_class_tuples, detected_class_tuples)))
result_scores = []
result_tp_fp_labels = []
result_mapping = []
for unique_tuple in unique_gt_tuples:
detections_selector = (detected_class_tuples == unique_tuple)
gt_selector = (groundtruth_class_tuples == unique_tuple)
selector_mapping = np.where(detections_selector)[0]
detection_scores_per_tuple = detected_scores[detections_selector]
detection_box_per_tuple = detected_box_tuples[detections_selector]
sorted_indices = np.argsort(detection_scores_per_tuple)
sorted_indices = sorted_indices[::-1]
tp_fp_labels = self._compute_tp_fp_for_single_class(
detected_box_tuples=detection_box_per_tuple[sorted_indices],
groundtruth_box_tuples=groundtruth_box_tuples[gt_selector])
result_scores.append(detection_scores_per_tuple[sorted_indices])
result_tp_fp_labels.append(tp_fp_labels)
result_mapping.append(selector_mapping[sorted_indices])
if result_scores:
result_scores = np.concatenate(result_scores)
result_tp_fp_labels = np.concatenate(result_tp_fp_labels)
result_mapping = np.concatenate(result_mapping)
else:
result_scores = np.array([], dtype=float)
result_tp_fp_labels = np.array([], dtype=bool)
result_mapping = np.array([], dtype=int)
sorted_indices = np.argsort(result_scores)
sorted_indices = sorted_indices[::-1]
return result_scores[sorted_indices], result_tp_fp_labels[
sorted_indices], result_mapping[sorted_indices]
def _get_overlaps_and_scores_relation_tuples(self, detected_box_tuples,
groundtruth_box_tuples):
"""Computes overlaps and scores between detected and groundtruth tuples.
Both detections and groundtruth boxes have the same class tuples.
Args:
detected_box_tuples: A numpy array of structures with shape [N,],
representing N tuples, each tuple containing the same number of named
bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max]
groundtruth_box_tuples: A float numpy array of structures with the shape
[M,], representing M tuples, each tuple containing the same number
of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max]
Returns:
result_iou: A float numpy array of size
[num_detected_tuples, num_gt_box_tuples].
"""
result_iou = np.ones(
(detected_box_tuples.shape[0], groundtruth_box_tuples.shape[0]),
dtype=float)
for field in detected_box_tuples.dtype.fields:
detected_boxlist_field = np_box_list.BoxList(detected_box_tuples[field])
gt_boxlist_field = np_box_list.BoxList(groundtruth_box_tuples[field])
iou_field = np_box_list_ops.iou(detected_boxlist_field, gt_boxlist_field)
result_iou = np.minimum(iou_field, result_iou)
return result_iou
def _compute_tp_fp_for_single_class(self, detected_box_tuples,
groundtruth_box_tuples):
"""Labels boxes detected with the same class from the same image as tp/fp.
Detection boxes are expected to be already sorted by score.
Args:
detected_box_tuples: A numpy array of structures with shape [N,],
representing N tuples, each tuple containing the same number of named
bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max]
groundtruth_box_tuples: A float numpy array of structures with the shape
[M,], representing M tuples, each tuple containing the same number
of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max]
Returns:
tp_fp_labels: a boolean numpy array indicating whether a detection is a
true positive.
"""
if detected_box_tuples.size == 0:
return np.array([], dtype=bool)
min_iou = self._get_overlaps_and_scores_relation_tuples(
detected_box_tuples, groundtruth_box_tuples)
num_detected_tuples = detected_box_tuples.shape[0]
tp_fp_labels = np.zeros(num_detected_tuples, dtype=bool)
if min_iou.shape[1] > 0:
max_overlap_gt_ids = np.argmax(min_iou, axis=1)
is_gt_tuple_detected = np.zeros(min_iou.shape[1], dtype=bool)
for i in range(num_detected_tuples):
gt_id = max_overlap_gt_ids[i]
if min_iou[i, gt_id] >= self.matching_iou_threshold:
if not is_gt_tuple_detected[gt_id]:
tp_fp_labels[i] = True
is_gt_tuple_detected[gt_id] = True
return tp_fp_labels
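# --- Added usage sketch; not part of the original class. The structured dtype
# field names ('subject', 'object', 'relation') and all boxes, scores and
# class ids below are invented to illustrate the expected input format.
def _per_image_vrd_evaluation_example():
  """Marks a single detected relation tuple as a true positive."""
  box_dtype = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))])
  label_dtype = np.dtype([('subject', 'i4'), ('object', 'i4'),
                          ('relation', 'i4')])
  detected_box_tuples = np.array([([0, 0, 10, 10], [5, 5, 20, 20])],
                                 dtype=box_dtype)
  detected_scores = np.array([0.8], dtype=float)
  detected_class_tuples = np.array([(1, 2, 3)], dtype=label_dtype)
  groundtruth_box_tuples = np.array([([0, 0, 10, 10], [5, 5, 20, 20])],
                                    dtype=box_dtype)
  groundtruth_class_tuples = np.array([(1, 2, 3)], dtype=label_dtype)
  evaluator = PerImageVRDEvaluation(matching_iou_threshold=0.5)
  scores, tp_fp_labels, mapping = evaluator.compute_detection_tp_fp(
      detected_box_tuples, detected_scores, detected_class_tuples,
      groundtruth_box_tuples, groundtruth_class_tuples)
  # The detection overlaps the groundtruth tuple exactly on both named boxes,
  # so tp_fp_labels == [True] and mapping == [0].
  return scores, tp_fp_labels, mapping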
| 10,396 | 44.204348 | 80 | py |
models | models-master/research/object_detection/utils/np_box_list.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy BoxList classes and functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
class BoxList(object):
"""Box collection.
BoxList represents a list of bounding boxes as numpy array, where each
bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes within a
given list correspond to a single image.
Optionally, users can add additional related fields (such as
objectness/classification scores).
"""
def __init__(self, data):
"""Constructs box collection.
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Raises:
ValueError: if bbox data is not a numpy array
ValueError: if invalid dimensions for bbox data
"""
if not isinstance(data, np.ndarray):
raise ValueError('data must be a numpy array.')
if len(data.shape) != 2 or data.shape[1] != 4:
raise ValueError('Invalid dimensions for box data.')
if data.dtype != np.float32 and data.dtype != np.float64:
raise ValueError('Invalid data type for box data: float is required.')
if not self._is_valid_boxes(data):
raise ValueError('Invalid box data. data must be a numpy array of '
'N*[y_min, x_min, y_max, x_max]')
self.data = {'boxes': data}
def num_boxes(self):
"""Return number of boxes held in collections."""
return self.data['boxes'].shape[0]
def get_extra_fields(self):
"""Return all non-box fields."""
return [k for k in self.data.keys() if k != 'boxes']
def has_field(self, field):
return field in self.data
def add_field(self, field, field_data):
"""Add data to a specified field.
Args:
      field: a string parameter used to specify a related field to be accessed.
field_data: a numpy array of [N, ...] representing the data associated
with the field.
Raises:
      ValueError: if the field already exists or the dimension of the field
          data does not match the number of boxes.
"""
if self.has_field(field):
      raise ValueError('Field ' + field + ' already exists')
if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes():
raise ValueError('Invalid dimensions for field data')
self.data[field] = field_data
def get(self):
"""Convenience function for accesssing box coordinates.
Returns:
a numpy array of shape [N, 4] representing box corners
"""
return self.get_field('boxes')
def get_field(self, field):
"""Accesses data associated with the specified field in the box collection.
Args:
      field: a string parameter used to specify a related field to be accessed.
Returns:
a numpy 1-d array representing data of an associated field
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field {} does not exist'.format(field))
return self.data[field]
def get_coordinates(self):
"""Get corner coordinates of boxes.
Returns:
a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]
"""
box_coordinates = self.get()
y_min = box_coordinates[:, 0]
x_min = box_coordinates[:, 1]
y_max = box_coordinates[:, 2]
x_max = box_coordinates[:, 3]
return [y_min, x_min, y_max, x_max]
def _is_valid_boxes(self, data):
"""Check whether data fullfills the format of N*[ymin, xmin, ymax, xmin].
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Returns:
a boolean indicating whether all ymax of boxes are equal or greater than
ymin, and all xmax of boxes are equal or greater than xmin.
"""
if data.shape[0] > 0:
for i in range(data.shape[0]):
if data[i, 0] > data[i, 2] or data[i, 1] > data[i, 3]:
return False
return True
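# --- Added usage sketch; not part of the original module. The coordinates and
# scores below are made-up numbers used only to show the expected shapes.
def _box_list_example():
  """Builds a two-box BoxList and attaches per-box scores."""
  boxes = np.array([[0.1, 0.1, 0.4, 0.4],
                    [0.2, 0.3, 0.7, 0.9]], dtype=np.float32)
  boxlist = BoxList(boxes)
  # Extra fields must share their first dimension with the number of boxes.
  boxlist.add_field('scores', np.array([0.9, 0.75], dtype=np.float32))
  y_min, x_min, y_max, x_max = boxlist.get_coordinates()
  # num_boxes() == 2; get_field('scores') returns the attached scores.
  return boxlist, (y_min, x_min, y_max, x_max)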
| 4,656 | 32.746377 | 80 | py |
models | models-master/research/object_detection/utils/label_map_util_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.label_map_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.protos import string_int_label_map_pb2
from object_detection.utils import label_map_util
class LabelMapUtilTest(tf.test.TestCase):
def _generate_label_map(self, num_classes):
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
for i in range(1, num_classes + 1):
item = label_map_proto.item.add()
item.id = i
item.name = 'label_' + str(i)
item.display_name = str(i)
return label_map_proto
def _generate_label_map_with_hierarchy(self, num_classes, ancestors_dict,
descendants_dict):
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
for i in range(1, num_classes + 1):
item = label_map_proto.item.add()
item.id = i
item.name = 'label_' + str(i)
item.display_name = str(i)
if i in ancestors_dict:
for anc_i in ancestors_dict[i]:
item.ancestor_ids.append(anc_i)
if i in descendants_dict:
for desc_i in descendants_dict[i]:
item.descendant_ids.append(desc_i)
return label_map_proto
def test_get_label_map_dict(self):
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_label_map_dict(label_map_path)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['cat'], 2)
def test_get_keypoint_label_map_dict(self):
label_map_string = """
item: {
id: 1
name: 'face'
display_name: 'face'
keypoints {
id: 0
label: 'left_eye'
}
keypoints {
id: 1
label: 'right_eye'
}
}
item: {
id: 2
name: '/m/01g317'
display_name: 'person'
keypoints {
id: 2
label: 'left_shoulder'
}
keypoints {
id: 3
label: 'right_shoulder'
}
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_keypoint_label_map_dict(label_map_path)
self.assertEqual(label_map_dict['left_eye'], 0)
self.assertEqual(label_map_dict['right_eye'], 1)
self.assertEqual(label_map_dict['left_shoulder'], 2)
self.assertEqual(label_map_dict['right_shoulder'], 3)
def test_get_keypoint_label_map_dict_invalid(self):
label_map_string = """
item: {
id: 1
name: 'face'
display_name: 'face'
keypoints {
id: 0
label: 'left_eye'
}
keypoints {
id: 1
label: 'right_eye'
}
}
item: {
id: 2
name: '/m/01g317'
display_name: 'person'
keypoints {
id: 0
label: 'left_shoulder'
}
keypoints {
id: 1
label: 'right_shoulder'
}
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
with self.assertRaises(ValueError):
_ = label_map_util.get_keypoint_label_map_dict(
label_map_path)
def test_get_label_map_dict_from_proto(self):
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_proto = text_format.Parse(
label_map_string, string_int_label_map_pb2.StringIntLabelMap())
label_map_dict = label_map_util.get_label_map_dict(label_map_proto)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['cat'], 2)
def test_get_label_map_dict_display(self):
label_map_string = """
item {
id:2
display_name:'cat'
}
item {
id:1
display_name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_label_map_dict(
label_map_path, use_display_name=True)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['cat'], 2)
def test_load_bad_label_map(self):
label_map_string = """
item {
id:0
name:'class that should not be indexed at zero'
}
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
with self.assertRaises(ValueError):
label_map_util.load_labelmap(label_map_path)
def test_load_label_map_with_background(self):
label_map_string = """
item {
id:0
name:'background'
}
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_label_map_dict(label_map_path)
self.assertEqual(label_map_dict['background'], 0)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['cat'], 2)
def test_get_label_map_dict_with_fill_in_gaps_and_background(self):
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_label_map_dict(
label_map_path, fill_in_gaps_and_background=True)
self.assertEqual(label_map_dict['background'], 0)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['2'], 2)
self.assertEqual(label_map_dict['cat'], 3)
self.assertEqual(len(label_map_dict), max(label_map_dict.values()) + 1)
def test_keep_categories_with_unique_id(self):
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'child'
}
item {
id:1
name:'person'
}
item {
id:1
name:'n00007846'
}
"""
text_format.Parse(label_map_string, label_map_proto)
categories = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=3)
self.assertListEqual([{
'id': 2,
'name': u'cat'
}, {
'id': 1,
'name': u'child'
}], categories)
def test_convert_label_map_to_categories_no_label_map(self):
categories = label_map_util.convert_label_map_to_categories(
None, max_num_classes=3)
expected_categories_list = [{
'name': u'category_1',
'id': 1
}, {
'name': u'category_2',
'id': 2
}, {
'name': u'category_3',
'id': 3
}]
self.assertListEqual(expected_categories_list, categories)
def test_convert_label_map_to_categories_lvis_frequency_and_counts(self):
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
label_map_string = """
item {
id:1
name:'person'
frequency: FREQUENT
instance_count: 1000
}
item {
id:2
name:'dog'
frequency: COMMON
instance_count: 100
}
item {
id:3
name:'cat'
frequency: RARE
instance_count: 10
}
"""
text_format.Parse(label_map_string, label_map_proto)
categories = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=3)
self.assertListEqual([{
'id': 1,
'name': u'person',
'frequency': 'f',
'instance_count': 1000
}, {
'id': 2,
'name': u'dog',
'frequency': 'c',
'instance_count': 100
}, {
'id': 3,
'name': u'cat',
'frequency': 'r',
'instance_count': 10
}], categories)
def test_convert_label_map_to_categories(self):
label_map_proto = self._generate_label_map(num_classes=4)
categories = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=3)
expected_categories_list = [{
'name': u'1',
'id': 1,
}, {
'name': u'2',
'id': 2,
}, {
'name': u'3',
'id': 3,
}]
self.assertListEqual(expected_categories_list, categories)
def test_convert_label_map_with_keypoints_to_categories(self):
label_map_str = """
item {
id: 1
name: 'person'
keypoints: {
id: 1
label: 'nose'
}
keypoints: {
id: 2
label: 'ear'
}
}
"""
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
text_format.Parse(label_map_str, label_map_proto)
categories = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=1)
self.assertEqual('person', categories[0]['name'])
self.assertEqual(1, categories[0]['id'])
self.assertEqual(1, categories[0]['keypoints']['nose'])
self.assertEqual(2, categories[0]['keypoints']['ear'])
def test_disallow_duplicate_keypoint_ids(self):
label_map_str = """
item {
id: 1
name: 'person'
keypoints: {
id: 1
label: 'right_elbow'
}
keypoints: {
id: 1
label: 'left_elbow'
}
}
item {
id: 2
name: 'face'
keypoints: {
id: 3
label: 'ear'
}
}
"""
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
text_format.Parse(label_map_str, label_map_proto)
with self.assertRaises(ValueError):
label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=2)
def test_convert_label_map_to_categories_with_few_classes(self):
label_map_proto = self._generate_label_map(num_classes=4)
cat_no_offset = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=2)
expected_categories_list = [{
'name': u'1',
'id': 1
}, {
'name': u'2',
'id': 2
}]
self.assertListEqual(expected_categories_list, cat_no_offset)
def test_get_max_label_map_index(self):
num_classes = 4
label_map_proto = self._generate_label_map(num_classes=num_classes)
max_index = label_map_util.get_max_label_map_index(label_map_proto)
self.assertEqual(num_classes, max_index)
def test_create_category_index(self):
categories = [{'name': u'1', 'id': 1}, {'name': u'2', 'id': 2}]
category_index = label_map_util.create_category_index(categories)
self.assertDictEqual({
1: {
'name': u'1',
'id': 1
},
2: {
'name': u'2',
'id': 2
}
}, category_index)
def test_create_categories_from_labelmap(self):
label_map_string = """
item {
id:1
name:'dog'
}
item {
id:2
name:'cat'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
categories = label_map_util.create_categories_from_labelmap(label_map_path)
self.assertListEqual([{
'name': u'dog',
'id': 1
}, {
'name': u'cat',
'id': 2
}], categories)
def test_create_category_index_from_labelmap(self):
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
category_index = label_map_util.create_category_index_from_labelmap(
label_map_path)
self.assertDictEqual({
1: {
'name': u'dog',
'id': 1
},
2: {
'name': u'cat',
'id': 2
}
}, category_index)
def test_create_category_index_from_labelmap_display(self):
label_map_string = """
item {
id:2
name:'cat'
display_name:'meow'
}
item {
id:1
name:'dog'
display_name:'woof'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
self.assertDictEqual({
1: {
'name': u'dog',
'id': 1
},
2: {
'name': u'cat',
'id': 2
}
}, label_map_util.create_category_index_from_labelmap(
label_map_path, False))
self.assertDictEqual({
1: {
'name': u'woof',
'id': 1
},
2: {
'name': u'meow',
'id': 2
}
}, label_map_util.create_category_index_from_labelmap(label_map_path))
def test_get_label_map_hierarchy_lut(self):
num_classes = 5
ancestors = {2: [1, 3], 5: [1]}
descendants = {1: [2], 5: [1, 2]}
label_map = self._generate_label_map_with_hierarchy(num_classes, ancestors,
descendants)
gt_hierarchy_dict_lut = {
'ancestors':
np.array([
[1, 0, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[1, 0, 0, 0, 1],
]),
'descendants':
np.array([
[1, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[1, 1, 0, 0, 1],
]),
}
ancestors_lut, descendants_lut = (
label_map_util.get_label_map_hierarchy_lut(label_map, True))
np.testing.assert_array_equal(gt_hierarchy_dict_lut['ancestors'],
ancestors_lut)
np.testing.assert_array_equal(gt_hierarchy_dict_lut['descendants'],
descendants_lut)
if __name__ == '__main__':
tf.test.main()
| 15,772 | 26.383681 | 80 | py |
models | models-master/research/object_detection/utils/metrics.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for computing metrics like precision, recall, CorLoc and etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
def compute_precision_recall(scores, labels, num_gt):
"""Compute precision and recall.
Args:
scores: A float numpy array representing detection score
labels: A float numpy array representing weighted true/false positive labels
num_gt: Number of ground truth instances
Raises:
ValueError: if the input is not of the correct format
Returns:
precision: Fraction of positive instances over detected ones. This value is
None if no ground truth labels are present.
recall: Fraction of detected positive instance over all positive instances.
This value is None if no ground truth labels are present.
"""
if not isinstance(labels, np.ndarray) or len(labels.shape) != 1:
raise ValueError("labels must be single dimension numpy array")
if labels.dtype != float and labels.dtype != bool:
raise ValueError("labels type must be either bool or float")
if not isinstance(scores, np.ndarray) or len(scores.shape) != 1:
raise ValueError("scores must be single dimension numpy array")
if num_gt < np.sum(labels):
raise ValueError("Number of true positives must be smaller than num_gt.")
if len(scores) != len(labels):
raise ValueError("scores and labels must be of the same size.")
if num_gt == 0:
return None, None
sorted_indices = np.argsort(scores)
sorted_indices = sorted_indices[::-1]
true_positive_labels = labels[sorted_indices]
false_positive_labels = (true_positive_labels <= 0).astype(float)
cum_true_positives = np.cumsum(true_positive_labels)
cum_false_positives = np.cumsum(false_positive_labels)
precision = cum_true_positives.astype(float) / (
cum_true_positives + cum_false_positives)
recall = cum_true_positives.astype(float) / num_gt
return precision, recall
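# --- Added worked example; not part of the original module. The scores and
# labels are made-up numbers chosen so the result is easy to verify by hand.
def _compute_precision_recall_example():
  """Three detections, two true positives, three groundtruth instances."""
  scores = np.array([0.9, 0.6, 0.8], dtype=float)
  labels = np.array([1.0, 1.0, 0.0], dtype=float)  # 1.0 marks a true positive.
  precision, recall = compute_precision_recall(scores, labels, num_gt=3)
  # Sorting by score reorders the labels to [1, 0, 1], giving
  # precision == [1.0, 0.5, 2/3] and recall == [1/3, 1/3, 2/3].
  return precision, recall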
def compute_average_precision(precision, recall):
"""Compute Average Precision according to the definition in VOCdevkit.
Precision is modified to ensure that it does not decrease as recall
  decreases.
Args:
precision: A float [N, 1] numpy array of precisions
recall: A float [N, 1] numpy array of recalls
Raises:
ValueError: if the input is not of the correct format
Returns:
    average_precision: The area under the precision recall curve. NaN if
precision and recall are None.
"""
if precision is None:
if recall is not None:
raise ValueError("If precision is None, recall must also be None")
return np.NAN
if not isinstance(precision, np.ndarray) or not isinstance(
recall, np.ndarray):
raise ValueError("precision and recall must be numpy array")
if precision.dtype != float or recall.dtype != float:
raise ValueError("input must be float numpy array.")
if len(precision) != len(recall):
raise ValueError("precision and recall must be of the same size.")
if not precision.size:
return 0.0
if np.amin(precision) < 0 or np.amax(precision) > 1:
raise ValueError("Precision must be in the range of [0, 1].")
if np.amin(recall) < 0 or np.amax(recall) > 1:
raise ValueError("recall must be in the range of [0, 1].")
if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
raise ValueError("recall must be a non-decreasing array")
recall = np.concatenate([[0], recall, [1]])
precision = np.concatenate([[0], precision, [0]])
# Preprocess precision to be a non-decreasing array
for i in range(len(precision) - 2, -1, -1):
precision[i] = np.maximum(precision[i], precision[i + 1])
indices = np.where(recall[1:] != recall[:-1])[0] + 1
average_precision = np.sum(
(recall[indices] - recall[indices - 1]) * precision[indices])
return average_precision
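# --- Added worked example; not part of the original module. It reuses the
# made-up precision/recall curve from the sketch above.
def _compute_average_precision_example():
  """Interpolated AP of a tiny three-point precision/recall curve."""
  precision = np.array([1.0, 0.5, 2.0 / 3.0], dtype=float)
  recall = np.array([1.0 / 3.0, 1.0 / 3.0, 2.0 / 3.0], dtype=float)
  # Precision is first made non-increasing from the right, then the area is
  # summed where recall changes: 1/3 * 1.0 + 1/3 * (2/3) + 1/3 * 0.0 = 5/9.
  return compute_average_precision(precision, recall)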
def compute_cor_loc(num_gt_imgs_per_class,
num_images_correctly_detected_per_class):
"""Compute CorLoc according to the definition in the following paper.
https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf
Returns nans if there are no ground truth images for a class.
Args:
num_gt_imgs_per_class: 1D array, representing number of images containing
at least one object instance of a particular class
num_images_correctly_detected_per_class: 1D array, representing number of
images that are correctly detected at least one object instance of a
particular class
Returns:
corloc_per_class: A float numpy array represents the corloc score of each
class
"""
return np.where(
num_gt_imgs_per_class == 0, np.nan,
num_images_correctly_detected_per_class / num_gt_imgs_per_class)
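# --- Added worked example; not part of the original module. The per-class
# image counts are invented.
def _compute_cor_loc_example():
  """CorLoc per class; a class with no groundtruth images yields NaN."""
  num_gt_imgs_per_class = np.array([10, 0, 4], dtype=float)
  num_images_correctly_detected_per_class = np.array([7, 0, 1], dtype=float)
  # Returns [0.7, nan, 0.25]; numpy may warn about the 0/0 division before
  # np.where substitutes NaN for the class without groundtruth images.
  return compute_cor_loc(num_gt_imgs_per_class,
                         num_images_correctly_detected_per_class)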
def compute_median_rank_at_k(tp_fp_list, k):
"""Computes MedianRank@k, where k is the top-scoring labels.
Args:
    tp_fp_list: a list of numpy arrays; each numpy array corresponds to all
        detections on a single image, where detections are sorted by score in
descending order. Further, each numpy array element can have boolean or
float values. True positive elements have either value >0.0 or True;
any other value is considered false positive.
k: number of top-scoring proposals to take.
Returns:
median_rank: median rank of all true positive proposals among top k by
score.
"""
ranks = []
for i in range(len(tp_fp_list)):
ranks.append(
np.where(tp_fp_list[i][0:min(k, tp_fp_list[i].shape[0])] > 0)[0])
concatenated_ranks = np.concatenate(ranks)
return np.median(concatenated_ranks)
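# --- Added worked example; not part of the original module. The tp/fp arrays
# are invented and already sorted by detection score.
def _compute_median_rank_at_k_example():
  """Median rank of true positives within each image's top-3 detections."""
  tp_fp_list = [np.array([0.0, 1.0, 1.0, 0.0]), np.array([1.0, 0.0, 0.0])]
  # True-positive ranks within the top 3 are [1, 2] and [0]; the median is 1.
  return compute_median_rank_at_k(tp_fp_list, k=3)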
def compute_recall_at_k(tp_fp_list, num_gt, k):
"""Computes Recall@k, MedianRank@k, where k is the top-scoring labels.
Args:
    tp_fp_list: a list of numpy arrays; each numpy array corresponds to all
        detections on a single image, where detections are sorted by score in
descending order. Further, each numpy array element can have boolean or
float values. True positive elements have either value >0.0 or True;
any other value is considered false positive.
    num_gt: number of groundtruth annotations.
k: number of top-scoring proposals to take.
Returns:
recall: recall evaluated on the top k by score detections.
"""
tp_fp_eval = []
for i in range(len(tp_fp_list)):
tp_fp_eval.append(tp_fp_list[i][0:min(k, tp_fp_list[i].shape[0])])
tp_fp_eval = np.concatenate(tp_fp_eval)
return np.sum(tp_fp_eval) / num_gt
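# --- Added worked example; not part of the original module. The tp/fp arrays
# are invented and already sorted by detection score.
def _compute_recall_at_k_example():
  """Recall over the top-2 scoring detections of each of two images."""
  tp_fp_list = [np.array([1.0, 0.0, 1.0]), np.array([0.0, 1.0])]
  # The top 2 of each image contribute [1, 0] and [0, 1]: two true positives
  # out of four groundtruth annotations, so the returned recall is 0.5.
  return compute_recall_at_k(tp_fp_list, num_gt=4, k=2)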
| 7,221 | 36.226804 | 80 | py |
models | models-master/research/object_detection/utils/object_detection_evaluation_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.object_detection_evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl.testing import parameterized
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection import eval_util
from object_detection.core import standard_fields
from object_detection.utils import object_detection_evaluation
from object_detection.utils import tf_version
class OpenImagesV2EvaluationTest(tf.test.TestCase):
def test_returns_correct_metric_values(self):
categories = [{
'id': 1,
'name': 'cat'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'elephant'
}]
oiv2_evaluator = object_detection_evaluation.OpenImagesDetectionEvaluator(
categories)
image_key1 = 'img1'
groundtruth_boxes1 = np.array(
[[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
oiv2_evaluator.add_single_ground_truth_image_info(image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1,
standard_fields.InputDataFields.groundtruth_group_of:
np.array([], dtype=bool)
})
image_key2 = 'img2'
groundtruth_boxes2 = np.array(
[[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
groundtruth_is_group_of_list2 = np.array([False, True, False], dtype=bool)
oiv2_evaluator.add_single_ground_truth_image_info(image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2,
standard_fields.InputDataFields.groundtruth_group_of:
groundtruth_is_group_of_list2
})
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([2], dtype=int)
oiv2_evaluator.add_single_ground_truth_image_info(image_key3, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels3
})
# Add detections
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([1, 1, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
oiv2_evaluator.add_single_detected_image_info(image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
metrics = oiv2_evaluator.evaluate()
self.assertAlmostEqual(
metrics['OpenImagesV2_PerformanceByCategory/[email protected]/dog'], 0.0)
self.assertAlmostEqual(
metrics['OpenImagesV2_PerformanceByCategory/[email protected]/elephant'], 0.0)
self.assertAlmostEqual(
metrics['OpenImagesV2_PerformanceByCategory/[email protected]/cat'], 0.16666666)
self.assertAlmostEqual(metrics['OpenImagesV2_Precision/[email protected]'],
0.05555555)
oiv2_evaluator.clear()
self.assertFalse(oiv2_evaluator._image_ids)
class OpenImagesChallengeEvaluatorTest(tf.test.TestCase):
def test_returns_correct_detection_metric_values(self):
categories = [{
'id': 1,
'name': 'cat'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'elephant'
}]
oivchallenge_evaluator = (
object_detection_evaluation.OpenImagesChallengeEvaluator(
categories, evaluate_masks=False, group_of_weight=0.5))
image_key = 'img1'
groundtruth_boxes = np.array(
[[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
groundtruth_class_labels = np.array([1, 3, 1], dtype=int)
groundtruth_is_group_of_list = np.array([False, False, True], dtype=bool)
groundtruth_verified_labels = np.array([1, 2, 3], dtype=int)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels,
standard_fields.InputDataFields.groundtruth_group_of:
groundtruth_is_group_of_list,
standard_fields.InputDataFields.groundtruth_image_classes:
groundtruth_verified_labels,
})
image_key = 'img2'
groundtruth_boxes = np.array(
[[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)
groundtruth_class_labels = np.array([1, 1, 3], dtype=int)
groundtruth_is_group_of_list = np.array([False, False, True], dtype=bool)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels,
standard_fields.InputDataFields.groundtruth_group_of:
groundtruth_is_group_of_list
})
image_key = 'img3'
groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels = np.array([2], dtype=int)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels
})
image_key = 'img1'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120]], dtype=float)
detected_class_labels = np.array([2, 2], dtype=int)
detected_scores = np.array([0.7, 0.8], dtype=float)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220],
[10, 10, 11, 11]],
dtype=float)
detected_class_labels = np.array([1, 1, 2, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.5, 0.9], dtype=float)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
image_key = 'img3'
detected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
detected_class_labels = np.array([2], dtype=int)
detected_scores = np.array([0.5], dtype=float)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
metrics = oivchallenge_evaluator.evaluate()
expected_metric_name = 'OpenImagesDetectionChallenge'
self.assertAlmostEqual(
metrics[
expected_metric_name + '_PerformanceByCategory/[email protected]/dog'],
0.3333333333)
self.assertAlmostEqual(
metrics[
expected_metric_name + '_PerformanceByCategory/[email protected]/elephant'],
0.333333333333)
self.assertAlmostEqual(
metrics[
expected_metric_name + '_PerformanceByCategory/[email protected]/cat'],
0.142857142857)
self.assertAlmostEqual(
metrics[expected_metric_name + '_Precision/[email protected]'],
0.269841269)
oivchallenge_evaluator.clear()
self.assertFalse(oivchallenge_evaluator._image_ids)
def test_returns_correct_instance_segm_metric_values(self):
categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
oivchallenge_evaluator = (
object_detection_evaluation.OpenImagesChallengeEvaluator(
categories, evaluate_masks=True))
image_key = 'img1'
groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels = np.array([1, 2, 1], dtype=int)
groundtruth_is_group_of_list = np.array([False, False, True], dtype=bool)
groundtruth_verified_labels = np.array([1, 2, 3], dtype=int)
groundtruth_mask_0 = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
zero_mask = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_mask_0, zero_mask, zero_mask],
axis=0)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels,
standard_fields.InputDataFields.groundtruth_group_of:
groundtruth_is_group_of_list,
standard_fields.InputDataFields.groundtruth_image_classes:
groundtruth_verified_labels,
standard_fields.InputDataFields.groundtruth_instance_masks:
groundtruth_masks
})
image_key = 'img3'
groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels = np.array([2], dtype=int)
groundtruth_mask_0 = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_mask_0], axis=0)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels,
standard_fields.InputDataFields.groundtruth_instance_masks:
groundtruth_masks
})
image_key = 'img1'
detected_boxes = np.array([[0, 0, 2, 2], [2, 2, 3, 3]], dtype=float)
detection_mask_0 = np.array([[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks = np.stack([detection_mask_0, zero_mask], axis=0)
detected_class_labels = np.array([2, 1], dtype=int)
detected_scores = np.array([0.7, 0.8], dtype=float)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels,
standard_fields.DetectionResultFields.detection_masks:
detected_masks
})
image_key = 'img3'
detected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
detected_class_labels = np.array([2], dtype=int)
detected_scores = np.array([0.5], dtype=float)
detected_mask_0 = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks = np.stack([detected_mask_0], axis=0)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels,
standard_fields.DetectionResultFields.detection_masks:
detected_masks
})
metrics = oivchallenge_evaluator.evaluate()
expected_metric_name = 'OpenImagesInstanceSegmentationChallenge'
self.assertAlmostEqual(
metrics[expected_metric_name + '_PerformanceByCategory/[email protected]/dog'],
1.0)
self.assertAlmostEqual(
metrics[
expected_metric_name + '_PerformanceByCategory/[email protected]/cat'],
0)
self.assertAlmostEqual(
metrics[expected_metric_name + '_Precision/[email protected]'], 0.5)
oivchallenge_evaluator.clear()
self.assertFalse(oivchallenge_evaluator._image_ids)
class PascalEvaluationTest(tf.test.TestCase):
def test_returns_correct_metric_values_on_boxes(self):
categories = [{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'},
{'id': 3, 'name': 'elephant'}]
# Add groundtruth
pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
categories)
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
pascal_evaluator.add_single_ground_truth_image_info(
image_key1,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1,
standard_fields.InputDataFields.groundtruth_difficult:
np.array([], dtype=bool)})
image_key2 = 'img2'
groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
[10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
pascal_evaluator.add_single_ground_truth_image_info(
image_key2,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2,
standard_fields.InputDataFields.groundtruth_difficult:
groundtruth_is_difficult_list2})
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([2], dtype=int)
pascal_evaluator.add_single_ground_truth_image_info(
image_key3,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels3})
# Add detections
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([1, 1, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
pascal_evaluator.add_single_detected_image_info(
image_key,
{standard_fields.DetectionResultFields.detection_boxes: detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels})
metrics = pascal_evaluator.evaluate()
self.assertAlmostEqual(
metrics['PascalBoxes_PerformanceByCategory/[email protected]/dog'], 0.0)
self.assertAlmostEqual(
metrics['PascalBoxes_PerformanceByCategory/[email protected]/elephant'], 0.0)
self.assertAlmostEqual(
metrics['PascalBoxes_PerformanceByCategory/[email protected]/cat'], 0.16666666)
self.assertAlmostEqual(metrics['PascalBoxes_Precision/[email protected]'],
0.05555555)
pascal_evaluator.clear()
self.assertFalse(pascal_evaluator._image_ids)
def test_returns_correct_metric_values_on_masks(self):
categories = [{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'},
{'id': 3, 'name': 'elephant'}]
# Add groundtruth
pascal_evaluator = (
object_detection_evaluation.PascalInstanceSegmentationEvaluator(
categories))
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
groundtruth_masks_1_0 = np.array([[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks_1_1 = np.array([[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0]], dtype=np.uint8)
groundtruth_masks_1_2 = np.array([[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]], dtype=np.uint8)
groundtruth_masks1 = np.stack(
[groundtruth_masks_1_0, groundtruth_masks_1_1, groundtruth_masks_1_2],
axis=0)
pascal_evaluator.add_single_ground_truth_image_info(
image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_instance_masks:
groundtruth_masks1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1,
standard_fields.InputDataFields.groundtruth_difficult:
np.array([], dtype=bool)
})
image_key2 = 'img2'
groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
[10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
groundtruth_masks_2_0 = np.array([[1, 1, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks_2_1 = np.array([[0, 0, 0, 0],
[1, 1, 1, 1],
[0, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks_2_2 = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 1, 1, 1]], dtype=np.uint8)
groundtruth_masks2 = np.stack(
[groundtruth_masks_2_0, groundtruth_masks_2_1, groundtruth_masks_2_2],
axis=0)
pascal_evaluator.add_single_ground_truth_image_info(
image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_instance_masks:
groundtruth_masks2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2,
standard_fields.InputDataFields.groundtruth_difficult:
groundtruth_is_difficult_list2
})
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([2], dtype=int)
groundtruth_masks_3_0 = np.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]], dtype=np.uint8)
groundtruth_masks3 = np.stack([groundtruth_masks_3_0], axis=0)
pascal_evaluator.add_single_ground_truth_image_info(
image_key3, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes3,
standard_fields.InputDataFields.groundtruth_instance_masks:
groundtruth_masks3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels3
})
# Add detections
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([1, 1, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
detected_masks_0 = np.array([[1, 1, 1, 1],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_1 = np.array([[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_2 = np.array([[0, 1, 0, 0],
[0, 1, 1, 0],
[0, 1, 0, 0]], dtype=np.uint8)
detected_masks = np.stack(
[detected_masks_0, detected_masks_1, detected_masks_2], axis=0)
pascal_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_masks:
detected_masks,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
metrics = pascal_evaluator.evaluate()
self.assertAlmostEqual(
metrics['PascalMasks_PerformanceByCategory/[email protected]/dog'], 0.0)
self.assertAlmostEqual(
metrics['PascalMasks_PerformanceByCategory/[email protected]/elephant'], 0.0)
self.assertAlmostEqual(
metrics['PascalMasks_PerformanceByCategory/[email protected]/cat'], 0.16666666)
self.assertAlmostEqual(metrics['PascalMasks_Precision/[email protected]'],
0.05555555)
pascal_evaluator.clear()
self.assertFalse(pascal_evaluator._image_ids)
class WeightedPascalEvaluationTest(tf.test.TestCase):
def setUp(self):
self.categories = [{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'},
{'id': 3, 'name': 'elephant'}]
def create_and_add_common_ground_truth(self):
# Add groundtruth
self.wp_eval = (
object_detection_evaluation.WeightedPascalDetectionEvaluator(
self.categories))
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
self.wp_eval.add_single_ground_truth_image_info(
image_key1,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1})
# add 'img2' separately
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([2], dtype=int)
self.wp_eval.add_single_ground_truth_image_info(
image_key3,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels3})
def add_common_detected(self):
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([1, 1, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
self.wp_eval.add_single_detected_image_info(
image_key,
{standard_fields.DetectionResultFields.detection_boxes: detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels})
def test_returns_correct_metric_values(self):
self.create_and_add_common_ground_truth()
image_key2 = 'img2'
groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
[10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
self.wp_eval.add_single_ground_truth_image_info(
image_key2,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2
})
self.add_common_detected()
metrics = self.wp_eval.evaluate()
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/[email protected]/dog'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/[email protected]/elephant'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/[email protected]/cat'], 0.5 / 4)
self.assertAlmostEqual(metrics[self.wp_eval._metric_prefix +
'Precision/[email protected]'],
1. / (4 + 1 + 2) / 3)
self.wp_eval.clear()
self.assertFalse(self.wp_eval._image_ids)
def test_returns_correct_metric_values_with_difficult_list(self):
self.create_and_add_common_ground_truth()
image_key2 = 'img2'
groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
[10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
self.wp_eval.add_single_ground_truth_image_info(
image_key2,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2,
standard_fields.InputDataFields.groundtruth_difficult:
groundtruth_is_difficult_list2
})
self.add_common_detected()
metrics = self.wp_eval.evaluate()
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/[email protected]/dog'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/[email protected]/elephant'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/[email protected]/cat'], 0.5 / 3)
self.assertAlmostEqual(metrics[self.wp_eval._metric_prefix +
'Precision/[email protected]'],
1. / (3 + 1 + 2) / 3)
self.wp_eval.clear()
self.assertFalse(self.wp_eval._image_ids)
class PrecisionAtRecallEvaluationTest(tf.test.TestCase):
def setUp(self):
self.categories = [{
'id': 1,
'name': 'cat'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'elephant'
}]
def create_and_add_common_ground_truth(self):
# Add groundtruth
self.wp_eval = (
object_detection_evaluation.PrecisionAtRecallDetectionEvaluator(
self.categories, recall_lower_bound=0.0, recall_upper_bound=0.5))
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
self.wp_eval.add_single_ground_truth_image_info(
image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1
})
# add 'img2' separately
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([2], dtype=int)
self.wp_eval.add_single_ground_truth_image_info(
image_key3, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels3
})
def add_common_detected(self):
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([1, 1, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
self.wp_eval.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
def test_returns_correct_metric_values(self):
self.create_and_add_common_ground_truth()
image_key2 = 'img2'
groundtruth_boxes2 = np.array(
[[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
self.wp_eval.add_single_ground_truth_image_info(
image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2
})
self.add_common_detected()
metrics = self.wp_eval.evaluate()
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/[email protected]/dog'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/[email protected]/elephant'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/[email protected]/cat'], 0.5 / 4)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'Precision/[email protected]@[0.0,0.5]Recall'], 1. / (3 + 1 + 2) / 4)
self.wp_eval.clear()
self.assertFalse(self.wp_eval._image_ids)
def test_returns_correct_metric_values_with_difficult_list(self):
self.create_and_add_common_ground_truth()
image_key2 = 'img2'
groundtruth_boxes2 = np.array(
[[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
self.wp_eval.add_single_ground_truth_image_info(
image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2,
standard_fields.InputDataFields.groundtruth_difficult:
groundtruth_is_difficult_list2
})
self.add_common_detected()
metrics = self.wp_eval.evaluate()
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/[email protected]/dog'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/[email protected]/elephant'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/[email protected]/cat'], 0.5 / 3)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'Precision/[email protected]@[0.0,0.5]Recall'], 1. / (3 + 1 + 2) / 3)
self.wp_eval.clear()
self.assertFalse(self.wp_eval._image_ids)
def test_returns_correct_metric_values_partial_labels(self):
# Create partial label evaluation object.
self.wp_eval_partial = (
object_detection_evaluation.PrecisionAtRecallDetectionEvaluator(
self.categories,
recall_lower_bound=0.0,
recall_upper_bound=1.0,
skip_predictions_for_unlabeled_class=True))
# The first test case is as follows:
#
# Labeled classes: [cat, dog]
# +--------------------------+-------------------------------------+------+
# | Groundtruth | Matched Detection | Type |
# +--------------------------+-------------------------------------+------+
# | cat [10, 10, 11, 11] | cat (0.6) [10, 10, 11, 11] | TP |
# | cat [100, 100, 220, 220] | - | FN |
# | dog [100, 100, 120, 120] | dog (0.8) [100, 100, 120, 120] | TP |
# | - | dog (0.9) [10, 10, 11, 11] | FP |
# | - | elephant (0.9) [100, 100, 220, 220] | IGN |
# +--------------------------+-------------------------------------+------+
image_key1 = 'img1'
# Add groundtruth boxes for img1.
groundtruth_boxes1 = np.array(
[[10, 10, 11, 11], [100, 100, 220, 220], [100, 100, 120, 120]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 1, 2], dtype=int)
groundtruth_labeled_classes1 = np.array([0, 1, 1, 0], dtype=int)
self.wp_eval_partial.add_single_ground_truth_image_info(
image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1,
standard_fields.InputDataFields.groundtruth_labeled_classes:
groundtruth_labeled_classes1
})
# Add detected boxes for img1.
detected_boxes1 = np.array([[10, 10, 11, 11], [10, 10, 11, 11],
[100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels1 = np.array([1, 2, 2, 3], dtype=int)
detected_scores1 = np.array([0.6, 0.9, 0.8, 0.9], dtype=float)
self.wp_eval_partial.add_single_detected_image_info(
image_key1, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes1,
standard_fields.DetectionResultFields.detection_scores:
detected_scores1,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels1
})
# The second test case is as follows:
#
# Labeled classes: [dog, elephant]
# +---------------------------+---------------------------------+------+
# | Groundtruth | Matched Detection | Type |
# +---------------------------+---------------------------------+------+
# | - | cat (0.8) [100, 100, 120, 120] | IGN |
# | dog [100, 100, 120, 120] | - | FN |
# | elephant [10, 10, 11, 11] | elephant (0.9) [10, 10, 11, 11] | TP |
# +---------------------------+---------------------------------+------+
image_key2 = 'img2'
# Add groundtruth boxes for img2.
groundtruth_boxes2 = np.array(
[[100, 100, 120, 120], [10, 10, 11, 11]], dtype=float)
groundtruth_class_labels2 = np.array([2, 3], dtype=int)
groundtruth_labeled_classes2 = np.array([0, 0, 1, 1], dtype=int)
self.wp_eval_partial.add_single_ground_truth_image_info(
image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2,
standard_fields.InputDataFields.groundtruth_labeled_classes:
groundtruth_labeled_classes2
})
# Add detected boxes for img2.
detected_boxes2 = np.array(
[[100, 100, 120, 120], [10, 10, 11, 11]],
dtype=float)
detected_class_labels2 = np.array([1, 3], dtype=int)
detected_scores2 = np.array([0.8, 0.9], dtype=float)
self.wp_eval_partial.add_single_detected_image_info(
image_key2, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes2,
standard_fields.DetectionResultFields.detection_scores:
detected_scores2,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels2
})
# Compute AP metrics.
metrics = self.wp_eval_partial.evaluate()
# Precision and recall for cat.
# +------------+------+-----------+--------+
# | Confidence | Type | Precision | Recall |
# +------------+------+-----------+--------+
# | 0.8 | IGN | - | - |
# | 0.6 | TP | 0.5 | 0.5 |
# +------------+------+-----------+--------+
# Expected AP: 0.5
self.assertAlmostEqual(
metrics[self.wp_eval_partial._metric_prefix +
'PerformanceByCategory/[email protected]/cat'], 0.5)
# Precision and recall for dog.
# +------------+------+-----------+--------+
# | Confidence | Type | Precision | Recall |
# +------------+------+-----------+--------+
# | 0.9 | FP | 0.0 | 0.0 |
# | 0.8 | TP | 0.5 | 0.5 |
# +------------+------+-----------+--------+
#
# After non-decreasing preprocessing of precision in
# third_party/tensorflow_models/object_detection/utils/metrics.py.
# +------------+------+-----------+--------+
# | Confidence | Type | Precision | Recall |
# +------------+------+-----------+--------+
# | 0.9 | FP | 0.5 | 0.0 |
# | 0.8 | TP | 0.5 | 0.5 |
# +------------+------+-----------+--------+
# Expected AP: 0.25
self.assertAlmostEqual(
metrics[self.wp_eval_partial._metric_prefix +
'PerformanceByCategory/[email protected]/dog'], 0.25)
# Precision and recall for elephant.
# +------------+------+-----------+--------+
# | Confidence | Type | Precision | Recall |
# +------------+------+-----------+--------+
# | 0.9 | IGN | - | - |
# | 0.9 | TP | 1.0 | 1.0 |
# +------------+------+-----------+--------+
# Expected AP: 1.0
self.assertAlmostEqual(
metrics[self.wp_eval_partial._metric_prefix +
'PerformanceByCategory/[email protected]/elephant'], 1.0)
    # Expected mAP: (AP_cat + AP_dog + AP_elephant) / 3 = (0.5 + 0.25 + 1.0) / 3
self.assertAlmostEqual(
metrics[self.wp_eval_partial._metric_prefix +
'Precision/[email protected]'], (0.25 + 1.0 + 0.5) / 3)
self.wp_eval_partial.clear()
self.assertFalse(self.wp_eval_partial._image_ids)
class ObjectDetectionEvaluationTest(tf.test.TestCase):
def setUp(self):
num_groundtruth_classes = 3
self.od_eval = object_detection_evaluation.ObjectDetectionEvaluation(
num_groundtruth_classes)
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([0, 2, 0], dtype=int)
self.od_eval.add_single_ground_truth_image_info(
image_key1, groundtruth_boxes1, groundtruth_class_labels1)
image_key2 = 'img2'
groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
[10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([0, 0, 2], dtype=int)
groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
groundtruth_is_group_of_list2 = np.array([False, False, True], dtype=bool)
self.od_eval.add_single_ground_truth_image_info(
image_key2, groundtruth_boxes2, groundtruth_class_labels2,
groundtruth_is_difficult_list2, groundtruth_is_group_of_list2)
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([1], dtype=int)
self.od_eval.add_single_ground_truth_image_info(
image_key3, groundtruth_boxes3, groundtruth_class_labels3)
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([0, 0, 2], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
self.od_eval.add_single_detected_image_info(
image_key, detected_boxes, detected_scores, detected_class_labels)
def test_value_error_on_zero_classes(self):
with self.assertRaises(ValueError):
object_detection_evaluation.ObjectDetectionEvaluation(
num_groundtruth_classes=0)
def test_add_single_ground_truth_image_info(self):
expected_num_gt_instances_per_class = np.array([3, 1, 1], dtype=int)
expected_num_gt_imgs_per_class = np.array([2, 1, 2], dtype=int)
self.assertTrue(np.array_equal(expected_num_gt_instances_per_class,
self.od_eval.num_gt_instances_per_class))
self.assertTrue(np.array_equal(expected_num_gt_imgs_per_class,
self.od_eval.num_gt_imgs_per_class))
groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
[10, 10, 12, 12]], dtype=float)
self.assertTrue(np.allclose(self.od_eval.groundtruth_boxes['img2'],
groundtruth_boxes2))
groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
self.assertTrue(np.allclose(
self.od_eval.groundtruth_is_difficult_list['img2'],
groundtruth_is_difficult_list2))
groundtruth_is_group_of_list2 = np.array([False, False, True], dtype=bool)
self.assertTrue(
np.allclose(self.od_eval.groundtruth_is_group_of_list['img2'],
groundtruth_is_group_of_list2))
groundtruth_class_labels1 = np.array([0, 2, 0], dtype=int)
self.assertTrue(np.array_equal(self.od_eval.groundtruth_class_labels[
'img1'], groundtruth_class_labels1))
def test_add_single_detected_image_info(self):
expected_scores_per_class = [[np.array([0.8, 0.7], dtype=float)], [],
[np.array([0.9], dtype=float)]]
expected_tp_fp_labels_per_class = [[np.array([0, 1], dtype=bool)], [],
[np.array([0], dtype=bool)]]
expected_num_images_correctly_detected_per_class = np.array([0, 0, 0],
dtype=int)
for i in range(self.od_eval.num_class):
for j in range(len(expected_scores_per_class[i])):
self.assertTrue(np.allclose(expected_scores_per_class[i][j],
self.od_eval.scores_per_class[i][j]))
self.assertTrue(np.array_equal(expected_tp_fp_labels_per_class[i][
j], self.od_eval.tp_fp_labels_per_class[i][j]))
self.assertTrue(np.array_equal(
expected_num_images_correctly_detected_per_class,
self.od_eval.num_images_correctly_detected_per_class))
def test_evaluate(self):
(average_precision_per_class, mean_ap, precisions_per_class,
recalls_per_class, corloc_per_class,
mean_corloc) = self.od_eval.evaluate()
expected_precisions_per_class = [np.array([0, 0.5], dtype=float),
np.array([], dtype=float),
np.array([0], dtype=float)]
expected_recalls_per_class = [
np.array([0, 1. / 3.], dtype=float), np.array([], dtype=float),
np.array([0], dtype=float)
]
expected_average_precision_per_class = np.array([1. / 6., 0, 0],
dtype=float)
expected_corloc_per_class = np.array([0, 0, 0], dtype=float)
expected_mean_ap = 1. / 18
expected_mean_corloc = 0.0
for i in range(self.od_eval.num_class):
self.assertTrue(np.allclose(expected_precisions_per_class[i],
precisions_per_class[i]))
self.assertTrue(np.allclose(expected_recalls_per_class[i],
recalls_per_class[i]))
self.assertTrue(np.allclose(expected_average_precision_per_class,
average_precision_per_class))
self.assertTrue(np.allclose(expected_corloc_per_class, corloc_per_class))
self.assertAlmostEqual(expected_mean_ap, mean_ap)
self.assertAlmostEqual(expected_mean_corloc, mean_corloc)
def test_merge_internal_state(self):
# Test that if initial state is merged, the results of the evaluation are
# the same.
od_eval_state = self.od_eval.get_internal_state()
copy_od_eval = object_detection_evaluation.ObjectDetectionEvaluation(
self.od_eval.num_class)
copy_od_eval.merge_internal_state(od_eval_state)
(average_precision_per_class, mean_ap, precisions_per_class,
recalls_per_class, corloc_per_class,
mean_corloc) = self.od_eval.evaluate()
(copy_average_precision_per_class, copy_mean_ap, copy_precisions_per_class,
copy_recalls_per_class, copy_corloc_per_class,
copy_mean_corloc) = copy_od_eval.evaluate()
for i in range(self.od_eval.num_class):
self.assertTrue(
np.allclose(copy_precisions_per_class[i], precisions_per_class[i]))
self.assertTrue(
np.allclose(copy_recalls_per_class[i], recalls_per_class[i]))
self.assertTrue(
np.allclose(copy_average_precision_per_class,
average_precision_per_class))
self.assertTrue(np.allclose(copy_corloc_per_class, corloc_per_class))
self.assertAlmostEqual(copy_mean_ap, mean_ap)
self.assertAlmostEqual(copy_mean_corloc, mean_corloc)
@unittest.skipIf(tf_version.is_tf2(), 'Eval Metrics ops are supported in TF1.X '
'only.')
class ObjectDetectionEvaluatorTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
self.categories = [{
'id': 1,
'name': 'person'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'cat'
}]
self.od_eval = object_detection_evaluation.ObjectDetectionEvaluator(
categories=self.categories)
def _make_evaluation_dict(self,
resized_groundtruth_masks=False,
batch_size=1,
max_gt_boxes=None,
scale_to_absolute=False):
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
image = tf.zeros(shape=[batch_size, 20, 20, 3], dtype=tf.uint8)
if batch_size == 1:
key = tf.constant('image1')
else:
key = tf.constant([str(i) for i in range(batch_size)])
detection_boxes = tf.concat([
tf.tile(
tf.constant([[[0., 0., 1., 1.]]]), multiples=[batch_size - 1, 1, 1
]),
tf.constant([[[0., 0., 0.5, 0.5]]])
],
axis=0)
detection_scores = tf.concat([
tf.tile(tf.constant([[0.5]]), multiples=[batch_size - 1, 1]),
tf.constant([[0.8]])
],
axis=0)
detection_classes = tf.tile(tf.constant([[0]]), multiples=[batch_size, 1])
detection_masks = tf.tile(
tf.ones(shape=[1, 1, 20, 20], dtype=tf.float32),
multiples=[batch_size, 1, 1, 1])
groundtruth_boxes = tf.constant([[0., 0., 1., 1.]])
groundtruth_classes = tf.constant([1])
groundtruth_instance_masks = tf.ones(shape=[1, 20, 20], dtype=tf.uint8)
num_detections = tf.ones([batch_size])
if resized_groundtruth_masks:
groundtruth_instance_masks = tf.ones(shape=[1, 10, 10], dtype=tf.uint8)
if batch_size > 1:
groundtruth_boxes = tf.tile(
tf.expand_dims(groundtruth_boxes, 0), multiples=[batch_size, 1, 1])
groundtruth_classes = tf.tile(
tf.expand_dims(groundtruth_classes, 0), multiples=[batch_size, 1])
groundtruth_instance_masks = tf.tile(
tf.expand_dims(groundtruth_instance_masks, 0),
multiples=[batch_size, 1, 1, 1])
detections = {
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_masks: detection_masks,
detection_fields.num_detections: num_detections
}
groundtruth = {
input_data_fields.groundtruth_boxes:
groundtruth_boxes,
input_data_fields.groundtruth_classes:
groundtruth_classes,
input_data_fields.groundtruth_instance_masks:
groundtruth_instance_masks,
}
if batch_size > 1:
return eval_util.result_dict_for_batched_example(
image,
key,
detections,
groundtruth,
scale_to_absolute=scale_to_absolute,
max_gt_boxes=max_gt_boxes)
else:
return eval_util.result_dict_for_single_example(
image,
key,
detections,
groundtruth,
scale_to_absolute=scale_to_absolute)
@parameterized.parameters({
'batch_size': 1,
'expected_map': 0,
'max_gt_boxes': None,
'scale_to_absolute': True
}, {
'batch_size': 8,
'expected_map': 0.765625,
'max_gt_boxes': [1],
'scale_to_absolute': True
}, {
'batch_size': 1,
'expected_map': 0,
'max_gt_boxes': None,
'scale_to_absolute': False
}, {
'batch_size': 8,
'expected_map': 0.765625,
'max_gt_boxes': [1],
'scale_to_absolute': False
})
def test_get_estimator_eval_metric_ops(self,
batch_size=1,
expected_map=1,
max_gt_boxes=None,
scale_to_absolute=False):
eval_dict = self._make_evaluation_dict(
batch_size=batch_size,
max_gt_boxes=max_gt_boxes,
scale_to_absolute=scale_to_absolute)
tf.logging.info('eval_dict: {}'.format(eval_dict))
metric_ops = self.od_eval.get_estimator_eval_metric_ops(eval_dict)
_, update_op = metric_ops['Precision/[email protected]']
with self.test_session() as sess:
metrics = {}
for key, (value_op, _) in six.iteritems(metric_ops):
metrics[key] = value_op
sess.run(update_op)
metrics = sess.run(metrics)
self.assertAlmostEqual(expected_map, metrics['Precision/[email protected]'])
if __name__ == '__main__':
tf.test.main()
| 51,865 | 42.257715 | 80 | py |
models | models-master/research/object_detection/utils/autoaugment_utils.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment util file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import math
import six
import tensorflow.compat.v1 as tf
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import image as contrib_image
from tensorflow.contrib import training as contrib_training
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
# Represents an invalid bounding box that is used for checking for padding
# lists of bounding box coordinates for a few augmentation operations
_INVALID_BOX = [[-1.0, -1.0, -1.0, -1.0]]
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
]
return policy
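# An illustrative reading of the first sub-policy above (a sketch, not part
# of the original module): [('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)]
# means "with probability 0.6 apply TranslateX_BBox at magnitude level 4 (out
# of _MAX_LEVEL = 10), then with probability 0.8 apply Equalize at level 10",
# with the two operations applied sequentially to the same image and boxes.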
def policy_v1():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
[('Color', 0.0, 0), ('ShearX_Only_BBoxes', 0.8, 4)],
[('ShearY_Only_BBoxes', 0.8, 2), ('Flip_Only_BBoxes', 0.0, 10)],
[('Equalize', 0.6, 10), ('TranslateX_BBox', 0.2, 2)],
[('Color', 1.0, 10), ('TranslateY_Only_BBoxes', 0.4, 6)],
[('Rotate_BBox', 0.8, 10), ('Contrast', 0.0, 10)],
[('Cutout', 0.2, 2), ('Brightness', 0.8, 10)],
[('Color', 1.0, 6), ('Equalize', 1.0, 2)],
[('Cutout_Only_BBoxes', 0.4, 6), ('TranslateY_Only_BBoxes', 0.8, 2)],
[('Color', 0.2, 8), ('Rotate_BBox', 0.8, 10)],
[('Sharpness', 0.4, 4), ('TranslateY_Only_BBoxes', 0.0, 4)],
[('Sharpness', 1.0, 4), ('SolarizeAdd', 0.4, 4)],
[('Rotate_BBox', 1.0, 8), ('Sharpness', 0.2, 8)],
[('ShearY_BBox', 0.6, 10), ('Equalize_Only_BBoxes', 0.6, 8)],
[('ShearX_BBox', 0.2, 6), ('TranslateY_Only_BBoxes', 0.2, 10)],
[('SolarizeAdd', 0.6, 8), ('Brightness', 0.8, 10)],
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
def policy_v2():
"""Additional policy that performs well on object detection."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Color', 0.0, 6), ('Cutout', 0.6, 8), ('Sharpness', 0.4, 8)],
[('Rotate_BBox', 0.4, 8), ('Sharpness', 0.4, 2),
('Rotate_BBox', 0.8, 10)],
[('TranslateY_BBox', 1.0, 8), ('AutoContrast', 0.8, 2)],
[('AutoContrast', 0.4, 6), ('ShearX_BBox', 0.8, 8),
('Brightness', 0.0, 10)],
[('SolarizeAdd', 0.2, 6), ('Contrast', 0.0, 10),
('AutoContrast', 0.6, 0)],
[('Cutout', 0.2, 0), ('Solarize', 0.8, 8), ('Color', 1.0, 4)],
[('TranslateY_BBox', 0.0, 4), ('Equalize', 0.6, 8),
('Solarize', 0.0, 10)],
[('TranslateY_BBox', 0.2, 2), ('ShearY_BBox', 0.8, 8),
('Rotate_BBox', 0.8, 8)],
[('Cutout', 0.8, 8), ('Brightness', 0.8, 8), ('Cutout', 0.2, 2)],
[('Color', 0.8, 4), ('TranslateY_BBox', 1.0, 6), ('Rotate_BBox', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('BBox_Cutout', 1.0, 4), ('Cutout', 0.2, 8)],
[('Rotate_BBox', 0.0, 0), ('Equalize', 0.6, 6), ('ShearY_BBox', 0.6, 8)],
[('Brightness', 0.8, 8), ('AutoContrast', 0.4, 2),
('Brightness', 0.2, 2)],
[('TranslateY_BBox', 0.4, 8), ('Solarize', 0.4, 6),
('SolarizeAdd', 0.2, 10)],
[('Contrast', 1.0, 10), ('SolarizeAdd', 0.2, 8), ('Equalize', 0.2, 4)],
]
return policy
def policy_v3():
""""Additional policy that performs well on object detection."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Posterize', 0.8, 2), ('TranslateX_BBox', 1.0, 8)],
[('BBox_Cutout', 0.2, 10), ('Sharpness', 1.0, 8)],
[('Rotate_BBox', 0.6, 8), ('Rotate_BBox', 0.8, 10)],
[('Equalize', 0.8, 10), ('AutoContrast', 0.2, 10)],
[('SolarizeAdd', 0.2, 2), ('TranslateY_BBox', 0.2, 8)],
[('Sharpness', 0.0, 2), ('Color', 0.4, 8)],
[('Equalize', 1.0, 8), ('TranslateY_BBox', 1.0, 8)],
[('Posterize', 0.6, 2), ('Rotate_BBox', 0.0, 10)],
[('AutoContrast', 0.6, 0), ('Rotate_BBox', 1.0, 6)],
[('Equalize', 0.0, 4), ('Cutout', 0.8, 10)],
[('Brightness', 1.0, 2), ('TranslateY_BBox', 1.0, 6)],
[('Contrast', 0.0, 2), ('ShearY_BBox', 0.8, 0)],
[('AutoContrast', 0.8, 10), ('Contrast', 0.2, 10)],
[('Rotate_BBox', 1.0, 10), ('Cutout', 1.0, 10)],
[('SolarizeAdd', 0.8, 6), ('Equalize', 0.8, 8)],
]
return policy
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.to_float(image1)
image2 = tf.to_float(image2)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.to_float(image1) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
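# A minimal usage sketch for blend (illustrative only, not part of the
# original module), assuming two constant uint8 images of the same shape:
#
# img_a = tf.fill([2, 2, 3], tf.constant(100, dtype=tf.uint8))
# img_b = tf.fill([2, 2, 3], tf.constant(200, dtype=tf.uint8))
# blend(img_a, img_b, 0.5) # every pixel becomes 150 (linear interpolation).
# blend(img_a, img_b, 2.0) # 100 + 2.0 * 100 = 300, clipped to 255.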
def cutout(image, pad_size, replace=0):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
  This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `image`. The pixel values filled in will be of
  the value `replace`. The location where the mask will be applied is
  randomly chosen uniformly over the whole image.
Args:
image: An image Tensor of type uint8.
    pad_size: Specifies the size of the zero mask that will be generated and
      applied to the image. The mask will be of size
      (2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
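# A usage sketch for cutout (illustrative only, not part of the original
# module): zero out a square of side 2*pad_size around a random center and
# fill it with a constant value.
#
# image = tf.zeros([32, 32, 3], dtype=tf.uint8)
# cut = cutout(image, pad_size=4, replace=128)
# `cut` equals `image` except for an (up to) 8x8 patch filled with 128.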
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
return tf.where(image < threshold, image, 255 - image)
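# Worked example for solarize (illustrative only): with the default threshold
# of 128, a pixel value of 100 is kept as-is, while 200 becomes 255 - 200 = 55.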
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
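# Worked example for solarize_add (illustrative only): with addition=30 and
# threshold=128, a pixel of 100 becomes 130 (clipped to [0, 255] if needed),
# while a pixel of 200 is left unchanged since it is not below the threshold.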
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
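# Worked example for posterize (illustrative only): with bits=2 the shift is
# 6, so a pixel of 200 (0b11001000) becomes (200 >> 6) << 6 = 192, keeping
# only the two most significant bits.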
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
    degrees: Float, a scalar angle in degrees to rotate all images by. If
      degrees is positive, the image will be rotated clockwise; otherwise it
      will be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = contrib_image.rotate(wrap(image), radians)
return unwrap(image, replace)
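# A usage sketch for rotate (illustrative only, assuming a uint8 image):
#
# rotated = rotate(image, degrees=30.0, replace=[128, 128, 128])
#
# rotates by 30 degrees (30 * pi / 180 ~= 0.524 radians) and fills the pixels
# exposed by the rotation with gray (128).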
def random_shift_bbox(image, bbox, pixel_scaling, replace,
new_min_bbox_coords=None):
"""Move the bbox and the image content to a slightly new random location.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
The potential values for the new min corner of the bbox will be between
[old_min - pixel_scaling * bbox_height/2,
       old_min + pixel_scaling * bbox_height/2].
pixel_scaling: A float between 0 and 1 that specifies the pixel range
that the new bbox location will be sampled from.
replace: A one or three value 1D tensor to fill empty pixels.
new_min_bbox_coords: If not None, then this is a tuple that specifies the
(min_y, min_x) coordinates of the new bbox. Normally this is randomly
specified, but this allows it to be manually set. The coordinates are
the absolute coordinates between 0 and image height/width and are int32.
Returns:
The new image that will have the shifted bbox location in it along with
the new bbox that contains the new coordinates.
"""
  # Obtain image height and width and create helper clip functions.
image_height = tf.to_float(tf.shape(image)[0])
image_width = tf.to_float(tf.shape(image)[1])
def clip_y(val):
return tf.clip_by_value(val, 0, tf.to_int32(image_height) - 1)
def clip_x(val):
return tf.clip_by_value(val, 0, tf.to_int32(image_width) - 1)
# Convert bbox to pixel coordinates.
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = clip_y(tf.to_int32(image_height * bbox[2]))
max_x = clip_x(tf.to_int32(image_width * bbox[3]))
bbox_height, bbox_width = (max_y - min_y + 1, max_x - min_x + 1)
image_height = tf.to_int32(image_height)
image_width = tf.to_int32(image_width)
# Select the new min/max bbox ranges that are used for sampling the
# new min x/y coordinates of the shifted bbox.
minval_y = clip_y(
min_y - tf.to_int32(pixel_scaling * tf.to_float(bbox_height) / 2.0))
maxval_y = clip_y(
min_y + tf.to_int32(pixel_scaling * tf.to_float(bbox_height) / 2.0))
minval_x = clip_x(
min_x - tf.to_int32(pixel_scaling * tf.to_float(bbox_width) / 2.0))
maxval_x = clip_x(
min_x + tf.to_int32(pixel_scaling * tf.to_float(bbox_width) / 2.0))
# Sample and calculate the new unclipped min/max coordinates of the new bbox.
if new_min_bbox_coords is None:
unclipped_new_min_y = tf.random_uniform(
shape=[], minval=minval_y, maxval=maxval_y,
dtype=tf.int32)
unclipped_new_min_x = tf.random_uniform(
shape=[], minval=minval_x, maxval=maxval_x,
dtype=tf.int32)
else:
unclipped_new_min_y, unclipped_new_min_x = (
clip_y(new_min_bbox_coords[0]), clip_x(new_min_bbox_coords[1]))
unclipped_new_max_y = unclipped_new_min_y + bbox_height - 1
unclipped_new_max_x = unclipped_new_min_x + bbox_width - 1
# Determine if any of the new bbox was shifted outside the current image.
# This is used for determining if any of the original bbox content should be
# discarded.
new_min_y, new_min_x, new_max_y, new_max_x = (
clip_y(unclipped_new_min_y), clip_x(unclipped_new_min_x),
clip_y(unclipped_new_max_y), clip_x(unclipped_new_max_x))
shifted_min_y = (new_min_y - unclipped_new_min_y) + min_y
shifted_max_y = max_y - (unclipped_new_max_y - new_max_y)
shifted_min_x = (new_min_x - unclipped_new_min_x) + min_x
shifted_max_x = max_x - (unclipped_new_max_x - new_max_x)
# Create the new bbox tensor by converting pixel integer values to floats.
new_bbox = tf.stack([
tf.to_float(new_min_y) / tf.to_float(image_height),
tf.to_float(new_min_x) / tf.to_float(image_width),
tf.to_float(new_max_y) / tf.to_float(image_height),
tf.to_float(new_max_x) / tf.to_float(image_width)])
# Copy the contents in the bbox and fill the old bbox location
# with gray (128).
bbox_content = image[shifted_min_y:shifted_max_y + 1,
shifted_min_x:shifted_max_x + 1, :]
def mask_and_add_image(
min_y_, min_x_, max_y_, max_x_, mask, content_tensor, image_):
"""Applies mask to bbox region in image then adds content_tensor to it."""
mask = tf.pad(mask,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]], constant_values=1)
content_tensor = tf.pad(content_tensor,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]], constant_values=0)
return image_ * mask + content_tensor
# Zero out original bbox location.
mask = tf.zeros_like(image)[min_y:max_y+1, min_x:max_x+1, :]
grey_tensor = tf.zeros_like(mask) + replace[0]
image = mask_and_add_image(min_y, min_x, max_y, max_x, mask,
grey_tensor, image)
# Fill in bbox content to new bbox location.
mask = tf.zeros_like(bbox_content)
image = mask_and_add_image(new_min_y, new_min_x, new_max_y, new_max_x, mask,
bbox_content, image)
return image, new_bbox
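# A concrete sketch of the shift above (illustrative only, not part of the
# original module): for a 100x100 image with bbox = [0.2, 0.2, 0.4, 0.4] and
# pixel_scaling = 0.5, the bbox spans pixels (20, 20) to (40, 40), so
# bbox_height = 21 and the new min_y is drawn uniformly from
# [20 - 5, 20 + 5) = [15, 25). The bbox contents are then pasted at the
# sampled location and the old location is filled with replace[0].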
def _clip_bbox(min_y, min_x, max_y, max_x):
"""Clip bounding box coordinates between 0 and 1.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
Returns:
Clipped coordinate values between 0 and 1.
"""
min_y = tf.clip_by_value(min_y, 0.0, 1.0)
min_x = tf.clip_by_value(min_x, 0.0, 1.0)
max_y = tf.clip_by_value(max_y, 0.0, 1.0)
max_x = tf.clip_by_value(max_x, 0.0, 1.0)
return min_y, min_x, max_y, max_x
def _check_bbox_area(min_y, min_x, max_y, max_x, delta=0.05):
"""Adjusts bbox coordinates to make sure the area is > 0.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
delta: Float, this is used to create a gap of size 2 * delta between
bbox min/max coordinates that are the same on the boundary.
This prevents the bbox from having an area of zero.
Returns:
Tuple of new bbox coordinates between 0 and 1 that will now have a
guaranteed area > 0.
"""
height = max_y - min_y
width = max_x - min_x
def _adjust_bbox_boundaries(min_coord, max_coord):
# Make sure max is never 0 and min is never 1.
max_coord = tf.maximum(max_coord, 0.0 + delta)
min_coord = tf.minimum(min_coord, 1.0 - delta)
return min_coord, max_coord
min_y, max_y = tf.cond(tf.equal(height, 0.0),
lambda: _adjust_bbox_boundaries(min_y, max_y),
lambda: (min_y, max_y))
min_x, max_x = tf.cond(tf.equal(width, 0.0),
lambda: _adjust_bbox_boundaries(min_x, max_x),
lambda: (min_x, max_x))
return min_y, min_x, max_y, max_x
def _scale_bbox_only_op_probability(prob):
"""Reduce the probability of the bbox-only operation.
Probability is reduced so that we do not distort the content of too many
  bounding boxes that are close to each other. The value of 3.0 is a
  hyperparameter that was chosen when designing the autoaugment algorithm and
  that we found empirically to work well.
Args:
prob: Float that is the probability of applying the bbox-only operation.
Returns:
Reduced probability.
"""
return prob / 3.0
def _apply_bbox_augmentation(image, bbox, augmentation_func, *args):
"""Applies augmentation_func to the subsection of image indicated by bbox.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A modified version of image, where the bbox location in the image will
    have augmentation_func applied to it.
"""
image_height = tf.to_float(tf.shape(image)[0])
image_width = tf.to_float(tf.shape(image)[1])
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = tf.to_int32(image_height * bbox[2])
max_x = tf.to_int32(image_width * bbox[3])
image_height = tf.to_int32(image_height)
image_width = tf.to_int32(image_width)
# Clip to be sure the max values do not fall out of range.
max_y = tf.minimum(max_y, image_height - 1)
max_x = tf.minimum(max_x, image_width - 1)
# Get the sub-tensor that is the image within the bounding box region.
bbox_content = image[min_y:max_y + 1, min_x:max_x + 1, :]
# Apply the augmentation function to the bbox portion of the image.
augmented_bbox_content = augmentation_func(bbox_content, *args)
# Pad the augmented_bbox_content and the mask to match the shape of original
# image.
augmented_bbox_content = tf.pad(augmented_bbox_content,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]])
# Create a mask that will be used to zero out a part of the original image.
mask_tensor = tf.zeros_like(bbox_content)
mask_tensor = tf.pad(mask_tensor,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]],
constant_values=1)
# Replace the old bbox content with the new augmented content.
image = image * mask_tensor + augmented_bbox_content
return image
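# A sketch of the compositing step above (illustrative only): for a 4x4 image
# and a bbox covering rows/cols 1..2, the augmented crop is zero-padded back
# to 4x4, the mask is 1 outside the bbox and 0 inside it, and
# image * mask_tensor + augmented_bbox_content keeps the original pixels
# outside the bbox while replacing the pixels inside it with the augmented
# content.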
def _concat_bbox(bbox, bboxes):
"""Helper function that concates bbox to bboxes along the first dimension."""
# Note if all elements in bboxes are -1 (_INVALID_BOX), then this means
# we discard bboxes and start the bboxes Tensor with the current bbox.
bboxes_sum_check = tf.reduce_sum(bboxes)
bbox = tf.expand_dims(bbox, 0)
# This check will be true when it is an _INVALID_BOX
bboxes = tf.cond(tf.equal(bboxes_sum_check, -4.0),
lambda: bbox,
lambda: tf.concat([bboxes, bbox], 0))
return bboxes
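# Worked example for _concat_bbox (illustrative only): starting from the dummy
# tensor tf.constant(_INVALID_BOX) (whose elements sum to -4), appending the
# first real bbox replaces the dummy entirely, e.g.
#
# _concat_bbox(tf.constant([0.1, 0.1, 0.5, 0.5]), tf.constant(_INVALID_BOX))
#
# returns [[0.1, 0.1, 0.5, 0.5]]; later calls concatenate along dimension 0.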
def _apply_bbox_augmentation_wrapper(image, bbox, new_bboxes, prob,
augmentation_func, func_changes_bbox,
*args):
"""Applies _apply_bbox_augmentation with probability prob.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
new_bboxes: 2D Tensor that is a list of the bboxes in the image after they
have been altered by aug_func. These will only be changed when
func_changes_bbox is set to true. Each bbox has 4 elements
(min_y, min_x, max_y, max_x) of type float that are the normalized
bbox coordinates between 0 and 1.
prob: Float that is the probability of applying _apply_bbox_augmentation.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
    A tuple. First element is a modified version of image, where the bbox
location in the image will have augmentation_func applied to it if it is
chosen to be called with probability `prob`. The second element is a
Tensor of Tensors of length 4 that will contain the altered bbox after
applying augmentation_func.
"""
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
if func_changes_bbox:
augmented_image, bbox = tf.cond(
should_apply_op,
lambda: augmentation_func(image, bbox, *args),
lambda: (image, bbox))
else:
augmented_image = tf.cond(
should_apply_op,
lambda: _apply_bbox_augmentation(image, bbox, augmentation_func, *args),
lambda: image)
new_bboxes = _concat_bbox(bbox, new_bboxes)
return augmented_image, new_bboxes
def _apply_multi_bbox_augmentation(image, bboxes, prob, aug_func,
func_changes_bbox, *args):
"""Applies aug_func to the image for each bbox in bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
prob: Float that is the probability of applying aug_func to a specific
bounding box within the image.
aug_func: Augmentation function that will be applied to the
subsections of image indicated by the bbox values in bboxes.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A modified version of image, where each bbox location in the image will
have augmentation_func applied to it if it is chosen to be called with
probability prob independently across all bboxes. Also the final
bboxes are returned that will be unchanged if func_changes_bbox is set to
false and if true, the new altered ones will be returned.
"""
# Will keep track of the new altered bboxes after aug_func is repeatedly
# applied. The -1 values are a dummy value and this first Tensor will be
# removed upon appending the first real bbox.
new_bboxes = tf.constant(_INVALID_BOX)
# If the bboxes are empty, then just give it _INVALID_BOX. The result
# will be thrown away.
bboxes = tf.cond(tf.equal(tf.size(bboxes), 0),
lambda: tf.constant(_INVALID_BOX),
lambda: bboxes)
bboxes = tf.ensure_shape(bboxes, (None, 4))
# pylint:disable=g-long-lambda
# pylint:disable=line-too-long
wrapped_aug_func = lambda _image, bbox, _new_bboxes: _apply_bbox_augmentation_wrapper(
_image, bbox, _new_bboxes, prob, aug_func, func_changes_bbox, *args)
# pylint:enable=g-long-lambda
# pylint:enable=line-too-long
# Setup the while_loop.
num_bboxes = tf.shape(bboxes)[0] # We loop until we go over all bboxes.
idx = tf.constant(0) # Counter for the while loop.
  # Condition function that ends the loop once we have gone over all bboxes.
  # images_and_bboxes contains (_image, _new_bboxes).
cond = lambda _idx, _images_and_bboxes: tf.less(_idx, num_bboxes)
# Shuffle the bboxes so that the augmentation order is not deterministic if
# we are not changing the bboxes with aug_func.
if not func_changes_bbox:
loop_bboxes = tf.random.shuffle(bboxes)
else:
loop_bboxes = bboxes
# Main function of while_loop where we repeatedly apply augmentation on the
# bboxes in the image.
# pylint:disable=g-long-lambda
body = lambda _idx, _images_and_bboxes: [
_idx + 1, wrapped_aug_func(_images_and_bboxes[0],
loop_bboxes[_idx],
_images_and_bboxes[1])]
# pylint:enable=g-long-lambda
_, (image, new_bboxes) = tf.while_loop(
cond, body, [idx, (image, new_bboxes)],
shape_invariants=[idx.get_shape(),
(image.get_shape(), tf.TensorShape([None, 4]))])
  # Either return the altered bboxes or the original ones depending on whether
  # we altered them in any way.
if func_changes_bbox:
final_bboxes = new_bboxes
else:
final_bboxes = bboxes
return image, final_bboxes
def _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, aug_func,
func_changes_bbox, *args):
"""Checks to be sure num bboxes > 0 before calling inner function."""
num_bboxes = tf.shape(bboxes)[0]
image, bboxes = tf.cond(
tf.equal(num_bboxes, 0),
lambda: (image, bboxes),
# pylint:disable=g-long-lambda
lambda: _apply_multi_bbox_augmentation(
image, bboxes, prob, aug_func, func_changes_bbox, *args))
# pylint:enable=g-long-lambda
return image, bboxes
def rotate_only_bboxes(image, bboxes, prob, degrees, replace):
"""Apply rotate to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, rotate, func_changes_bbox, degrees, replace)
def shear_x_only_bboxes(image, bboxes, prob, level, replace):
"""Apply shear_x to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, shear_x, func_changes_bbox, level, replace)
def shear_y_only_bboxes(image, bboxes, prob, level, replace):
"""Apply shear_y to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, shear_y, func_changes_bbox, level, replace)
def translate_x_only_bboxes(image, bboxes, prob, pixels, replace):
"""Apply translate_x to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_x, func_changes_bbox, pixels, replace)
def translate_y_only_bboxes(image, bboxes, prob, pixels, replace):
"""Apply translate_y to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_y, func_changes_bbox, pixels, replace)
def flip_only_bboxes(image, bboxes, prob):
"""Apply flip_lr to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, tf.image.flip_left_right, func_changes_bbox)
def solarize_only_bboxes(image, bboxes, prob, threshold):
"""Apply solarize to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, solarize, func_changes_bbox, threshold)
def equalize_only_bboxes(image, bboxes, prob):
"""Apply equalize to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, equalize, func_changes_bbox)
def cutout_only_bboxes(image, bboxes, prob, pad_size, replace):
"""Apply cutout to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, cutout, func_changes_bbox, pad_size, replace)
def _rotate_bbox(bbox, image_height, image_width, degrees):
"""Rotates the bbox coordinated by degrees.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
    image_width: Int, width of the image.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
Returns:
A tensor of the same shape as bbox, but now with the rotated coordinates.
"""
image_height, image_width = (
tf.to_float(image_height), tf.to_float(image_width))
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# Translate the bbox to the center of the image and turn the normalized 0-1
# coordinates to absolute pixel locations.
# Y coordinates are made negative as the y axis of images goes down with
# increasing pixel values, so we negate to make sure x axis and y axis points
# are in the traditionally positive direction.
min_y = -tf.to_int32(image_height * (bbox[0] - 0.5))
min_x = tf.to_int32(image_width * (bbox[1] - 0.5))
max_y = -tf.to_int32(image_height * (bbox[2] - 0.5))
max_x = tf.to_int32(image_width * (bbox[3] - 0.5))
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
# Rotate the coordinates according to the rotation matrix clockwise if
# radians is positive, else negative
rotation_matrix = tf.stack(
[[tf.cos(radians), tf.sin(radians)],
[-tf.sin(radians), tf.cos(radians)]])
new_coords = tf.cast(
tf.matmul(rotation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to normalized 0-1 floats.
min_y = -(tf.to_float(tf.reduce_max(new_coords[0, :])) / image_height - 0.5)
min_x = tf.to_float(tf.reduce_min(new_coords[1, :])) / image_width + 0.5
max_y = -(tf.to_float(tf.reduce_min(new_coords[0, :])) / image_height - 0.5)
max_x = tf.to_float(tf.reduce_max(new_coords[1, :])) / image_width + 0.5
  # Clip the bboxes to be sure they fall between [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def rotate_with_bboxes(image, bboxes, degrees, replace):
"""Equivalent of PIL Rotate that rotates the image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of rotating
image by degrees. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the rotated image.
"""
# Rotate the image.
image = rotate(image, degrees, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_rotate_bbox = lambda bbox: _rotate_bbox(
bbox, image_height, image_width, degrees)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_rotate_bbox, bboxes)
return image, bboxes
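# A minimal usage sketch (illustrative only; the image, box values and angle
# below are assumptions, not library defaults): rotate an image 15 degrees
# clockwise and rotate the box corners with it, filling exposed pixels with
# gray.
def _example_rotate_with_bboxes():
  image = tf.zeros([224, 224, 3], dtype=tf.uint8)
  bboxes = tf.constant([[0.2, 0.2, 0.6, 0.7]], dtype=tf.float32)
  return rotate_with_bboxes(
      image, bboxes, degrees=15.0, replace=[128, 128, 128])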
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
image = contrib_image.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
image = contrib_image.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def _shift_bbox(bbox, image_height, image_width, pixels, shift_horizontal):
"""Shifts the bbox coordinates by pixels.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
image_width: Int, width of the image.
pixels: An int. How many pixels to shift the bbox.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
pixels = tf.to_int32(pixels)
# Convert bbox to integer pixel locations.
min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
if shift_horizontal:
min_x = tf.maximum(0, min_x - pixels)
max_x = tf.minimum(image_width, max_x - pixels)
else:
min_y = tf.maximum(0, min_y - pixels)
max_y = tf.minimum(image_height, max_y - pixels)
# Convert bbox back to floats.
min_y = tf.to_float(min_y) / tf.to_float(image_height)
min_x = tf.to_float(min_x) / tf.to_float(image_width)
max_y = tf.to_float(max_y) / tf.to_float(image_height)
max_x = tf.to_float(max_x) / tf.to_float(image_width)
  # Clip the bboxes to be sure they fall between [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def translate_bbox(image, bboxes, pixels, replace, shift_horizontal):
"""Equivalent of PIL Translate in X/Y dimension that shifts image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
pixels: An int. How many pixels to shift the image and bboxes
replace: A one or three value 1D tensor to fill empty pixels.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of translating
image by pixels. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the shifted image.
"""
if shift_horizontal:
image = translate_x(image, pixels, replace)
else:
image = translate_y(image, pixels, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_shift_bbox = lambda bbox: _shift_bbox(
bbox, image_height, image_width, pixels, shift_horizontal)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_shift_bbox, bboxes)
return image, bboxes
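# A minimal usage sketch (values assumed): shift the image and its boxes 20
# pixels along the X dimension, filling exposed pixels with gray.
def _example_translate_bbox():
  image = tf.zeros([224, 224, 3], dtype=tf.uint8)
  bboxes = tf.constant([[0.2, 0.2, 0.6, 0.7]], dtype=tf.float32)
  return translate_bbox(
      image, bboxes, pixels=20, replace=[128, 128, 128], shift_horizontal=True)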
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = contrib_image.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = contrib_image.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def _shear_bbox(bbox, image_height, image_width, level, shear_horizontal):
"""Shifts the bbox according to how the image was sheared.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
    image_width: Int, width of the image.
level: Float. How much to shear the image.
shear_horizontal: If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
image_height, image_width = (
tf.to_float(image_height), tf.to_float(image_width))
# Change bbox coordinates to be pixels.
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = tf.to_int32(image_height * bbox[2])
max_x = tf.to_int32(image_width * bbox[3])
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
# Shear the coordinates according to the translation matrix.
if shear_horizontal:
translation_matrix = tf.stack(
[[1, 0], [-level, 1]])
else:
translation_matrix = tf.stack(
[[1, -level], [0, 1]])
translation_matrix = tf.cast(translation_matrix, tf.float32)
new_coords = tf.cast(
tf.matmul(translation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to floats.
min_y = tf.to_float(tf.reduce_min(new_coords[0, :])) / image_height
min_x = tf.to_float(tf.reduce_min(new_coords[1, :])) / image_width
max_y = tf.to_float(tf.reduce_max(new_coords[0, :])) / image_height
max_x = tf.to_float(tf.reduce_max(new_coords[1, :])) / image_width
  # Clip the bboxes to be sure they fall between [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def shear_with_bboxes(image, bboxes, level, replace, shear_horizontal):
"""Applies Shear Transformation to the image and shifts the bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
level: Float. How much to shear the image. This value will be between
      -0.3 and 0.3.
replace: A one or three value 1D tensor to fill empty pixels.
shear_horizontal: Boolean. If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of shearing
image by level. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the sheared image.
"""
if shear_horizontal:
image = shear_x(image, level, replace)
else:
image = shear_y(image, level, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_shear_bbox = lambda bbox: _shear_bbox(
bbox, image_height, image_width, level, shear_horizontal)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_shear_bbox, bboxes)
return image, bboxes
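# A minimal usage sketch (values assumed): shear the image along X by a level
# of 0.1 and shift the boxes to match, filling exposed pixels with gray.
def _example_shear_with_bboxes():
  image = tf.zeros([224, 224, 3], dtype=tf.uint8)
  bboxes = tf.constant([[0.2, 0.2, 0.6, 0.7]], dtype=tf.float32)
  return shear_with_bboxes(
      image, bboxes, level=0.1, replace=[128, 128, 128], shear_horizontal=True)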
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image,
    # to compute mins and maxes.
lo = tf.to_float(tf.reduce_min(image))
hi = tf.to_float(tf.reduce_max(image))
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.to_float(im) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
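# A short sketch (input values assumed): autocontrast stretches each channel
# independently so that its minimum maps to 0 and its maximum maps to 255.
def _example_autocontrast():
  image = tf.random_uniform([32, 32, 3], minval=64, maxval=192, dtype=tf.int32)
  image = tf.cast(image, tf.uint8)
  return autocontrast(image)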
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', rate=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
    # For the purposes of computing the step, keep only the nonzero values.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
      # and then normalizing by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
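# A short sketch (input values assumed): equalize flattens the per-channel
# histogram of a uint8 image using the lookup-table construction above.
def _example_equalize():
  image = tf.random_uniform([32, 32, 3], minval=0, maxval=256, dtype=tf.int32)
  image = tf.cast(image, tf.uint8)
  return equalize(image)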
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
  the rest of the three channels in that spatial position are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
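# A short sketch of the wrap/unwrap round trip (illustrative only): wrap adds
# an all-ones fourth channel; spatial ops leave 0s there for empty pixels, and
# unwrap refills those pixels with `replace` before dropping the channel.
def _example_wrap_unwrap():
  image = tf.zeros([4, 4, 3], dtype=tf.uint8)
  wrapped = wrap(image)  # shape [4, 4, 4]
  restored = unwrap(wrapped, replace=tf.constant([128, 128, 128], tf.uint8))
  return restored  # shape [4, 4, 3]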
def _cutout_inside_bbox(image, bbox, pad_fraction):
"""Generates cutout mask and the mean pixel value of the bbox.
First a location is randomly chosen within the image as the center where the
cutout mask will be applied. Note this can be towards the boundaries of the
image, so the full cutout mask may not be applied.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
    pad_fraction: Float that specifies how large the cutout mask should be
in reference to the size of the original bbox. If pad_fraction is 0.25,
then the cutout mask will be of shape
(0.25 * bbox height, 0.25 * bbox width).
Returns:
    A tuple. First element is a tensor of the same shape as image where each
element is either a 1 or 0 that is used to determine where the image
will have cutout applied. The second element is the mean of the pixels
in the image where the bbox is located.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Transform from shape [1, 4] to [4].
bbox = tf.squeeze(bbox)
min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
# Calculate the mean pixel values in the bounding box, which will be used
# to fill the cutout region.
mean = tf.reduce_mean(image[min_y:max_y + 1, min_x:max_x + 1],
reduction_indices=[0, 1])
  # Cutout mask will be size pad_size_height * 2 by pad_size_width * 2 if the
# region lies entirely within the bbox.
box_height = max_y - min_y + 1
box_width = max_x - min_x + 1
pad_size_height = tf.to_int32(pad_fraction * (box_height / 2))
pad_size_width = tf.to_int32(pad_fraction * (box_width / 2))
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=min_y, maxval=max_y+1,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=min_x, maxval=max_x+1,
dtype=tf.int32)
lower_pad = tf.maximum(
0, cutout_center_height - pad_size_height)
upper_pad = tf.maximum(
0, image_height - cutout_center_height - pad_size_height)
left_pad = tf.maximum(
0, cutout_center_width - pad_size_width)
right_pad = tf.maximum(
0, image_width - cutout_center_width - pad_size_width)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, 2)
mask = tf.tile(mask, [1, 1, 3])
return mask, mean
def bbox_cutout(image, bboxes, pad_fraction, replace_with_mean):
"""Applies cutout to the image according to bbox information.
  This is a cutout variant that uses bbox information to make more informed
decisions on where to place the cutout mask.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
    pad_fraction: Float that specifies how large the cutout mask should be
in reference to the size of the original bbox. If pad_fraction is 0.25,
then the cutout mask will be of shape
(0.25 * bbox height, 0.25 * bbox width).
    replace_with_mean: Boolean that specifies what value should be filled in
where the cutout mask is applied. Since the incoming image will be of
uint8 and will not have had any mean normalization applied, by default
we set the value to be 128. If replace_with_mean is True then we find
the mean pixel values across the channel dimension and use those to fill
in where the cutout mask is applied.
Returns:
A tuple. First element is a tensor of the same shape as image that has
cutout applied to it. Second element is the bboxes that were passed in
that will be unchanged.
"""
def apply_bbox_cutout(image, bboxes, pad_fraction):
"""Applies cutout to a single bounding box within image."""
# Choose a single bounding box to apply cutout to.
random_index = tf.random_uniform(
shape=[], maxval=tf.shape(bboxes)[0], dtype=tf.int32)
# Select the corresponding bbox and apply cutout.
chosen_bbox = tf.gather(bboxes, random_index)
mask, mean = _cutout_inside_bbox(image, chosen_bbox, pad_fraction)
# When applying cutout we either set the pixel value to 128 or to the mean
# value inside the bbox.
replace = mean if replace_with_mean else 128
# Apply the cutout mask to the image. Where the mask is 0 we fill it with
# `replace`.
image = tf.where(
tf.equal(mask, 0),
tf.cast(tf.ones_like(image, dtype=image.dtype) * replace,
dtype=image.dtype),
image)
return image
# Check to see if there are boxes, if so then apply boxcutout.
image = tf.cond(tf.equal(tf.size(bboxes), 0), lambda: image,
lambda: apply_bbox_cutout(image, bboxes, pad_fraction))
return image, bboxes
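# A minimal usage sketch (values assumed): cut out a region centered inside a
# randomly chosen box, filling it with the mean pixel value of that box.
def _example_bbox_cutout():
  image = tf.zeros([224, 224, 3], dtype=tf.uint8)
  bboxes = tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32)
  return bbox_cutout(image, bboxes, pad_fraction=0.25, replace_with_mean=True)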
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'Cutout': cutout,
'BBox_Cutout': bbox_cutout,
'Rotate_BBox': rotate_with_bboxes,
# pylint:disable=g-long-lambda
'TranslateX_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=True),
'TranslateY_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=False),
'ShearX_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=True),
'ShearY_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=False),
# pylint:enable=g-long-lambda
'Rotate_Only_BBoxes': rotate_only_bboxes,
'ShearX_Only_BBoxes': shear_x_only_bboxes,
'ShearY_Only_BBoxes': shear_y_only_bboxes,
'TranslateX_Only_BBoxes': translate_x_only_bboxes,
'TranslateY_Only_BBoxes': translate_y_only_bboxes,
'Flip_Only_BBoxes': flip_only_bboxes,
'Solarize_Only_BBoxes': solarize_only_bboxes,
'Equalize_Only_BBoxes': equalize_only_bboxes,
'Cutout_Only_BBoxes': cutout_only_bboxes,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level/_MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level/_MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
level = (level/_MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = (level/_MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _bbox_cutout_level_to_arg(level, hparams):
cutout_pad_fraction = (level/_MAX_LEVEL) * hparams.cutout_max_pad_fraction
return (cutout_pad_fraction,
hparams.cutout_bbox_replace_with_mean)
def level_to_arg(hparams):
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),),
'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),),
'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),),
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'Cutout': lambda level: (int((level/_MAX_LEVEL) * hparams.cutout_const),),
# pylint:disable=g-long-lambda
'BBox_Cutout': lambda level: _bbox_cutout_level_to_arg(
level, hparams),
'TranslateX_BBox': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
'TranslateY_BBox': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
# pylint:enable=g-long-lambda
'ShearX_BBox': _shear_level_to_arg,
'ShearY_BBox': _shear_level_to_arg,
'Rotate_BBox': _rotate_level_to_arg,
'Rotate_Only_BBoxes': _rotate_level_to_arg,
'ShearX_Only_BBoxes': _shear_level_to_arg,
'ShearY_Only_BBoxes': _shear_level_to_arg,
# pylint:disable=g-long-lambda
'TranslateX_Only_BBoxes': lambda level: _translate_level_to_arg(
level, hparams.translate_bbox_const),
'TranslateY_Only_BBoxes': lambda level: _translate_level_to_arg(
level, hparams.translate_bbox_const),
# pylint:enable=g-long-lambda
'Flip_Only_BBoxes': lambda level: (),
'Solarize_Only_BBoxes': lambda level: (int((level/_MAX_LEVEL) * 256),),
'Equalize_Only_BBoxes': lambda level: (),
# pylint:disable=g-long-lambda
'Cutout_Only_BBoxes': lambda level: (
int((level/_MAX_LEVEL) * hparams.cutout_bbox_const),),
# pylint:enable=g-long-lambda
}
def bbox_wrapper(func):
"""Adds a bboxes function argument to func and returns unchanged bboxes."""
def wrapper(images, bboxes, *args, **kwargs):
return (func(images, *args, **kwargs), bboxes)
return wrapper
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
if six.PY2:
# pylint: disable=deprecated-method
arg_spec = inspect.getargspec(func)
# pylint: enable=deprecated-method
else:
arg_spec = inspect.getfullargspec(func)
# Check to see if prob is passed into function. This is used for operations
# where we alter bboxes independently.
# pytype:disable=wrong-arg-types
if 'prob' in arg_spec[0]:
args = tuple([prob] + list(args))
# pytype:enable=wrong-arg-types
# Add in replace arg if it is required for the function that is being called.
if 'replace' in arg_spec[0]:
# Make sure replace is the final argument
assert 'replace' == arg_spec[0][-1]
args = tuple(list(args) + [replace_value])
# Add bboxes as the second positional argument for the function if it does
# not already exist.
if 'bboxes' not in arg_spec[0]:
func = bbox_wrapper(func)
return (func, prob, args)
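# An illustrative sketch (the policy entry is an assumption; the hparams
# values mirror distort_image_with_autoaugment below): how a single
# ('TranslateX_BBox', prob, level) entry becomes a callable that expects
# (image, bboxes, *args).
def _example_parse_policy_info():
  hparams = contrib_training.HParams(
      cutout_max_pad_fraction=0.75, cutout_bbox_replace_with_mean=False,
      cutout_const=100, translate_const=250, cutout_bbox_const=50,
      translate_bbox_const=120)
  func, prob, args = _parse_policy_info(
      'TranslateX_BBox', prob=0.6, level=4, replace_value=[128, 128, 128],
      augmentation_hparams=hparams)
  return func, prob, args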
def _apply_func_with_prob(func, image, args, prob, bboxes):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
if six.PY2:
# pylint: disable=deprecated-method
arg_spec = inspect.getargspec(func)
# pylint: enable=deprecated-method
else:
arg_spec = inspect.getfullargspec(func)
assert 'bboxes' == arg_spec[0][1]
# If prob is a function argument, then this randomness is being handled
# inside the function, so make sure it is always called.
if 'prob' in arg_spec[0]:
prob = 1.0
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image, augmented_bboxes = tf.cond(
should_apply_op,
lambda: func(image, bboxes, *args),
lambda: (image, bboxes))
return augmented_image, augmented_bboxes
def select_and_apply_random_policy(policies, image, bboxes):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image, bboxes = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image, bboxes),
lambda: (image, bboxes))
return (image, bboxes)
def build_and_apply_nas_policy(policies, image, bboxes,
augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
    bboxes: `Tensor` of shape [N, 4] representing ground truth boxes that are
      normalized between [0, 1].
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
A version of image that now has data augmentation applied to it based on
    the `policies` passed into the function. Additionally, returns bboxes if
    a value for them is passed in that is not None.
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_, bboxes_):
for func, prob, args in tf_policy_:
image_, bboxes_ = _apply_func_with_prob(
func, image_, args, prob, bboxes_)
return image_, bboxes_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_image, augmented_bbox = select_and_apply_random_policy(
tf_policies, image, bboxes)
# If no bounding boxes were specified, then just return the images.
return (augmented_image, augmented_bbox)
# TODO(barretzoph): Add in ArXiv link once paper is out.
def distort_image_with_autoaugment(image, bboxes, augmentation_name):
"""Applies the AutoAugment policy to `image` and `bboxes`.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
bboxes: `Tensor` of shape [N, 4] representing ground truth boxes that are
normalized between [0, 1].
augmentation_name: The name of the AutoAugment policy to use. The available
options are `v0`, `v1`, `v2`, `v3` and `test`. `v0` is the policy used for
all of the results in the paper and was found to achieve the best results
on the COCO dataset. `v1`, `v2` and `v3` are additional good policies
found on the COCO dataset that have slight variation in what operations
were used during the search procedure along with how many operations are
applied in parallel to a single image (2 vs 3).
Returns:
A tuple containing the augmented versions of `image` and `bboxes`.
"""
image = tf.cast(image, tf.uint8)
available_policies = {'v0': policy_v0, 'v1': policy_v1, 'v2': policy_v2,
'v3': policy_v3, 'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = contrib_training.HParams(
cutout_max_pad_fraction=0.75,
cutout_bbox_replace_with_mean=False,
cutout_const=100,
translate_const=250,
cutout_bbox_const=50,
translate_bbox_const=120)
augmented_image, augmented_bbox = (
build_and_apply_nas_policy(policy, image, bboxes, augmentation_hparams))
augmented_image = tf.cast(augmented_image, tf.float32)
return augmented_image, augmented_bbox
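# A minimal usage sketch (image shape and box values assumed): apply the `v0`
# detection policy to one uint8 image and its normalized boxes. The result is
# a float32 image and (possibly shifted) [N, 4] boxes in [0, 1].
def _example_distort_image_with_autoaugment():
  image = tf.zeros([256, 256, 3], dtype=tf.uint8)
  bboxes = tf.constant([[0.1, 0.2, 0.6, 0.7],
                        [0.3, 0.3, 0.9, 0.8]], dtype=tf.float32)
  return distort_image_with_autoaugment(image, bboxes, 'v0')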
| 65,476 | 38.658995 | 88 | py |
models | models-master/research/object_detection/utils/test_utils_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.test_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import test_case
from object_detection.utils import test_utils
class TestUtilsTest(test_case.TestCase):
def test_diagonal_gradient_image(self):
"""Tests if a good pyramid image is created."""
pyramid_image = test_utils.create_diagonal_gradient_image(3, 4, 2)
# Test which is easy to understand.
expected_first_channel = np.array([[3, 2, 1, 0],
[4, 3, 2, 1],
[5, 4, 3, 2]], dtype=np.float32)
self.assertAllEqual(np.squeeze(pyramid_image[:, :, 0]),
expected_first_channel)
# Actual test.
expected_image = np.array([[[3, 30],
[2, 20],
[1, 10],
[0, 0]],
[[4, 40],
[3, 30],
[2, 20],
[1, 10]],
[[5, 50],
[4, 40],
[3, 30],
[2, 20]]], dtype=np.float32)
self.assertAllEqual(pyramid_image, expected_image)
def test_random_boxes(self):
"""Tests if valid random boxes are created."""
num_boxes = 1000
max_height = 3
max_width = 5
boxes = test_utils.create_random_boxes(num_boxes,
max_height,
max_width)
true_column = np.ones(shape=(num_boxes)) == 1
self.assertAllEqual(boxes[:, 0] < boxes[:, 2], true_column)
self.assertAllEqual(boxes[:, 1] < boxes[:, 3], true_column)
self.assertGreaterEqual(boxes[:, 0].min(), 0)
self.assertGreaterEqual(boxes[:, 1].min(), 0)
self.assertLessEqual(boxes[:, 2].max(), max_height)
self.assertLessEqual(boxes[:, 3].max(), max_width)
def test_first_rows_close_as_set(self):
a = [1, 2, 3, 0, 0]
b = [3, 2, 1, 0, 0]
k = 3
self.assertTrue(test_utils.first_rows_close_as_set(a, b, k))
a = [[1, 2], [1, 4], [0, 0]]
b = [[1, 4 + 1e-9], [1, 2], [0, 0]]
k = 2
self.assertTrue(test_utils.first_rows_close_as_set(a, b, k))
a = [[1, 2], [1, 4], [0, 0]]
b = [[1, 4 + 1e-9], [2, 2], [0, 0]]
k = 2
self.assertFalse(test_utils.first_rows_close_as_set(a, b, k))
if __name__ == '__main__':
tf.test.main()
| 3,328 | 34.042105 | 80 | py |
models | models-master/research/object_detection/utils/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/object_detection/utils/static_shape_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.static_shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.utils import static_shape
class StaticShapeTest(tf.test.TestCase):
def test_return_correct_batchSize(self):
tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
self.assertEqual(32, static_shape.get_batch_size(tensor_shape))
def test_return_correct_height(self):
tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
self.assertEqual(299, static_shape.get_height(tensor_shape))
def test_return_correct_width(self):
tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
self.assertEqual(384, static_shape.get_width(tensor_shape))
def test_return_correct_depth(self):
tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
self.assertEqual(3, static_shape.get_depth(tensor_shape))
def test_die_on_tensor_shape_with_rank_three(self):
tensor_shape = tf.TensorShape(dims=[32, 299, 384])
with self.assertRaises(ValueError):
static_shape.get_batch_size(tensor_shape)
static_shape.get_height(tensor_shape)
static_shape.get_width(tensor_shape)
static_shape.get_depth(tensor_shape)
if __name__ == '__main__':
tf.test.main()
| 2,009 | 35.545455 | 80 | py |
models | models-master/research/object_detection/utils/np_box_mask_list_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_mask_list_test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_box_mask_list
class BoxMaskListTest(tf.test.TestCase):
def test_invalid_box_mask_data(self):
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=[0, 0, 1, 1],
mask_data=np.zeros([1, 3, 3], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([[0, 0, 1, 1]], dtype=int),
mask_data=np.zeros([1, 3, 3], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([0, 1, 1, 3, 4], dtype=float),
mask_data=np.zeros([1, 3, 3], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([[0, 1, 1, 3], [3, 1, 1, 5]], dtype=float),
mask_data=np.zeros([2, 3, 3], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([[0, 1, 1, 3], [1, 1, 1, 5]], dtype=float),
mask_data=np.zeros([3, 5, 5], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([[0, 1, 1, 3], [1, 1, 1, 5]], dtype=float),
mask_data=np.zeros([2, 5], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([[0, 1, 1, 3], [1, 1, 1, 5]], dtype=float),
mask_data=np.zeros([2, 5, 5, 5], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([[0, 1, 1, 3], [1, 1, 1, 5]], dtype=float),
mask_data=np.zeros([2, 5, 5], dtype=np.int32))
def test_has_field_with_existed_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=np.zeros([3, 5, 5], dtype=np.uint8))
self.assertTrue(box_mask_list.has_field('boxes'))
self.assertTrue(box_mask_list.has_field('masks'))
def test_has_field_with_nonexisted_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=np.zeros([3, 3, 3], dtype=np.uint8))
self.assertFalse(box_mask_list.has_field('scores'))
def test_get_field_with_existed_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
masks = np.zeros([3, 3, 3], dtype=np.uint8)
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=masks)
self.assertTrue(np.allclose(box_mask_list.get_field('boxes'), boxes))
self.assertTrue(np.allclose(box_mask_list.get_field('masks'), masks))
def test_get_field_with_nonexited_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
masks = np.zeros([3, 3, 3], dtype=np.uint8)
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=masks)
with self.assertRaises(ValueError):
box_mask_list.get_field('scores')
class AddExtraFieldTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
masks = np.zeros([3, 3, 3], dtype=np.uint8)
self.box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=masks)
def test_add_already_existed_field_bbox(self):
with self.assertRaises(ValueError):
self.box_mask_list.add_field('boxes',
np.array([[0, 0, 0, 1, 0]], dtype=float))
def test_add_already_existed_field_mask(self):
with self.assertRaises(ValueError):
self.box_mask_list.add_field('masks',
np.zeros([3, 3, 3], dtype=np.uint8))
def test_add_invalid_field_data(self):
with self.assertRaises(ValueError):
self.box_mask_list.add_field('scores', np.array([0.5, 0.7], dtype=float))
with self.assertRaises(ValueError):
self.box_mask_list.add_field('scores',
np.array([0.5, 0.7, 0.9, 0.1], dtype=float))
def test_add_single_dimensional_field_data(self):
box_mask_list = self.box_mask_list
scores = np.array([0.5, 0.7, 0.9], dtype=float)
box_mask_list.add_field('scores', scores)
self.assertTrue(np.allclose(scores, self.box_mask_list.get_field('scores')))
def test_add_multi_dimensional_field_data(self):
box_mask_list = self.box_mask_list
labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]],
dtype=int)
box_mask_list.add_field('labels', labels)
self.assertTrue(np.allclose(labels, self.box_mask_list.get_field('labels')))
def test_get_extra_fields(self):
box_mask_list = self.box_mask_list
self.assertItemsEqual(box_mask_list.get_extra_fields(), ['masks'])
scores = np.array([0.5, 0.7, 0.9], dtype=float)
box_mask_list.add_field('scores', scores)
self.assertItemsEqual(box_mask_list.get_extra_fields(), ['masks', 'scores'])
labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]],
dtype=int)
box_mask_list.add_field('labels', labels)
self.assertItemsEqual(box_mask_list.get_extra_fields(),
['masks', 'scores', 'labels'])
def test_get_coordinates(self):
y_min, x_min, y_max, x_max = self.box_mask_list.get_coordinates()
expected_y_min = np.array([3.0, 14.0, 0.0], dtype=float)
expected_x_min = np.array([4.0, 14.0, 0.0], dtype=float)
expected_y_max = np.array([6.0, 15.0, 20.0], dtype=float)
expected_x_max = np.array([8.0, 15.0, 20.0], dtype=float)
self.assertTrue(np.allclose(y_min, expected_y_min))
self.assertTrue(np.allclose(x_min, expected_x_min))
self.assertTrue(np.allclose(y_max, expected_y_max))
self.assertTrue(np.allclose(x_max, expected_x_max))
def test_num_boxes(self):
boxes = np.array([[0., 0., 100., 100.], [10., 30., 50., 70.]], dtype=float)
masks = np.zeros([2, 5, 5], dtype=np.uint8)
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=masks)
expected_num_boxes = 2
self.assertEquals(box_mask_list.num_boxes(), expected_num_boxes)
if __name__ == '__main__':
tf.test.main()
| 7,611 | 39.705882 | 80 | py |
models | models-master/research/object_detection/utils/ops.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for helper tensorflow ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import standard_fields as fields
from object_detection.utils import shape_utils
from object_detection.utils import spatial_transform_ops as spatial_ops
from object_detection.utils import static_shape
matmul_crop_and_resize = spatial_ops.matmul_crop_and_resize
multilevel_roi_align = spatial_ops.multilevel_roi_align
native_crop_and_resize = spatial_ops.native_crop_and_resize
def expanded_shape(orig_shape, start_dim, num_dims):
"""Inserts multiple ones into a shape vector.
Inserts an all-1 vector of length num_dims at position start_dim into a shape.
Can be combined with tf.reshape to generalize tf.expand_dims.
Args:
orig_shape: the shape into which the all-1 vector is added (int32 vector)
start_dim: insertion position (int scalar)
num_dims: length of the inserted all-1 vector (int scalar)
Returns:
An int32 vector of length tf.size(orig_shape) + num_dims.
"""
with tf.name_scope('ExpandedShape'):
start_dim = tf.expand_dims(start_dim, 0) # scalar to rank-1
before = tf.slice(orig_shape, [0], start_dim)
add_shape = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)
after = tf.slice(orig_shape, start_dim, [-1])
new_shape = tf.concat([before, add_shape, after], 0)
return new_shape
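# A short sketch (values assumed): inserting two 1s at position 1 of the shape
# [2, 3] yields [2, 1, 1, 3].
def _example_expanded_shape():
  return expanded_shape(tf.constant([2, 3]), start_dim=1, num_dims=2)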
def normalized_to_image_coordinates(normalized_boxes, image_shape,
parallel_iterations=32):
"""Converts a batch of boxes from normal to image coordinates.
Args:
normalized_boxes: a tensor of shape [None, num_boxes, 4] in
normalized coordinates. The dtype of this tensor must support tf.mul.
image_shape: a tensor of shape [4] containing the image shape, with same
dtype as `normalized_boxes`.
parallel_iterations: parallelism for the map_fn op.
Returns:
absolute_boxes: a tensor of shape [None, num_boxes, 4] containing
the boxes in image coordinates, with same
dtype as `normalized_boxes`.
"""
x_scale = tf.cast(image_shape[2], normalized_boxes.dtype)
y_scale = tf.cast(image_shape[1], normalized_boxes.dtype)
def _to_absolute_coordinates(normalized_boxes):
y_min, x_min, y_max, x_max = tf.split(
value=normalized_boxes, num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxes = tf.concat([y_min, x_min, y_max, x_max], 1)
return scaled_boxes
absolute_boxes = shape_utils.static_or_dynamic_map_fn(
_to_absolute_coordinates,
elems=(normalized_boxes),
dtype=normalized_boxes.dtype,
parallel_iterations=parallel_iterations,
back_prop=True)
return absolute_boxes
def meshgrid(x, y):
"""Tiles the contents of x and y into a pair of grids.
Multidimensional analog of numpy.meshgrid, giving the same behavior if x and y
are vectors. Generally, this will give:
xgrid(i1, ..., i_m, j_1, ..., j_n) = x(j_1, ..., j_n)
ygrid(i1, ..., i_m, j_1, ..., j_n) = y(i_1, ..., i_m)
Keep in mind that the order of the arguments and outputs is reverse relative
to the order of the indices they go into, done for compatibility with numpy.
The output tensors have the same shapes. Specifically:
xgrid.get_shape() = y.get_shape().concatenate(x.get_shape())
ygrid.get_shape() = y.get_shape().concatenate(x.get_shape())
Args:
x: A tensor of arbitrary shape and rank. xgrid will contain these values
varying in its last dimensions.
y: A tensor of arbitrary shape and rank. ygrid will contain these values
varying in its first dimensions.
Returns:
A tuple of tensors (xgrid, ygrid).
"""
with tf.name_scope('Meshgrid'):
x = tf.convert_to_tensor(x)
y = tf.convert_to_tensor(y)
x_exp_shape = expanded_shape(tf.shape(x), 0, tf.rank(y))
y_exp_shape = expanded_shape(tf.shape(y), tf.rank(y), tf.rank(x))
xgrid = tf.tile(tf.reshape(x, x_exp_shape), y_exp_shape)
ygrid = tf.tile(tf.reshape(y, y_exp_shape), x_exp_shape)
new_shape = y.get_shape().concatenate(x.get_shape())
xgrid.set_shape(new_shape)
ygrid.set_shape(new_shape)
return xgrid, ygrid
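# A short sketch (values assumed): for vector inputs this mirrors
# numpy.meshgrid, tiling x along the last dimension and y along the first.
def _example_meshgrid():
  xgrid, ygrid = meshgrid(tf.constant([0., 1., 2.]), tf.constant([0., 10.]))
  # xgrid == [[0., 1., 2.], [0., 1., 2.]]
  # ygrid == [[0., 0., 0.], [10., 10., 10.]]
  return xgrid, ygrid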
def fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs
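# A short sketch (shapes assumed): with kernel_size=3 a [1, 5, 5, 3] input is
# zero padded by one pixel on each spatial side, giving a [1, 7, 7, 3] output.
def _example_fixed_padding():
  inputs = tf.ones([1, 5, 5, 3], dtype=tf.float32)
  return fixed_padding(inputs, kernel_size=3)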
def pad_to_multiple(tensor, multiple):
"""Returns the tensor zero padded to the specified multiple.
  Appends 0s to the end of the spatial dimensions (height and width) of
the tensor until both dimensions are a multiple of the input argument
'multiple'. E.g. given an input tensor of shape [1, 3, 5, 1] and an input
multiple of 4, PadToMultiple will append 0s so that the resulting tensor will
be of shape [1, 4, 8, 1].
Args:
tensor: rank 4 float32 tensor, where
tensor -> [batch_size, height, width, channels].
multiple: the multiple to pad to.
Returns:
padded_tensor: the tensor zero padded to the specified multiple.
"""
if multiple == 1:
return tensor
tensor_shape = tensor.get_shape()
batch_size = static_shape.get_batch_size(tensor_shape)
tensor_height = static_shape.get_height(tensor_shape)
tensor_width = static_shape.get_width(tensor_shape)
tensor_depth = static_shape.get_depth(tensor_shape)
if batch_size is None:
batch_size = tf.shape(tensor)[0]
if tensor_height is None:
tensor_height = tf.shape(tensor)[1]
padded_tensor_height = tf.cast(
tf.ceil(
tf.cast(tensor_height, dtype=tf.float32) /
tf.cast(multiple, dtype=tf.float32)),
dtype=tf.int32) * multiple
else:
padded_tensor_height = int(
math.ceil(float(tensor_height) / multiple) * multiple)
if tensor_width is None:
tensor_width = tf.shape(tensor)[2]
padded_tensor_width = tf.cast(
tf.ceil(
tf.cast(tensor_width, dtype=tf.float32) /
tf.cast(multiple, dtype=tf.float32)),
dtype=tf.int32) * multiple
else:
padded_tensor_width = int(
math.ceil(float(tensor_width) / multiple) * multiple)
if tensor_depth is None:
tensor_depth = tf.shape(tensor)[3]
# Use tf.concat instead of tf.pad to preserve static shape
if padded_tensor_height != tensor_height:
height_pad = tf.zeros([
batch_size, padded_tensor_height - tensor_height, tensor_width,
tensor_depth
], dtype=tensor.dtype)
tensor = tf.concat([tensor, height_pad], 1)
if padded_tensor_width != tensor_width:
width_pad = tf.zeros([
batch_size, padded_tensor_height, padded_tensor_width - tensor_width,
tensor_depth
], dtype=tensor.dtype)
tensor = tf.concat([tensor, width_pad], 2)
return tensor
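# A short sketch (shape assumed): padding a [1, 3, 5, 1] tensor to a multiple
# of 4 yields a [1, 4, 8, 1] tensor, matching the docstring example above.
def _example_pad_to_multiple():
  tensor = tf.ones([1, 3, 5, 1], dtype=tf.float32)
  return pad_to_multiple(tensor, multiple=4)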
def padded_one_hot_encoding(indices, depth, left_pad):
"""Returns a zero padded one-hot tensor.
  This function converts a sparse representation of indices (e.g., [3]) to a
zero padded one-hot representation (e.g., [0, 0, 0, 0, 1] with depth = 4 and
left_pad = 1). If `indices` is empty, the result will simply be a tensor of
shape (0, depth + left_pad). If depth = 0, then this function just returns
`None`.
Args:
indices: an integer tensor of shape [num_indices].
depth: depth for the one-hot tensor (integer).
left_pad: number of zeros to left pad the one-hot tensor with (integer).
Returns:
padded_onehot: a tensor with shape (num_indices, depth + left_pad). Returns
`None` if the depth is zero.
Raises:
    ValueError: if `indices` does not have rank 1 or if `left_pad` or `depth` are
either negative or non-integers.
TODO(rathodv): add runtime checks for depth and indices.
"""
if depth < 0 or not isinstance(depth, six.integer_types):
raise ValueError('`depth` must be a non-negative integer.')
if left_pad < 0 or not isinstance(left_pad, six.integer_types):
raise ValueError('`left_pad` must be a non-negative integer.')
if depth == 0:
return None
rank = len(indices.get_shape().as_list())
if rank != 1:
raise ValueError('`indices` must have rank 1, but has rank=%s' % rank)
def one_hot_and_pad():
one_hot = tf.cast(tf.one_hot(tf.cast(indices, tf.int64), depth,
on_value=1, off_value=0), tf.float32)
return tf.pad(one_hot, [[0, 0], [left_pad, 0]], mode='CONSTANT')
result = tf.cond(tf.greater(tf.size(indices), 0), one_hot_and_pad,
lambda: tf.zeros((tf.size(indices), depth + left_pad)))
return tf.reshape(result, [-1, depth + left_pad])
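# A short sketch (values assumed): with depth=4 and left_pad=1, index 2 maps
# to the row [0., 0., 0., 1., 0.] -- one leading pad zero followed by the
# one-hot code.
def _example_padded_one_hot_encoding():
  indices = tf.constant([2, 0], dtype=tf.int32)
  return padded_one_hot_encoding(indices, depth=4, left_pad=1)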
def dense_to_sparse_boxes(dense_locations, dense_num_boxes, num_classes):
"""Converts bounding boxes from dense to sparse form.
Args:
dense_locations: a [max_num_boxes, 4] tensor in which only the first k rows
are valid bounding box location coordinates, where k is the sum of
elements in dense_num_boxes.
dense_num_boxes: a [max_num_classes] tensor indicating the counts of
various bounding box classes e.g. [1, 0, 0, 2] means that the first
bounding box is of class 0 and the second and third bounding boxes are
of class 3. The sum of elements in this tensor is the number of valid
bounding boxes.
num_classes: number of classes
Returns:
box_locations: a [num_boxes, 4] tensor containing only valid bounding
boxes (i.e. the first num_boxes rows of dense_locations)
box_classes: a [num_boxes] tensor containing the classes of each bounding
box (e.g. dense_num_boxes = [1, 0, 0, 2] => box_classes = [0, 3, 3]
"""
num_valid_boxes = tf.reduce_sum(dense_num_boxes)
box_locations = tf.slice(dense_locations,
tf.constant([0, 0]), tf.stack([num_valid_boxes, 4]))
tiled_classes = [tf.tile([i], tf.expand_dims(dense_num_boxes[i], 0))
for i in range(num_classes)]
box_classes = tf.concat(tiled_classes, 0)
box_locations.set_shape([None, 4])
return box_locations, box_classes
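# Illustrative usage sketch (hypothetical helper, for demonstration only):
# with dense_num_boxes=[1, 0, 0, 2] only the first three rows of
# dense_locations are valid and the returned classes are [0, 3, 3].
def _dense_to_sparse_boxes_example():
  dense_locations = tf.zeros([5, 4], dtype=tf.float32)
  dense_num_boxes = tf.constant([1, 0, 0, 2], dtype=tf.int32)
  return dense_to_sparse_boxes(dense_locations, dense_num_boxes, num_classes=4)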
def indices_to_dense_vector(indices,
size,
indices_value=1.,
default_value=0,
dtype=tf.float32):
"""Creates dense vector with indices set to specific value and rest to zeros.
This function exists because it is unclear if it is safe to use
tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
with indices which are not ordered.
This function accepts a dynamic size (e.g. tf.shape(tensor)[0])
Args:
indices: 1d Tensor with integer indices which are to be set to
indices_values.
size: scalar with size (integer) of output Tensor.
indices_value: values of elements specified by indices in the output vector
default_value: values of other elements in the output vector.
dtype: data type.
Returns:
dense 1D Tensor of shape [size] with indices set to indices_values and the
rest set to default_value.
"""
size = tf.cast(size, dtype=tf.int32)
zeros = tf.ones([size], dtype=dtype) * default_value
values = tf.ones_like(indices, dtype=dtype) * indices_value
return tf.dynamic_stitch([tf.range(size), tf.cast(indices, dtype=tf.int32)],
[zeros, values])
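# Illustrative usage sketch (hypothetical helper, for demonstration only):
# scattering indices [0, 2] into a dense vector of size 4 yields
# [1., 0., 1., 0.].
def _indices_to_dense_vector_example():
  return indices_to_dense_vector(tf.constant([0, 2]), size=4)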
def reduce_sum_trailing_dimensions(tensor, ndims):
"""Computes sum across all dimensions following first `ndims` dimensions."""
return tf.reduce_sum(tensor, axis=tuple(range(ndims, tensor.shape.ndims)))
def retain_groundtruth(tensor_dict, valid_indices):
"""Retains groundtruth by valid indices.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_confidences
fields.InputDataFields.groundtruth_keypoints
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
fields.InputDataFields.groundtruth_difficult
valid_indices: a tensor with valid indices for the box-level groundtruth.
Returns:
a dictionary of tensors containing only the groundtruth for valid_indices.
Raises:
ValueError: If the shape of valid_indices is invalid.
ValueError: field fields.InputDataFields.groundtruth_boxes is
not present in tensor_dict.
"""
input_shape = valid_indices.get_shape().as_list()
if not (len(input_shape) == 1 or
(len(input_shape) == 2 and input_shape[1] == 1)):
raise ValueError('The shape of valid_indices is invalid.')
valid_indices = tf.reshape(valid_indices, [-1])
valid_dict = {}
if fields.InputDataFields.groundtruth_boxes in tensor_dict:
# Prevents reshape failure when num_boxes is 0.
num_boxes = tf.maximum(tf.shape(
tensor_dict[fields.InputDataFields.groundtruth_boxes])[0], 1)
for key in tensor_dict:
if key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_confidences,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_keypoint_visibilities,
fields.InputDataFields.groundtruth_instance_masks]:
valid_dict[key] = tf.gather(tensor_dict[key], valid_indices)
# Input decoder returns empty tensor when these fields are not provided.
# Needs to reshape into [num_boxes, -1] for tf.gather() to work.
elif key in [fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_label_types]:
valid_dict[key] = tf.reshape(
tf.gather(tf.reshape(tensor_dict[key], [num_boxes, -1]),
valid_indices), [-1])
# Fields that are not associated with boxes.
else:
valid_dict[key] = tensor_dict[key]
else:
raise ValueError('%s not present in input tensor dict.' % (
fields.InputDataFields.groundtruth_boxes))
return valid_dict
def retain_groundtruth_with_positive_classes(tensor_dict):
"""Retains only groundtruth with positive class ids.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_confidences
fields.InputDataFields.groundtruth_keypoints
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
fields.InputDataFields.groundtruth_difficult
Returns:
a dictionary of tensors containing only the groundtruth with positive
classes.
Raises:
ValueError: If groundtruth_classes tensor is not in tensor_dict.
"""
if fields.InputDataFields.groundtruth_classes not in tensor_dict:
raise ValueError('`groundtruth classes` not in tensor_dict.')
keep_indices = tf.where(tf.greater(
tensor_dict[fields.InputDataFields.groundtruth_classes], 0))
return retain_groundtruth(tensor_dict, keep_indices)
def replace_nan_groundtruth_label_scores_with_ones(label_scores):
"""Replaces nan label scores with 1.0.
Args:
    label_scores: a tensor containing object annotation label scores.
Returns:
a tensor where NaN label scores have been replaced by ones.
"""
return tf.where(
tf.is_nan(label_scores), tf.ones(tf.shape(label_scores)), label_scores)
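# Illustrative usage sketch (hypothetical helper, for demonstration only):
# NaN entries are replaced by 1.0 while finite scores are preserved,
# e.g. [0.5, nan, 0.9] -> [0.5, 1.0, 0.9].
def _replace_nan_label_scores_example():
  scores = tf.constant([0.5, float('nan'), 0.9], dtype=tf.float32)
  return replace_nan_groundtruth_label_scores_with_ones(scores)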
def filter_groundtruth_with_crowd_boxes(tensor_dict):
"""Filters out groundtruth with boxes corresponding to crowd.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_confidences
fields.InputDataFields.groundtruth_keypoints
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
Returns:
    a dictionary of tensors containing only the groundtruth that is not
    annotated as a crowd.
"""
if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
is_crowd = tensor_dict[fields.InputDataFields.groundtruth_is_crowd]
is_not_crowd = tf.logical_not(is_crowd)
is_not_crowd_indices = tf.where(is_not_crowd)
tensor_dict = retain_groundtruth(tensor_dict, is_not_crowd_indices)
return tensor_dict
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
"""Filters out groundtruth with no bounding boxes.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_confidences
fields.InputDataFields.groundtruth_keypoints
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
Returns:
    a dictionary of tensors containing only the groundtruth whose box
    coordinates do not contain NaN values.
"""
groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
nan_indicator_vector = tf.greater(tf.reduce_sum(tf.cast(
tf.is_nan(groundtruth_boxes), dtype=tf.int32), reduction_indices=[1]), 0)
valid_indicator_vector = tf.logical_not(nan_indicator_vector)
valid_indices = tf.where(valid_indicator_vector)
return retain_groundtruth(tensor_dict, valid_indices)
def filter_unrecognized_classes(tensor_dict):
"""Filters out class labels that are not unrecognized by the labelmap.
Decoder would parse unrecognized classes (not included in the labelmap) to
a label of value -1. Such targets are unecessary for training, and causes
issue for evaluation, due to labeling mapping logic. This function filters
those labels out for both training and evaluation.
Args:
tensor_dict: dictionary containing input tensors keyed by
fields.InputDataFields.
Returns:
A dictionary keyed by fields.InputDataFields containing the tensors
obtained after applying the filtering.
Raises:
ValueError: If groundtruth_classes tensor is not in tensor_dict.
"""
if fields.InputDataFields.groundtruth_classes not in tensor_dict:
raise ValueError('`groundtruth classes` not in tensor_dict.')
# Refer to tf_example_decoder for how unrecognized labels are handled.
unrecognized_label = -1
recognized_indices = tf.where(
tf.greater(tensor_dict[fields.InputDataFields.groundtruth_classes],
unrecognized_label))
return retain_groundtruth(tensor_dict, recognized_indices)
def normalize_to_target(inputs,
target_norm_value,
dim,
epsilon=1e-7,
trainable=True,
scope='NormalizeToTarget',
summarize=True):
"""L2 normalizes the inputs across the specified dimension to a target norm.
This op implements the L2 Normalization layer introduced in
Liu, Wei, et al. "SSD: Single Shot MultiBox Detector."
and Liu, Wei, Andrew Rabinovich, and Alexander C. Berg.
"Parsenet: Looking wider to see better." and is useful for bringing
activations from multiple layers in a convnet to a standard scale.
Note that the rank of `inputs` must be known and the dimension to which
normalization is to be applied should be statically defined.
TODO(jonathanhuang): Add option to scale by L2 norm of the entire input.
Args:
inputs: A `Tensor` of arbitrary size.
target_norm_value: A float value that specifies an initial target norm or
a list of floats (whose length must be equal to the depth along the
dimension to be normalized) specifying a per-dimension multiplier
after normalization.
dim: The dimension along which the input is normalized.
epsilon: A small value to add to the inputs to avoid dividing by zero.
trainable: Whether the norm is trainable or not
scope: Optional scope for variable_scope.
summarize: Whether or not to add a tensorflow summary for the op.
Returns:
The input tensor normalized to the specified target norm.
Raises:
    ValueError: If dim is negative or not smaller than the number of
      dimensions in 'inputs'.
ValueError: If target_norm_value is not a float or a list of floats with
length equal to the depth along the dimension to be normalized.
"""
with tf.variable_scope(scope, 'NormalizeToTarget', [inputs]):
if not inputs.get_shape():
raise ValueError('The input rank must be known.')
input_shape = inputs.get_shape().as_list()
input_rank = len(input_shape)
if dim < 0 or dim >= input_rank:
raise ValueError(
'dim must be non-negative but smaller than the input rank.')
if not input_shape[dim]:
raise ValueError('input shape should be statically defined along '
'the specified dimension.')
depth = input_shape[dim]
if not (isinstance(target_norm_value, float) or
(isinstance(target_norm_value, list) and
len(target_norm_value) == depth) and
all([isinstance(val, float) for val in target_norm_value])):
raise ValueError('target_norm_value must be a float or a list of floats '
'with length equal to the depth along the dimension to '
'be normalized.')
if isinstance(target_norm_value, float):
initial_norm = depth * [target_norm_value]
else:
initial_norm = target_norm_value
target_norm = slim.model_variable(
name='weights',
dtype=tf.float32,
initializer=tf.constant(initial_norm, dtype=tf.float32),
trainable=trainable)
if summarize:
mean = tf.reduce_mean(target_norm)
tf.summary.scalar(tf.get_variable_scope().name, mean)
lengths = epsilon + tf.sqrt(tf.reduce_sum(tf.square(inputs), dim, True))
mult_shape = input_rank*[1]
mult_shape[dim] = depth
return tf.reshape(target_norm, mult_shape) * tf.truediv(inputs, lengths)
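# Illustrative usage sketch (hypothetical helper, for demonstration only):
# L2-normalizes a [batch, height, width, channels] feature map along its
# channel dimension to an initial target norm of 10.0, as in the SSD paper.
# `feature_map` is assumed to have a statically known channel depth.
def _normalize_to_target_example(feature_map):
  return normalize_to_target(feature_map, target_norm_value=10.0, dim=3)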
def batch_position_sensitive_crop_regions(images,
boxes,
crop_size,
num_spatial_bins,
global_pool,
parallel_iterations=64):
"""Position sensitive crop with batches of images and boxes.
This op is exactly like `position_sensitive_crop_regions` below but operates
on batches of images and boxes. See `position_sensitive_crop_regions` function
below for the operation applied per batch element.
Args:
images: A `Tensor`. Must be one of the following types: `uint8`, `int8`,
`int16`, `int32`, `int64`, `half`, `float32`, `float64`.
A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
Both `image_height` and `image_width` need to be positive.
boxes: A `Tensor` of type `float32`.
A 3-D tensor of shape `[batch, num_boxes, 4]`. Each box is specified in
normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value
of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so
as the `[0, 1]` interval of normalized image height is mapped to
      `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2,
in which case the sampled crop is an up-down flipped version of the
original image. The width dimension is treated similarly.
crop_size: See `position_sensitive_crop_regions` below.
num_spatial_bins: See `position_sensitive_crop_regions` below.
global_pool: See `position_sensitive_crop_regions` below.
parallel_iterations: Number of batch items to process in parallel.
  Returns:
    position_sensitive_features: A 5-D tensor of shape
      `[batch, num_boxes, K, K, crop_channels]`,
      where `crop_channels = depth / (spatial_bins_y * spatial_bins_x)`,
      where K = 1 when global_pool is True (Average-pooled cropped regions),
      and K = crop_size when global_pool is False.
  """
def _position_sensitive_crop_fn(inputs):
images, boxes = inputs
return position_sensitive_crop_regions(
images,
boxes,
crop_size=crop_size,
num_spatial_bins=num_spatial_bins,
global_pool=global_pool)
return shape_utils.static_or_dynamic_map_fn(
_position_sensitive_crop_fn,
elems=[images, boxes],
dtype=tf.float32,
parallel_iterations=parallel_iterations)
def position_sensitive_crop_regions(image,
boxes,
crop_size,
num_spatial_bins,
global_pool):
"""Position-sensitive crop and pool rectangular regions from a feature grid.
The output crops are split into `spatial_bins_y` vertical bins
and `spatial_bins_x` horizontal bins. For each intersection of a vertical
and a horizontal bin the output values are gathered by performing
  `tf.image.crop_and_resize` (bilinear resampling) on a separate subset of
channels of the image. This reduces `depth` by a factor of
`(spatial_bins_y * spatial_bins_x)`.
When global_pool is True, this function implements a differentiable version
of position-sensitive RoI pooling used in
[R-FCN detection system](https://arxiv.org/abs/1605.06409).
When global_pool is False, this function implements a differentiable version
of position-sensitive assembling operation used in
[instance FCN](https://arxiv.org/abs/1603.08678).
Args:
image: A `Tensor`. Must be one of the following types: `uint8`, `int8`,
`int16`, `int32`, `int64`, `half`, `float32`, `float64`.
A 3-D tensor of shape `[image_height, image_width, depth]`.
Both `image_height` and `image_width` need to be positive.
boxes: A `Tensor` of type `float32`.
A 2-D tensor of shape `[num_boxes, 4]`. Each box is specified in
normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value
of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so
as the `[0, 1]` interval of normalized image height is mapped to
      `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2,
in which case the sampled crop is an up-down flipped version of the
original image. The width dimension is treated similarly.
crop_size: A list of two integers `[crop_height, crop_width]`. All
cropped image patches are resized to this size. The aspect ratio of the
image content is not preserved. Both `crop_height` and `crop_width` need
to be positive.
num_spatial_bins: A list of two integers `[spatial_bins_y, spatial_bins_x]`.
Represents the number of position-sensitive bins in y and x directions.
Both values should be >= 1. `crop_height` should be divisible by
`spatial_bins_y`, and similarly for width.
The number of image channels should be divisible by
(spatial_bins_y * spatial_bins_x).
Suggested value from R-FCN paper: [3, 3].
global_pool: A boolean variable.
If True, we perform average global pooling on the features assembled from
the position-sensitive score maps.
If False, we keep the position-pooled features without global pooling
over the spatial coordinates.
Note that using global_pool=True is equivalent to but more efficient than
running the function with global_pool=False and then performing global
average pooling.
Returns:
position_sensitive_features: A 4-D tensor of shape
`[num_boxes, K, K, crop_channels]`,
where `crop_channels = depth / (spatial_bins_y * spatial_bins_x)`,
where K = 1 when global_pool is True (Average-pooled cropped regions),
and K = crop_size when global_pool is False.
Raises:
ValueError: Raised in four situations:
`num_spatial_bins` is not >= 1;
`num_spatial_bins` does not divide `crop_size`;
`(spatial_bins_y*spatial_bins_x)` does not divide `depth`;
`bin_crop_size` is not square when global_pool=False due to the
constraint in function space_to_depth.
"""
total_bins = 1
bin_crop_size = []
for (num_bins, crop_dim) in zip(num_spatial_bins, crop_size):
if num_bins < 1:
raise ValueError('num_spatial_bins should be >= 1')
if crop_dim % num_bins != 0:
raise ValueError('crop_size should be divisible by num_spatial_bins')
total_bins *= num_bins
bin_crop_size.append(crop_dim // num_bins)
if not global_pool and bin_crop_size[0] != bin_crop_size[1]:
raise ValueError('Only support square bin crop size for now.')
ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)
spatial_bins_y, spatial_bins_x = num_spatial_bins
# Split each box into spatial_bins_y * spatial_bins_x bins.
position_sensitive_boxes = []
for bin_y in range(spatial_bins_y):
step_y = (ymax - ymin) / spatial_bins_y
for bin_x in range(spatial_bins_x):
step_x = (xmax - xmin) / spatial_bins_x
box_coordinates = [ymin + bin_y * step_y,
xmin + bin_x * step_x,
ymin + (bin_y + 1) * step_y,
xmin + (bin_x + 1) * step_x,
]
position_sensitive_boxes.append(tf.stack(box_coordinates, axis=1))
image_splits = tf.split(value=image, num_or_size_splits=total_bins, axis=2)
image_crops = []
for (split, box) in zip(image_splits, position_sensitive_boxes):
if split.shape.is_fully_defined() and box.shape.is_fully_defined():
crop = tf.squeeze(
matmul_crop_and_resize(
tf.expand_dims(split, axis=0), tf.expand_dims(box, axis=0),
bin_crop_size),
axis=0)
else:
crop = tf.image.crop_and_resize(
tf.expand_dims(split, 0), box,
tf.zeros(tf.shape(boxes)[0], dtype=tf.int32), bin_crop_size)
image_crops.append(crop)
if global_pool:
# Average over all bins.
position_sensitive_features = tf.add_n(image_crops) / len(image_crops)
# Then average over spatial positions within the bins.
position_sensitive_features = tf.reduce_mean(
position_sensitive_features, [1, 2], keepdims=True)
else:
# Reorder height/width to depth channel.
block_size = bin_crop_size[0]
if block_size >= 2:
image_crops = [tf.space_to_depth(
crop, block_size=block_size) for crop in image_crops]
    # Pack image_crops so that first dimension is for position-sensitive boxes.
position_sensitive_features = tf.stack(image_crops, axis=0)
# Unroll the position-sensitive boxes to spatial positions.
position_sensitive_features = tf.squeeze(
tf.batch_to_space_nd(position_sensitive_features,
block_shape=[1] + num_spatial_bins,
crops=tf.zeros((3, 2), dtype=tf.int32)),
axis=[0])
# Reorder back the depth channel.
if block_size >= 2:
position_sensitive_features = tf.depth_to_space(
position_sensitive_features, block_size=block_size)
return position_sensitive_features
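# Illustrative usage sketch (hypothetical helper, for demonstration only):
# R-FCN style position-sensitive RoI pooling over a score map whose depth
# (18) is divisible by spatial_bins_y * spatial_bins_x (3 * 3); with
# global_pool=True each box yields a [1, 1, 2] pooled feature.
def _position_sensitive_crop_example():
  image = tf.random_uniform([24, 24, 18])
  boxes = tf.constant([[0.0, 0.0, 0.5, 0.5], [0.25, 0.25, 1.0, 1.0]])
  return position_sensitive_crop_regions(
      image, boxes, crop_size=[6, 6], num_spatial_bins=[3, 3],
      global_pool=True)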
def reframe_image_corners_relative_to_boxes(boxes):
"""Reframe the image corners ([0, 0, 1, 1]) to be relative to boxes.
  The image corners are expressed in the local coordinate frame of each box,
  i.e. relative to the box's own corners.
Args:
boxes: A float tensor of [num_boxes, 4] of (ymin, xmin, ymax, xmax)
coordinates in relative coordinate space of each bounding box.
Returns:
reframed_boxes: Reframes boxes with same shape as input.
"""
ymin, xmin, ymax, xmax = (boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3])
height = tf.maximum(ymax - ymin, 1e-4)
width = tf.maximum(xmax - xmin, 1e-4)
ymin_out = (0 - ymin) / height
xmin_out = (0 - xmin) / width
ymax_out = (1 - ymin) / height
xmax_out = (1 - xmin) / width
return tf.stack([ymin_out, xmin_out, ymax_out, xmax_out], axis=1)
def reframe_box_masks_to_image_masks(box_masks, boxes, image_height,
image_width, resize_method='bilinear'):
"""Transforms the box masks back to full image masks.
Embeds masks in bounding boxes of larger masks whose shapes correspond to
image shape.
Args:
box_masks: A tensor of size [num_masks, mask_height, mask_width].
boxes: A tf.float32 tensor of size [num_masks, 4] containing the box
corners. Row i contains [ymin, xmin, ymax, xmax] of the box
corresponding to mask i. Note that the box corners are in
normalized coordinates.
image_height: Image height. The output mask will have the same height as
the image height.
image_width: Image width. The output mask will have the same width as the
image width.
resize_method: The resize method, either 'bilinear' or 'nearest'. Note that
'bilinear' is only respected if box_masks is a float.
Returns:
A tensor of size [num_masks, image_height, image_width] with the same dtype
as `box_masks`.
"""
resize_method = 'nearest' if box_masks.dtype == tf.uint8 else resize_method
# TODO(rathodv): Make this a public function.
def reframe_box_masks_to_image_masks_default():
"""The default function when there are more than 0 box masks."""
num_boxes = tf.shape(box_masks)[0]
box_masks_expanded = tf.expand_dims(box_masks, axis=3)
# TODO(vighneshb) Use matmul_crop_and_resize so that the output shape
# is static. This will help us run and test on TPUs.
resized_crops = tf.image.crop_and_resize(
image=box_masks_expanded,
boxes=reframe_image_corners_relative_to_boxes(boxes),
box_ind=tf.range(num_boxes),
crop_size=[image_height, image_width],
method=resize_method,
extrapolation_value=0)
return tf.cast(resized_crops, box_masks.dtype)
image_masks = tf.cond(
tf.shape(box_masks)[0] > 0,
reframe_box_masks_to_image_masks_default,
lambda: tf.zeros([0, image_height, image_width, 1], box_masks.dtype))
return tf.squeeze(image_masks, axis=3)
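# Illustrative usage sketch (hypothetical helper, for demonstration only):
# pastes per-box instance masks (e.g. 33x33 crops in box coordinates) back
# onto a full-resolution canvas, one image-sized mask per box.
# `box_masks` is [num_masks, 33, 33] float32 and `boxes` is [num_masks, 4]
# in normalized coordinates.
def _reframe_box_masks_example(box_masks, boxes):
  return reframe_box_masks_to_image_masks(
      box_masks, boxes, image_height=480, image_width=640)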
def merge_boxes_with_multiple_labels(boxes,
classes,
confidences,
num_classes,
quantization_bins=10000):
"""Merges boxes with same coordinates and returns K-hot encoded classes.
Args:
boxes: A tf.float32 tensor with shape [N, 4] holding N boxes. Only
normalized coordinates are allowed.
classes: A tf.int32 tensor with shape [N] holding class indices.
The class index starts at 0.
confidences: A tf.float32 tensor with shape [N] holding class confidences.
num_classes: total number of classes to use for K-hot encoding.
quantization_bins: the number of bins used to quantize the box coordinate.
Returns:
merged_boxes: A tf.float32 tensor with shape [N', 4] holding boxes,
where N' <= N.
class_encodings: A tf.int32 tensor with shape [N', num_classes] holding
K-hot encodings for the merged boxes.
confidence_encodings: A tf.float32 tensor with shape [N', num_classes]
holding encodings of confidences for the merged boxes.
merged_box_indices: A tf.int32 tensor with shape [N'] holding original
indices of the boxes.
"""
boxes_shape = tf.shape(boxes)
classes_shape = tf.shape(classes)
confidences_shape = tf.shape(confidences)
box_class_shape_assert = shape_utils.assert_shape_equal_along_first_dimension(
boxes_shape, classes_shape)
box_confidence_shape_assert = (
shape_utils.assert_shape_equal_along_first_dimension(
boxes_shape, confidences_shape))
box_dimension_assert = tf.assert_equal(boxes_shape[1], 4)
box_normalized_assert = shape_utils.assert_box_normalized(boxes)
with tf.control_dependencies(
[box_class_shape_assert, box_confidence_shape_assert,
box_dimension_assert, box_normalized_assert]):
quantized_boxes = tf.to_int64(boxes * (quantization_bins - 1))
ymin, xmin, ymax, xmax = tf.unstack(quantized_boxes, axis=1)
hashcodes = (
ymin +
xmin * quantization_bins +
ymax * quantization_bins * quantization_bins +
xmax * quantization_bins * quantization_bins * quantization_bins)
unique_hashcodes, unique_indices = tf.unique(hashcodes)
num_boxes = tf.shape(boxes)[0]
num_unique_boxes = tf.shape(unique_hashcodes)[0]
merged_box_indices = tf.unsorted_segment_min(
tf.range(num_boxes), unique_indices, num_unique_boxes)
merged_boxes = tf.gather(boxes, merged_box_indices)
unique_indices = tf.to_int64(unique_indices)
classes = tf.to_int64(classes)
def map_box_encodings(i):
"""Produces box K-hot and score encodings for each class index."""
box_mask = tf.equal(
unique_indices, i * tf.ones(num_boxes, dtype=tf.int64))
box_mask = tf.reshape(box_mask, [-1])
box_indices = tf.boolean_mask(classes, box_mask)
box_confidences = tf.boolean_mask(confidences, box_mask)
box_class_encodings = tf.sparse_to_dense(
box_indices, [num_classes], tf.constant(1, dtype=tf.int64),
validate_indices=False)
box_confidence_encodings = tf.sparse_to_dense(
box_indices, [num_classes], box_confidences, validate_indices=False)
return box_class_encodings, box_confidence_encodings
# Important to avoid int32 here since there is no GPU kernel for int32.
# int64 and float32 are fine.
class_encodings, confidence_encodings = tf.map_fn(
map_box_encodings,
tf.range(tf.to_int64(num_unique_boxes)),
back_prop=False,
dtype=(tf.int64, tf.float32))
merged_boxes = tf.reshape(merged_boxes, [-1, 4])
class_encodings = tf.cast(class_encodings, dtype=tf.int32)
class_encodings = tf.reshape(class_encodings, [-1, num_classes])
confidence_encodings = tf.reshape(confidence_encodings, [-1, num_classes])
merged_box_indices = tf.reshape(merged_box_indices, [-1])
return (merged_boxes, class_encodings, confidence_encodings,
merged_box_indices)
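# Illustrative usage sketch (hypothetical helper, for demonstration only):
# two annotations sharing identical normalized coordinates but different
# class labels are merged into a single box with a 2-hot class encoding.
def _merge_boxes_with_multiple_labels_example():
  boxes = tf.constant([[0.1, 0.1, 0.5, 0.5],
                       [0.1, 0.1, 0.5, 0.5],
                       [0.2, 0.2, 0.8, 0.8]], dtype=tf.float32)
  classes = tf.constant([0, 2, 1], dtype=tf.int32)
  confidences = tf.constant([1.0, 1.0, 1.0], dtype=tf.float32)
  return merge_boxes_with_multiple_labels(
      boxes, classes, confidences, num_classes=3)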
def nearest_neighbor_upsampling(input_tensor, scale=None, height_scale=None,
width_scale=None,
name='nearest_neighbor_upsampling'):
"""Nearest neighbor upsampling implementation.
Nearest neighbor upsampling function that maps input tensor with shape
[batch_size, height, width, channels] to [batch_size, height * scale
, width * scale, channels]. This implementation only uses reshape and
broadcasting to make it TPU compatible.
Args:
input_tensor: A float32 tensor of size [batch, height_in, width_in,
channels].
scale: An integer multiple to scale resolution of input data in both height
and width dimensions.
height_scale: An integer multiple to scale the height of input image. This
option when provided overrides `scale` option.
width_scale: An integer multiple to scale the width of input image. This
option when provided overrides `scale` option.
name: A name for the operation (optional).
Returns:
data_up: A float32 tensor of size
[batch, height_in*scale, width_in*scale, channels].
Raises:
ValueError: If both scale and height_scale or if both scale and width_scale
are None.
"""
if not scale and (height_scale is None or width_scale is None):
raise ValueError('Provide either `scale` or `height_scale` and'
' `width_scale`.')
with tf.name_scope(name):
h_scale = scale if height_scale is None else height_scale
w_scale = scale if width_scale is None else width_scale
(batch_size, height, width,
channels) = shape_utils.combined_static_and_dynamic_shape(input_tensor)
output_tensor = tf.stack([input_tensor] * w_scale, axis=3, name='w_stack')
# Adds a reshape op to avoid generating high-dimensional tensors that some
# compilers cannot deal with.
output_tensor = tf.reshape(output_tensor,
[batch_size, height, width * w_scale, channels])
output_tensor = tf.stack([output_tensor] * h_scale, axis=2, name='h_stack')
return tf.reshape(output_tensor,
[batch_size, height * h_scale, width * w_scale, channels])
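# Illustrative usage sketch (hypothetical helper, for demonstration only):
# upsampling a [batch, 8, 8, channels] feature map by a factor of 2 in both
# spatial dimensions gives a [batch, 16, 16, channels] tensor in which every
# value is repeated in a 2x2 block.
def _nearest_neighbor_upsampling_example(feature_map):
  return nearest_neighbor_upsampling(feature_map, scale=2)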
def matmul_gather_on_zeroth_axis(params, indices, scope=None):
"""Matrix multiplication based implementation of tf.gather on zeroth axis.
TODO(rathodv, jonathanhuang): enable sparse matmul option.
Args:
params: A float32 Tensor. The tensor from which to gather values.
Must be at least rank 1.
indices: A Tensor. Must be one of the following types: int32, int64.
Must be in range [0, params.shape[0])
scope: A name for the operation (optional).
Returns:
A Tensor. Has the same type as params. Values from params gathered
from indices given by indices, with shape indices.shape + params.shape[1:].
"""
with tf.name_scope(scope, 'MatMulGather'):
params_shape = shape_utils.combined_static_and_dynamic_shape(params)
indices_shape = shape_utils.combined_static_and_dynamic_shape(indices)
params2d = tf.reshape(params, [params_shape[0], -1])
indicator_matrix = tf.one_hot(indices, params_shape[0])
gathered_result_flattened = tf.matmul(indicator_matrix, params2d)
return tf.reshape(gathered_result_flattened,
tf.stack(indices_shape + params_shape[1:]))
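# Illustrative usage sketch (hypothetical helper, for demonstration only):
# a TPU-friendly stand-in for tf.gather(params, indices) along axis 0;
# gathering [2, 0] from a [3, 4] matrix returns rows 2 and 0 in that order.
def _matmul_gather_example():
  params = tf.reshape(tf.range(12, dtype=tf.float32), [3, 4])
  indices = tf.constant([2, 0], dtype=tf.int32)
  return matmul_gather_on_zeroth_axis(params, indices)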
def fpn_feature_levels(num_levels, unit_scale_index, image_ratio, boxes):
"""Returns fpn feature level for each box based on its area.
See section 4.2 of https://arxiv.org/pdf/1612.03144.pdf for details.
Args:
num_levels: An integer indicating the number of feature levels to crop boxes
from.
unit_scale_index: An 0-based integer indicating the index of feature map
which most closely matches the resolution of the pretrained model.
image_ratio: A float indicating the ratio of input image area to pretraining
image area.
boxes: A float tensor of shape [batch, num_boxes, 4] containing boxes of the
form [ymin, xmin, ymax, xmax] in normalized coordinates.
Returns:
An int32 tensor of shape [batch_size, num_boxes] containing feature indices.
"""
assert num_levels > 0, (
'`num_levels` must be > 0. Found {}'.format(num_levels))
assert unit_scale_index < num_levels and unit_scale_index >= 0, (
'`unit_scale_index` must be in [0, {}). Found {}.'.format(
num_levels, unit_scale_index))
box_height_width = boxes[:, :, 2:4] - boxes[:, :, 0:2]
areas_sqrt = tf.sqrt(tf.reduce_prod(box_height_width, axis=2))
log_2 = tf.cast(tf.log(2.0), dtype=boxes.dtype)
levels = tf.cast(
tf.floordiv(tf.log(areas_sqrt * image_ratio), log_2)
+
unit_scale_index,
dtype=tf.int32)
levels = tf.maximum(0, tf.minimum(num_levels - 1, levels))
return levels
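# Illustrative usage sketch (hypothetical helper, for demonstration only):
# assigns RoIs to FPN levels; a box spanning the whole unit-scale image maps
# to unit_scale_index, while a box whose side length is a quarter of the
# image maps two levels down (clamped to [0, num_levels - 1]).
# `boxes` is [batch, num_boxes, 4] in normalized coordinates.
def _fpn_feature_levels_example(boxes):
  return fpn_feature_levels(
      num_levels=4, unit_scale_index=2, image_ratio=1.0, boxes=boxes)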
def bfloat16_to_float32_nested(input_nested):
"""Convert float32 tensors in a nested structure to bfloat16.
Args:
input_nested: A Python dict, values being Tensor or Python list/tuple of
Tensor or Non-Tensor.
Returns:
    A Python structure with the same layout as `input_nested`,
    with all bfloat16 tensors converted to float32.
"""
if isinstance(input_nested, tf.Tensor):
if input_nested.dtype == tf.bfloat16:
return tf.cast(input_nested, dtype=tf.float32)
else:
return input_nested
elif isinstance(input_nested, (list, tuple)):
out_tensor_dict = [bfloat16_to_float32_nested(t) for t in input_nested]
elif isinstance(input_nested, dict):
out_tensor_dict = {
k: bfloat16_to_float32_nested(v) for k, v in input_nested.items()
}
else:
return input_nested
return out_tensor_dict
def gather_with_padding_values(input_tensor, indices, padding_value):
"""Gathers elements from tensor and pads `padding_value` for ignore indices.
Gathers elements from `input_tensor` based on `indices`. If there are ignore
indices (which are "-1"s) in `indices`, `padding_value` will be gathered for
those positions.
Args:
input_tensor: A N-D tensor of shape [M, d_1, d_2 .. d_(N-1)] to gather
values from.
indices: A 1-D tensor in which each element is either an index in the
first dimension of input_tensor or -1.
padding_value: A (N-1)-D tensor of shape [d_1, d_2 .. d_(N-1)] which will be
used as gathered value for each ignore index in `indices`.
Returns:
gathered_tensor: A tensor of shape [L, d_1, d_2 .. d_(N-1)] containing
values gathered from input_tensor. The first dimension L is equal to the
length of `indices`.
"""
padding_value = tf.expand_dims(padding_value, axis=0)
input_tensor = tf.concat([padding_value, input_tensor], axis=0)
gather_indices = indices + 1
gathered_tensor = tf.gather(input_tensor, gather_indices)
return gathered_tensor
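# Illustrative usage sketch (hypothetical helper, for demonstration only):
# indices equal to -1 pick up the padding value instead of a row of the
# input, e.g. gathering [1, -1, 0] from [[1, 2], [3, 4]] with padding [0, 0]
# yields [[3, 4], [0, 0], [1, 2]].
def _gather_with_padding_values_example():
  input_tensor = tf.constant([[1., 2.], [3., 4.]])
  indices = tf.constant([1, -1, 0], dtype=tf.int32)
  padding_value = tf.constant([0., 0.])
  return gather_with_padding_values(input_tensor, indices, padding_value)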
EqualizationLossConfig = collections.namedtuple('EqualizationLossConfig',
['weight', 'exclude_prefixes'])
def tile_context_tensors(tensor_dict):
"""Tiles context fields to have num_frames along 0-th dimension."""
num_frames = tf.shape(tensor_dict[fields.InputDataFields.image])[0]
for key in tensor_dict:
if key not in fields.SEQUENCE_FIELDS:
original_tensor = tensor_dict[key]
tensor_shape = shape_utils.combined_static_and_dynamic_shape(
original_tensor)
tensor_dict[key] = tf.tile(
tf.expand_dims(original_tensor, 0),
tf.stack([num_frames] + [1] * len(tensor_shape), axis=0))
return tensor_dict
def decode_image(tensor_dict):
"""Decodes images in a tensor dict."""
tensor_dict[fields.InputDataFields.image] = tf.io.decode_image(
tensor_dict[fields.InputDataFields.image], channels=3)
tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3])
return tensor_dict
def giou(boxes1, boxes2):
"""Computes generalized IOU between two tensors.
Each box should be represented as [ymin, xmin, ymax, xmax].
Args:
boxes1: a tensor with shape [num_boxes, 4]
boxes2: a tensor with shape [num_boxes, 4]
Returns:
a tensor of shape [num_boxes] containing GIoUs
"""
pred_ymin, pred_xmin, pred_ymax, pred_xmax = tf.unstack(boxes1, axis=1)
gt_ymin, gt_xmin, gt_ymax, gt_xmax = tf.unstack(boxes2, axis=1)
gt_area = (gt_ymax - gt_ymin) * (gt_xmax - gt_xmin)
pred_area = (pred_ymax - pred_ymin) * (pred_xmax - pred_xmin)
x1_i = tf.maximum(pred_xmin, gt_xmin)
x2_i = tf.minimum(pred_xmax, gt_xmax)
y1_i = tf.maximum(pred_ymin, gt_ymin)
y2_i = tf.minimum(pred_ymax, gt_ymax)
intersection_area = tf.maximum(0.0, y2_i - y1_i) * tf.maximum(0.0,
x2_i - x1_i)
x1_c = tf.minimum(pred_xmin, gt_xmin)
x2_c = tf.maximum(pred_xmax, gt_xmax)
y1_c = tf.minimum(pred_ymin, gt_ymin)
y2_c = tf.maximum(pred_ymax, gt_ymax)
hull_area = (y2_c - y1_c) * (x2_c - x1_c)
union_area = gt_area + pred_area - intersection_area
iou = tf.where(tf.equal(union_area, 0.0),
tf.zeros_like(union_area), intersection_area / union_area)
giou_ = iou - tf.where(hull_area > 0.0,
(hull_area - union_area) / hull_area, iou)
return giou_
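# Illustrative usage sketch (hypothetical helper, for demonstration only):
# GIoU equals IoU for perfectly overlapping boxes and turns negative for
# disjoint boxes; the second pair below has zero IoU but a negative GIoU.
def _giou_example():
  boxes1 = tf.constant([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.2, 0.2]])
  boxes2 = tf.constant([[0.0, 0.0, 1.0, 1.0], [0.8, 0.8, 1.0, 1.0]])
  return giou(boxes1, boxes2)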
def center_to_corner_coordinate(input_tensor):
"""Converts input boxes from center to corner representation."""
reshaped_encodings = tf.reshape(input_tensor, [-1, 4])
ycenter = tf.gather(reshaped_encodings, [0], axis=1)
xcenter = tf.gather(reshaped_encodings, [1], axis=1)
h = tf.gather(reshaped_encodings, [2], axis=1)
w = tf.gather(reshaped_encodings, [3], axis=1)
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return tf.squeeze(tf.stack([ymin, xmin, ymax, xmax], axis=1))
| 49,614 | 39.970273 | 80 | py |
models | models-master/research/object_detection/utils/shape_utils_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.shape_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
from object_detection.utils import test_case
class UtilTest(test_case.TestCase):
def test_pad_tensor_using_integer_input(self):
    print('........pad tensor using integer input.')
def graph_fn():
t1 = tf.constant([1], dtype=tf.int32)
pad_t1 = shape_utils.pad_tensor(t1, 2)
t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
pad_t2 = shape_utils.pad_tensor(t2, 2)
return pad_t1, pad_t2
pad_t1_result, pad_t2_result = self.execute(graph_fn, [])
self.assertAllEqual([1, 0], pad_t1_result)
self.assertAllClose([[0.1, 0.2], [0, 0]], pad_t2_result)
def test_pad_tensor_using_tensor_input(self):
def graph_fn():
t1 = tf.constant([1], dtype=tf.int32)
pad_t1 = shape_utils.pad_tensor(t1, tf.constant(2))
t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
pad_t2 = shape_utils.pad_tensor(t2, tf.constant(2))
return pad_t1, pad_t2
pad_t1_result, pad_t2_result = self.execute(graph_fn, [])
self.assertAllEqual([1, 0], pad_t1_result)
self.assertAllClose([[0.1, 0.2], [0, 0]], pad_t2_result)
def test_clip_tensor_using_integer_input(self):
def graph_fn():
t1 = tf.constant([1, 2, 3], dtype=tf.int32)
clip_t1 = shape_utils.clip_tensor(t1, 2)
t2 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
clip_t2 = shape_utils.clip_tensor(t2, 2)
self.assertEqual(2, clip_t1.get_shape()[0])
self.assertEqual(2, clip_t2.get_shape()[0])
return clip_t1, clip_t2
clip_t1_result, clip_t2_result = self.execute(graph_fn, [])
self.assertAllEqual([1, 2], clip_t1_result)
self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], clip_t2_result)
def test_clip_tensor_using_tensor_input(self):
def graph_fn():
t1 = tf.constant([1, 2, 3], dtype=tf.int32)
clip_t1 = shape_utils.clip_tensor(t1, tf.constant(2))
t2 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
clip_t2 = shape_utils.clip_tensor(t2, tf.constant(2))
return clip_t1, clip_t2
clip_t1_result, clip_t2_result = self.execute(graph_fn, [])
self.assertAllEqual([1, 2], clip_t1_result)
self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], clip_t2_result)
def test_pad_or_clip_tensor_using_integer_input(self):
def graph_fn():
t1 = tf.constant([1], dtype=tf.int32)
tt1 = shape_utils.pad_or_clip_tensor(t1, 2)
t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
tt2 = shape_utils.pad_or_clip_tensor(t2, 2)
t3 = tf.constant([1, 2, 3], dtype=tf.int32)
tt3 = shape_utils.clip_tensor(t3, 2)
t4 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
tt4 = shape_utils.clip_tensor(t4, 2)
self.assertEqual(2, tt1.get_shape()[0])
self.assertEqual(2, tt2.get_shape()[0])
self.assertEqual(2, tt3.get_shape()[0])
self.assertEqual(2, tt4.get_shape()[0])
return tt1, tt2, tt3, tt4
tt1_result, tt2_result, tt3_result, tt4_result = self.execute(graph_fn, [])
self.assertAllEqual([1, 0], tt1_result)
self.assertAllClose([[0.1, 0.2], [0, 0]], tt2_result)
self.assertAllEqual([1, 2], tt3_result)
self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], tt4_result)
def test_pad_or_clip_tensor_using_tensor_input(self):
def graph_fn():
t1 = tf.constant([1], dtype=tf.int32)
tt1 = shape_utils.pad_or_clip_tensor(t1, tf.constant(2))
t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
tt2 = shape_utils.pad_or_clip_tensor(t2, tf.constant(2))
t3 = tf.constant([1, 2, 3], dtype=tf.int32)
tt3 = shape_utils.clip_tensor(t3, tf.constant(2))
t4 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
tt4 = shape_utils.clip_tensor(t4, tf.constant(2))
return tt1, tt2, tt3, tt4
tt1_result, tt2_result, tt3_result, tt4_result = self.execute(graph_fn, [])
self.assertAllEqual([1, 0], tt1_result)
self.assertAllClose([[0.1, 0.2], [0, 0]], tt2_result)
self.assertAllEqual([1, 2], tt3_result)
self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], tt4_result)
def test_combined_static_dynamic_shape(self):
for n in [2, 3, 4]:
tensor = tf.zeros((n, 2, 3))
combined_shape = shape_utils.combined_static_and_dynamic_shape(
tensor)
self.assertListEqual(combined_shape[1:], [2, 3])
def test_pad_or_clip_nd_tensor(self):
def graph_fn(input_tensor):
output_tensor = shape_utils.pad_or_clip_nd(
input_tensor, [None, 3, 5, tf.constant(6)])
return output_tensor
for n in [2, 3, 4, 5]:
input_np = np.zeros((n, 5, 4, 7))
output_tensor_np = self.execute(graph_fn, [input_np])
self.assertAllEqual(output_tensor_np.shape[1:], [3, 5, 6])
class StaticOrDynamicMapFnTest(test_case.TestCase):
def test_with_dynamic_shape(self):
def fn(input_tensor):
return tf.reduce_sum(input_tensor)
def graph_fn(input_tensor):
return shape_utils.static_or_dynamic_map_fn(fn, input_tensor)
# The input has different shapes, but due to how self.execute()
# works, the shape is known at graph compile time.
result1 = self.execute(
graph_fn, [np.array([[1, 2], [3, 1], [0, 4]]),])
result2 = self.execute(
graph_fn, [np.array([[-1, 1], [0, 9]]),])
self.assertAllEqual(result1, [3, 4, 4])
self.assertAllEqual(result2, [0, 9])
def test_with_static_shape(self):
def fn(input_tensor):
return tf.reduce_sum(input_tensor)
def graph_fn():
input_tensor = tf.constant([[1, 2], [3, 1], [0, 4]], dtype=tf.float32)
return shape_utils.static_or_dynamic_map_fn(fn, input_tensor)
result = self.execute(graph_fn, [])
self.assertAllEqual(result, [3, 4, 4])
def test_with_multiple_dynamic_shapes(self):
def fn(elems):
input_tensor, scalar_index_tensor = elems
return tf.reshape(tf.slice(input_tensor, scalar_index_tensor, [1]), [])
def graph_fn(input_tensor, scalar_index_tensor):
map_fn_output = shape_utils.static_or_dynamic_map_fn(
fn, [input_tensor, scalar_index_tensor], dtype=tf.float32)
return map_fn_output
# The input has different shapes, but due to how self.execute()
# works, the shape is known at graph compile time.
result1 = self.execute(
graph_fn, [
np.array([[1, 2, 3], [4, 5, -1], [0, 6, 9]]),
np.array([[0], [2], [1]]),
])
result2 = self.execute(
graph_fn, [
np.array([[-1, 1, 0], [3, 9, 30]]),
np.array([[1], [0]])
])
self.assertAllEqual(result1, [1, -1, 6])
self.assertAllEqual(result2, [1, 3])
def test_with_multiple_static_shapes(self):
def fn(elems):
input_tensor, scalar_index_tensor = elems
return tf.reshape(tf.slice(input_tensor, scalar_index_tensor, [1]), [])
def graph_fn():
input_tensor = tf.constant([[1, 2, 3], [4, 5, -1], [0, 6, 9]],
dtype=tf.float32)
scalar_index_tensor = tf.constant([[0], [2], [1]], dtype=tf.int32)
map_fn_output = shape_utils.static_or_dynamic_map_fn(
fn, [input_tensor, scalar_index_tensor], dtype=tf.float32)
return map_fn_output
result = self.execute(graph_fn, [])
self.assertAllEqual(result, [1, -1, 6])
def test_fails_with_nested_input(self):
def fn(input_tensor):
return input_tensor
input_tensor1 = tf.constant([1])
input_tensor2 = tf.constant([2])
with self.assertRaisesRegexp(
ValueError, '`elems` must be a Tensor or list of Tensors.'):
shape_utils.static_or_dynamic_map_fn(
fn, [input_tensor1, [input_tensor2]], dtype=tf.float32)
class CheckMinImageShapeTest(test_case.TestCase):
def test_check_min_image_dim_static_shape(self):
input_tensor = tf.constant(np.zeros([1, 42, 42, 3]))
_ = shape_utils.check_min_image_dim(33, input_tensor)
with self.assertRaisesRegexp(
ValueError, 'image size must be >= 64 in both height and width.'):
_ = shape_utils.check_min_image_dim(64, input_tensor)
def test_check_min_image_dim_dynamic_shape(self):
def graph_fn(input_tensor):
return shape_utils.check_min_image_dim(33, input_tensor)
self.execute(graph_fn,
[np.zeros([1, 42, 42, 3])])
self.assertRaises(
ValueError, self.execute,
graph_fn, np.zeros([1, 32, 32, 3])
)
class AssertShapeEqualTest(test_case.TestCase):
def test_unequal_static_shape_raises_exception(self):
shape_a = tf.constant(np.zeros([4, 2, 2, 1]))
shape_b = tf.constant(np.zeros([4, 2, 3, 1]))
self.assertRaisesRegex(
ValueError, 'Unequal shapes',
shape_utils.assert_shape_equal,
shape_utils.combined_static_and_dynamic_shape(shape_a),
shape_utils.combined_static_and_dynamic_shape(shape_b)
)
def test_equal_static_shape_succeeds(self):
def graph_fn():
shape_a = tf.constant(np.zeros([4, 2, 2, 1]))
shape_b = tf.constant(np.zeros([4, 2, 2, 1]))
shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(shape_a),
shape_utils.combined_static_and_dynamic_shape(shape_b))
return tf.constant(0)
self.execute(graph_fn, [])
def test_unequal_dynamic_shape_raises_tf_assert(self):
def graph_fn(tensor_a, tensor_b):
shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(tensor_a),
shape_utils.combined_static_and_dynamic_shape(tensor_b))
return tf.constant(0)
self.assertRaises(ValueError,
self.execute, graph_fn,
[np.zeros([1, 2, 2, 3]), np.zeros([1, 4, 4, 3])])
def test_equal_dynamic_shape_succeeds(self):
def graph_fn(tensor_a, tensor_b):
shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(tensor_a),
shape_utils.combined_static_and_dynamic_shape(tensor_b)
)
return tf.constant(0)
self.execute(graph_fn, [np.zeros([1, 2, 2, 3]),
np.zeros([1, 2, 2, 3])])
def test_unequal_static_shape_along_first_dim_raises_exception(self):
shape_a = tf.constant(np.zeros([4, 2, 2, 1]))
shape_b = tf.constant(np.zeros([6, 2, 3, 1]))
self.assertRaisesRegexp(
ValueError, 'Unequal first dimension',
shape_utils.assert_shape_equal_along_first_dimension,
shape_utils.combined_static_and_dynamic_shape(shape_a),
shape_utils.combined_static_and_dynamic_shape(shape_b)
)
def test_equal_static_shape_along_first_dim_succeeds(self):
def graph_fn():
shape_a = tf.constant(np.zeros([4, 2, 2, 1]))
shape_b = tf.constant(np.zeros([4, 7, 2]))
shape_utils.assert_shape_equal_along_first_dimension(
shape_utils.combined_static_and_dynamic_shape(shape_a),
shape_utils.combined_static_and_dynamic_shape(shape_b))
return tf.constant(0)
self.execute(graph_fn, [])
def test_unequal_dynamic_shape_along_first_dim_raises_tf_assert(self):
def graph_fn(tensor_a, tensor_b):
shape_utils.assert_shape_equal_along_first_dimension(
shape_utils.combined_static_and_dynamic_shape(tensor_a),
shape_utils.combined_static_and_dynamic_shape(tensor_b))
return tf.constant(0)
self.assertRaises(ValueError,
self.execute, graph_fn,
[np.zeros([1, 2, 2, 3]), np.zeros([2, 4, 3])])
def test_equal_dynamic_shape_along_first_dim_succeeds(self):
def graph_fn(tensor_a, tensor_b):
shape_utils.assert_shape_equal_along_first_dimension(
shape_utils.combined_static_and_dynamic_shape(tensor_a),
shape_utils.combined_static_and_dynamic_shape(tensor_b))
return tf.constant(0)
self.execute(graph_fn, [np.zeros([5, 2, 2, 3]), np.zeros([5])])
class FlattenExpandDimensionTest(test_case.TestCase):
def test_flatten_given_dims(self):
def graph_fn():
inputs = tf.random_uniform([5, 2, 10, 10, 3])
actual_flattened = shape_utils.flatten_dimensions(inputs, first=1, last=3)
expected_flattened = tf.reshape(inputs, [5, 20, 10, 3])
return actual_flattened, expected_flattened
(actual_flattened_np,
expected_flattened_np) = self.execute(graph_fn, [])
self.assertAllClose(expected_flattened_np, actual_flattened_np)
def test_raises_value_error_incorrect_dimensions(self):
inputs = tf.random_uniform([5, 2, 10, 10, 3])
self.assertRaises(ValueError,
shape_utils.flatten_dimensions, inputs,
first=0, last=6)
def test_flatten_first_two_dimensions(self):
def graph_fn():
inputs = tf.constant(
[
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[9, 10], [11, 12]]
], dtype=tf.int32)
flattened_tensor = shape_utils.flatten_first_n_dimensions(
inputs, 2)
return flattened_tensor
flattened_tensor_out = self.execute(graph_fn, [])
expected_output = [[1, 2],
[3, 4],
[5, 6],
[7, 8],
[9, 10],
[11, 12]]
self.assertAllEqual(expected_output, flattened_tensor_out)
def test_expand_first_dimension(self):
def graph_fn():
inputs = tf.constant(
[
[1, 2],
[3, 4],
[5, 6],
[7, 8],
[9, 10],
[11, 12]
], dtype=tf.int32)
dims = [3, 2]
expanded_tensor = shape_utils.expand_first_dimension(
inputs, dims)
return expanded_tensor
expanded_tensor_out = self.execute(graph_fn, [])
expected_output = [
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[9, 10], [11, 12]]]
self.assertAllEqual(expected_output, expanded_tensor_out)
def test_expand_first_dimension_with_incompatible_dims(self):
def graph_fn():
inputs = tf.constant(
[
[[1, 2]],
[[3, 4]],
[[5, 6]],
], dtype=tf.int32)
dims = [3, 2]
expanded_tensor = shape_utils.expand_first_dimension(
inputs, dims)
return expanded_tensor
self.assertRaises(ValueError, self.execute, graph_fn, [])
if __name__ == '__main__':
tf.test.main()
| 15,305 | 32.788079 | 80 | py |
models | models-master/research/object_detection/utils/category_util_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.category_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
from object_detection.utils import category_util
class EvalUtilTest(tf.test.TestCase):
def test_load_categories_from_csv_file(self):
csv_data = """
0,"cat"
1,"dog"
2,"bird"
""".strip(' ')
csv_path = os.path.join(self.get_temp_dir(), 'test.csv')
with tf.gfile.Open(csv_path, 'wb') as f:
f.write(csv_data)
categories = category_util.load_categories_from_csv_file(csv_path)
self.assertTrue({'id': 0, 'name': 'cat'} in categories)
self.assertTrue({'id': 1, 'name': 'dog'} in categories)
self.assertTrue({'id': 2, 'name': 'bird'} in categories)
def test_save_categories_to_csv_file(self):
categories = [
{'id': 0, 'name': 'cat'},
{'id': 1, 'name': 'dog'},
{'id': 2, 'name': 'bird'},
]
csv_path = os.path.join(self.get_temp_dir(), 'test.csv')
category_util.save_categories_to_csv_file(categories, csv_path)
saved_categories = category_util.load_categories_from_csv_file(csv_path)
self.assertEqual(saved_categories, categories)
if __name__ == '__main__':
tf.test.main()
| 1,987 | 32.133333 | 80 | py |
models | models-master/research/object_detection/utils/dataset_util.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for creating TFRecord data sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
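# Illustrative usage sketch (hypothetical helper, for demonstration only):
# combines the feature helpers above into a tf.train.Example for one image
# record. The feature keys are illustrative, not a required schema, and
# `encoded_jpeg` is assumed to be a bytes object.
def _build_example_sketch(encoded_jpeg, label):
  return tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': bytes_feature(encoded_jpeg),
      'image/class/label': int64_feature(label),
  }))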
def read_examples_list(path):
"""Read list of training or validation examples.
The file is assumed to contain a single example per line where the first
token in the line is an identifier that allows us to find the image and
annotation xml for that example.
For example, the line:
xyz 3
would allow us to find files xyz.jpg and xyz.xml (the 3 would be ignored).
Args:
path: absolute path to examples list file.
Returns:
list of example identifiers (strings).
"""
with tf.gfile.GFile(path) as fid:
lines = fid.readlines()
return [line.strip().split(' ')[0] for line in lines]
def recursive_parse_xml_to_dict(xml):
"""Recursively parses XML contents to python dict.
We assume that `object` tags are the only ones that can appear
multiple times at the same level of a tree.
Args:
xml: xml tree obtained by parsing XML file contents using lxml.etree
Returns:
Python dictionary holding XML contents.
"""
if not xml:
return {xml.tag: xml.text}
result = {}
for child in xml:
child_result = recursive_parse_xml_to_dict(child)
if child.tag != 'object':
result[child.tag] = child_result[child.tag]
else:
if child.tag not in result:
result[child.tag] = []
result[child.tag].append(child_result[child.tag])
return {xml.tag: result}
| 2,837 | 28.873684 | 80 | py |
models | models-master/research/object_detection/utils/np_box_mask_list_ops.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for np_box_mask_list.BoxMaskList.
Example box operations that are supported:
* Areas: compute bounding box areas
* IOU: pairwise intersection-over-union scores
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from object_detection.utils import np_box_list_ops
from object_detection.utils import np_box_mask_list
from object_detection.utils import np_mask_ops
def box_list_to_box_mask_list(boxlist):
"""Converts a BoxList containing 'masks' into a BoxMaskList.
Args:
boxlist: An np_box_list.BoxList object.
Returns:
An np_box_mask_list.BoxMaskList object.
Raises:
ValueError: If boxlist does not contain `masks` as a field.
"""
if not boxlist.has_field('masks'):
raise ValueError('boxlist does not contain mask field.')
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxlist.get(),
mask_data=boxlist.get_field('masks'))
extra_fields = boxlist.get_extra_fields()
for key in extra_fields:
if key != 'masks':
box_mask_list.data[key] = boxlist.get_field(key)
return box_mask_list
def area(box_mask_list):
"""Computes area of masks.
Args:
box_mask_list: np_box_mask_list.BoxMaskList holding N boxes and masks
Returns:
a numpy array with shape [N*1] representing mask areas
"""
return np_mask_ops.area(box_mask_list.get_masks())
def intersection(box_mask_list1, box_mask_list2):
"""Compute pairwise intersection areas between masks.
Args:
box_mask_list1: BoxMaskList holding N boxes and masks
box_mask_list2: BoxMaskList holding M boxes and masks
Returns:
a numpy array with shape [N*M] representing pairwise intersection area
"""
return np_mask_ops.intersection(box_mask_list1.get_masks(),
box_mask_list2.get_masks())
def iou(box_mask_list1, box_mask_list2):
"""Computes pairwise intersection-over-union between box and mask collections.
Args:
box_mask_list1: BoxMaskList holding N boxes and masks
box_mask_list2: BoxMaskList holding M boxes and masks
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
return np_mask_ops.iou(box_mask_list1.get_masks(),
box_mask_list2.get_masks())
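# Illustrative usage sketch (hypothetical helper, for demonstration only):
# pairwise mask IoU between two BoxMaskList objects built from numpy arrays.
# boxes1/boxes2 are [N, 4] / [M, 4] float arrays and masks1/masks2 are
# [N, height, width] / [M, height, width] uint8 arrays.
def _mask_iou_example(boxes1, masks1, boxes2, masks2):
  box_mask_list1 = np_box_mask_list.BoxMaskList(
      box_data=boxes1, mask_data=masks1)
  box_mask_list2 = np_box_mask_list.BoxMaskList(
      box_data=boxes2, mask_data=masks2)
  return iou(box_mask_list1, box_mask_list2)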
def ioa(box_mask_list1, box_mask_list2):
"""Computes pairwise intersection-over-area between box and mask collections.
Intersection-over-area (ioa) between two masks mask1 and mask2 is defined as
their intersection area over mask2's area. Note that ioa is not symmetric,
that is, IOA(mask1, mask2) != IOA(mask2, mask1).
Args:
box_mask_list1: np_box_mask_list.BoxMaskList holding N boxes and masks
box_mask_list2: np_box_mask_list.BoxMaskList holding M boxes and masks
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
"""
return np_mask_ops.ioa(box_mask_list1.get_masks(), box_mask_list2.get_masks())
def gather(box_mask_list, indices, fields=None):
"""Gather boxes from np_box_mask_list.BoxMaskList according to indices.
By default, gather returns boxes corresponding to the input index list, as
well as all additional fields stored in the box_mask_list (indexing into the
first dimension). However one can optionally only gather from a
subset of fields.
Args:
box_mask_list: np_box_mask_list.BoxMaskList holding N boxes
indices: a 1-d numpy array of type int_
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
Returns:
subbox_mask_list: a np_box_mask_list.BoxMaskList corresponding to the subset
of the input box_mask_list specified by indices
Raises:
ValueError: if specified field is not contained in box_mask_list or if the
indices are not of type int_
"""
if fields is not None:
if 'masks' not in fields:
fields.append('masks')
return box_list_to_box_mask_list(
np_box_list_ops.gather(
boxlist=box_mask_list, indices=indices, fields=fields))
def sort_by_field(box_mask_list, field,
order=np_box_list_ops.SortOrder.DESCEND):
"""Sort boxes and associated fields according to a scalar field.
A common use case is reordering the boxes according to descending scores.
Args:
box_mask_list: BoxMaskList holding N boxes.
field: A BoxMaskList field for sorting and reordering the BoxMaskList.
order: (Optional) 'descend' or 'ascend'. Default is descend.
Returns:
sorted_box_mask_list: A sorted BoxMaskList with the field in the specified
order.
"""
return box_list_to_box_mask_list(
np_box_list_ops.sort_by_field(
boxlist=box_mask_list, field=field, order=order))
def non_max_suppression(box_mask_list,
max_output_size=10000,
iou_threshold=1.0,
score_threshold=-10.0):
"""Non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. In each iteration, the detected bounding box with
highest score in the available pool is selected.
Args:
box_mask_list: np_box_mask_list.BoxMaskList holding N boxes. Must contain
a 'scores' field representing detection scores. All scores belong to the
same class.
max_output_size: maximum number of retained boxes
iou_threshold: intersection over union threshold.
score_threshold: minimum score threshold. Remove the boxes with scores
less than this value. Default value is set to -10. A very
low threshold to pass pretty much all the boxes, unless
the user sets a different score threshold.
Returns:
an np_box_mask_list.BoxMaskList holding M boxes where M <= max_output_size
Raises:
ValueError: if 'scores' field does not exist
ValueError: if threshold is not in [0, 1]
ValueError: if max_output_size < 0
"""
if not box_mask_list.has_field('scores'):
raise ValueError('Field scores does not exist')
if iou_threshold < 0. or iou_threshold > 1.0:
raise ValueError('IOU threshold must be in [0, 1]')
if max_output_size < 0:
    raise ValueError('max_output_size must be non-negative.')
box_mask_list = filter_scores_greater_than(box_mask_list, score_threshold)
if box_mask_list.num_boxes() == 0:
return box_mask_list
box_mask_list = sort_by_field(box_mask_list, 'scores')
# Prevent further computation if NMS is disabled.
if iou_threshold == 1.0:
if box_mask_list.num_boxes() > max_output_size:
selected_indices = np.arange(max_output_size)
return gather(box_mask_list, selected_indices)
else:
return box_mask_list
masks = box_mask_list.get_masks()
num_masks = box_mask_list.num_boxes()
  # is_index_valid is True only for boxes that have not yet been suppressed.
is_index_valid = np.full(num_masks, 1, dtype=bool)
selected_indices = []
num_output = 0
for i in range(num_masks):
if num_output < max_output_size:
if is_index_valid[i]:
num_output += 1
selected_indices.append(i)
is_index_valid[i] = False
valid_indices = np.where(is_index_valid)[0]
if valid_indices.size == 0:
break
intersect_over_union = np_mask_ops.iou(
np.expand_dims(masks[i], axis=0), masks[valid_indices])
intersect_over_union = np.squeeze(intersect_over_union, axis=0)
is_index_valid[valid_indices] = np.logical_and(
is_index_valid[valid_indices],
intersect_over_union <= iou_threshold)
return gather(box_mask_list, np.array(selected_indices))
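# Illustrative usage sketch (not part of the original module): single-class
# NMS on a tiny BoxMaskList. A 'scores' field is required; the boxes, masks
# and scores below are made up for the example.
def _example_non_max_suppression():
  """Hedged example: suppress the lower-scoring of two identical masks."""
  boxes = np.array([[0.0, 0.0, 4.0, 4.0],
                    [0.0, 0.0, 4.0, 4.0]], dtype=np.float32)
  masks = np.zeros([2, 8, 8], dtype=np.uint8)
  masks[:, 0:4, 0:4] = 1
  box_mask_list = np_box_mask_list.BoxMaskList(box_data=boxes, mask_data=masks)
  box_mask_list.add_field('scores', np.array([0.9, 0.8], dtype=np.float32))
  # With identical masks, the second entry is suppressed at iou_threshold=0.5.
  return non_max_suppression(
      box_mask_list, max_output_size=10, iou_threshold=0.5,
      score_threshold=0.0)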
def multi_class_non_max_suppression(box_mask_list, score_thresh, iou_thresh,
max_output_size):
"""Multi-class version of non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. It operates independently for each class for
which scores are provided (via the scores field of the input box_list),
pruning boxes with score less than a provided threshold prior to
applying NMS.
Args:
box_mask_list: np_box_mask_list.BoxMaskList holding N boxes. Must contain a
'scores' field representing detection scores. This scores field is a
tensor that can be 1 dimensional (in the case of a single class) or
2-dimensional, in which case we assume that it takes the
shape [num_boxes, num_classes]. We further assume that this rank is known
statically and that scores.shape[1] is also known (i.e., the number of
classes is fixed and known at graph construction time).
score_thresh: scalar threshold for score (low scoring boxes are removed).
    iou_thresh: scalar threshold for IOU (boxes that have high IOU overlap
with previously selected boxes are removed).
max_output_size: maximum number of retained boxes per class.
Returns:
a box_mask_list holding M boxes with a rank-1 scores field representing
corresponding scores for each box with scores sorted in decreasing order
and a rank-1 classes field representing a class label for each box.
Raises:
ValueError: if iou_thresh is not in [0, 1] or if input box_mask_list does
not have a valid scores field.
"""
if not 0 <= iou_thresh <= 1.0:
raise ValueError('thresh must be between 0 and 1')
if not isinstance(box_mask_list, np_box_mask_list.BoxMaskList):
raise ValueError('box_mask_list must be a box_mask_list')
if not box_mask_list.has_field('scores'):
raise ValueError('input box_mask_list must have \'scores\' field')
scores = box_mask_list.get_field('scores')
if len(scores.shape) == 1:
scores = np.reshape(scores, [-1, 1])
elif len(scores.shape) == 2:
if scores.shape[1] is None:
raise ValueError('scores field must have statically defined second '
'dimension')
else:
raise ValueError('scores field must be of rank 1 or 2')
num_boxes = box_mask_list.num_boxes()
num_scores = scores.shape[0]
num_classes = scores.shape[1]
if num_boxes != num_scores:
    raise ValueError('Incorrect scores field length: actual (%d) vs '
                     'expected (%d).' % (num_scores, num_boxes))
selected_boxes_list = []
for class_idx in range(num_classes):
box_mask_list_and_class_scores = np_box_mask_list.BoxMaskList(
box_data=box_mask_list.get(),
mask_data=box_mask_list.get_masks())
class_scores = np.reshape(scores[0:num_scores, class_idx], [-1])
box_mask_list_and_class_scores.add_field('scores', class_scores)
box_mask_list_filt = filter_scores_greater_than(
box_mask_list_and_class_scores, score_thresh)
nms_result = non_max_suppression(
box_mask_list_filt,
max_output_size=max_output_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh)
nms_result.add_field(
'classes',
np.zeros_like(nms_result.get_field('scores')) + class_idx)
selected_boxes_list.append(nms_result)
selected_boxes = np_box_list_ops.concatenate(selected_boxes_list)
sorted_boxes = np_box_list_ops.sort_by_field(selected_boxes, 'scores')
return box_list_to_box_mask_list(boxlist=sorted_boxes)
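# Illustrative usage sketch (not part of the original module): multi-class NMS
# where 'scores' has shape [num_boxes, num_classes]. All values are made up.
def _example_multi_class_non_max_suppression():
  """Hedged example: per-class NMS followed by score-sorted concatenation."""
  boxes = np.array([[0.0, 0.0, 4.0, 4.0],
                    [4.0, 4.0, 8.0, 8.0]], dtype=np.float32)
  masks = np.zeros([2, 8, 8], dtype=np.uint8)
  masks[0, 0:4, 0:4] = 1
  masks[1, 4:8, 4:8] = 1
  box_mask_list = np_box_mask_list.BoxMaskList(box_data=boxes, mask_data=masks)
  box_mask_list.add_field(
      'scores', np.array([[0.7, 0.1], [0.2, 0.8]], dtype=np.float32))
  # The result carries rank-1 'scores' and 'classes' fields, sorted by score.
  return multi_class_non_max_suppression(
      box_mask_list, score_thresh=0.5, iou_thresh=0.5, max_output_size=10)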
def prune_non_overlapping_masks(box_mask_list1, box_mask_list2, minoverlap=0.0):
"""Prunes the boxes in list1 that overlap less than thresh with list2.
For each mask in box_mask_list1, we want its IOA to be more than minoverlap
  with at least one of the masks in box_mask_list2. If it is not, we remove
  it. If the masks are not full-size images, the pruning is based on boxes.
Args:
box_mask_list1: np_box_mask_list.BoxMaskList holding N boxes and masks.
box_mask_list2: np_box_mask_list.BoxMaskList holding M boxes and masks.
minoverlap: Minimum required overlap between boxes, to count them as
overlapping.
Returns:
A pruned box_mask_list with size [N', 4].
"""
intersection_over_area = ioa(box_mask_list2, box_mask_list1) # [M, N] tensor
intersection_over_area = np.amax(intersection_over_area, axis=0) # [N] tensor
keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap))
keep_inds = np.nonzero(keep_bool)[0]
new_box_mask_list1 = gather(box_mask_list1, keep_inds)
return new_box_mask_list1
def concatenate(box_mask_lists, fields=None):
"""Concatenate list of box_mask_lists.
This op concatenates a list of input box_mask_lists into a larger
box_mask_list. It also
handles concatenation of box_mask_list fields as long as the field tensor
shapes are equal except for the first dimension.
Args:
box_mask_lists: list of np_box_mask_list.BoxMaskList objects
fields: optional list of fields to also concatenate. By default, all
fields from the first BoxMaskList in the list are included in the
concatenation.
Returns:
a box_mask_list with number of boxes equal to
      sum([box_mask_list.num_boxes() for box_mask_list in box_mask_lists])
Raises:
ValueError: if box_mask_lists is invalid (i.e., is not a list, is empty, or
contains non box_mask_list objects), or if requested fields are not
contained in all box_mask_lists
"""
if fields is not None:
if 'masks' not in fields:
fields.append('masks')
return box_list_to_box_mask_list(
np_box_list_ops.concatenate(boxlists=box_mask_lists, fields=fields))
def filter_scores_greater_than(box_mask_list, thresh):
"""Filter to keep only boxes and masks with score exceeding a given threshold.
This op keeps the collection of boxes and masks whose corresponding scores are
greater than the input threshold.
Args:
box_mask_list: BoxMaskList holding N boxes and masks. Must contain a
'scores' field representing detection scores.
thresh: scalar threshold
Returns:
a BoxMaskList holding M boxes and masks where M <= N
Raises:
ValueError: if box_mask_list not a np_box_mask_list.BoxMaskList object or
if it does not have a scores field
"""
if not isinstance(box_mask_list, np_box_mask_list.BoxMaskList):
raise ValueError('box_mask_list must be a BoxMaskList')
if not box_mask_list.has_field('scores'):
raise ValueError('input box_mask_list must have \'scores\' field')
scores = box_mask_list.get_field('scores')
if len(scores.shape) > 2:
raise ValueError('Scores should have rank 1 or 2')
if len(scores.shape) == 2 and scores.shape[1] != 1:
raise ValueError('Scores should have rank 1 or have shape '
'consistent with [None, 1]')
high_score_indices = np.reshape(np.where(np.greater(scores, thresh)),
[-1]).astype(np.int32)
return gather(box_mask_list, high_score_indices)
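# Illustrative usage sketch (not part of the original module): score-based
# filtering of a BoxMaskList. The boxes, masks and scores are made up.
def _example_filter_scores_greater_than():
  """Hedged example: keep only entries whose score exceeds 0.5."""
  boxes = np.array([[0.0, 0.0, 2.0, 2.0],
                    [2.0, 2.0, 4.0, 4.0]], dtype=np.float32)
  masks = np.ones([2, 4, 4], dtype=np.uint8)
  box_mask_list = np_box_mask_list.BoxMaskList(box_data=boxes, mask_data=masks)
  box_mask_list.add_field('scores', np.array([0.3, 0.9], dtype=np.float32))
  # Only the second box/mask survives the 0.5 threshold.
  return filter_scores_greater_than(box_mask_list, thresh=0.5)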
| 15,693 | 37.750617 | 80 | py |
models | models-master/research/object_detection/utils/bifpn_utils.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to manipulate feature map pyramids, such as for FPNs and BiFPNs.
Includes utility functions to facilitate feature pyramid map manipulations,
such as combining multiple feature maps, upsampling or downsampling feature
maps, and applying blocks of convolution, batchnorm, and activation layers.
"""
from six.moves import range
import tensorflow as tf
from object_detection.utils import ops
from object_detection.utils import shape_utils
def create_conv_block(name, num_filters, kernel_size, strides, padding,
use_separable, apply_batchnorm, apply_activation,
conv_hyperparams, is_training, freeze_batchnorm,
conv_bn_act_pattern=True):
"""Create Keras layers for regular or separable convolutions.
Args:
name: String. The name of the layer.
num_filters: Number of filters (channels) for the output feature maps.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
filters, or a single int if both values are the same.
strides: A list of length 2: [stride_height, stride_width], specifying the
convolution stride, or a single int if both strides are the same.
padding: One of 'VALID' or 'SAME'.
use_separable: Bool. Whether to use depthwise separable convolution instead
of regular convolution.
apply_batchnorm: Bool. Whether to apply a batch normalization layer after
convolution, constructed according to the conv_hyperparams.
apply_activation: Bool. Whether to apply an activation layer after
convolution, constructed according to the conv_hyperparams.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Bool. Whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
conv_bn_act_pattern: Bool. By default, when True, the layers returned by
this function are in the order [conv, batchnorm, activation]. Otherwise,
when False, the order of the layers is [activation, conv, batchnorm].
Returns:
    A list of keras layers, including (regular or separable) convolution, and
optionally batch normalization and activation layers.
"""
layers = []
if use_separable:
kwargs = conv_hyperparams.params()
# Both the regularizer and initializer apply to the depthwise layer,
# so we remap the kernel_* to depthwise_* here.
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
# TODO(aom): Verify that the pointwise regularizer/initializer should be set
# here, since this is not the case in feature_map_generators.py
kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['pointwise_initializer'] = kwargs['kernel_initializer']
layers.append(
tf.keras.layers.SeparableConv2D(
filters=num_filters,
kernel_size=kernel_size,
depth_multiplier=1,
padding=padding,
strides=strides,
name=name + 'separable_conv',
**kwargs))
else:
layers.append(
tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=kernel_size,
padding=padding,
strides=strides,
name=name + 'conv',
**conv_hyperparams.params()))
if apply_batchnorm:
layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=name + 'batchnorm'))
if apply_activation:
activation_layer = conv_hyperparams.build_activation_layer(
name=name + 'activation')
if conv_bn_act_pattern:
layers.append(activation_layer)
else:
layers = [activation_layer] + layers
return layers
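# Illustrative usage sketch (not part of this module): building and applying a
# conv block. The Hyperparams text proto below is an assumption modeled on
# configs used elsewhere in the Object Detection API; adjust it to your own
# settings before relying on it.
def _example_create_conv_block():
  """Hedged example: conv + batchnorm + activation applied to a dummy input."""
  from google.protobuf import text_format
  from object_detection.builders import hyperparams_builder
  from object_detection.protos import hyperparams_pb2
  hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge(
      """
      regularizer { l2_regularizer { weight: 0.0004 } }
      initializer { truncated_normal_initializer { stddev: 0.03 } }
      activation: RELU_6
      batch_norm { scale: true }
      """, hyperparams_proto)
  conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
      hyperparams_proto)
  layers = create_conv_block(
      name='example_block_', num_filters=64, kernel_size=3, strides=1,
      padding='SAME', use_separable=False, apply_batchnorm=True,
      apply_activation=True, conv_hyperparams=conv_hyperparams,
      is_training=False, freeze_batchnorm=True)
  features = tf.zeros([1, 16, 16, 32])
  for layer in layers:
    features = layer(features)
  return features  # Shape [1, 16, 16, 64].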
def create_downsample_feature_map_ops(scale, downsample_method,
conv_hyperparams, is_training,
freeze_batchnorm, name):
"""Creates Keras layers for downsampling feature maps.
Args:
scale: Int. The scale factor by which to downsample input feature maps. For
example, in the case of a typical feature map pyramid, the scale factor
between level_i and level_i+1 is 2.
downsample_method: String. The method used for downsampling. Currently
supported methods include 'max_pooling', 'avg_pooling', and
'depthwise_conv'.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Bool. Whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: String. The name used to prefix the constructed layers.
Returns:
A list of Keras layers which will downsample input feature maps by the
desired scale factor.
"""
layers = []
padding = 'SAME'
stride = int(scale)
kernel_size = stride + 1
if downsample_method == 'max_pooling':
layers.append(
tf.keras.layers.MaxPooling2D(
pool_size=kernel_size,
strides=stride,
padding=padding,
name=name + 'downsample_max_x{}'.format(stride)))
elif downsample_method == 'avg_pooling':
layers.append(
tf.keras.layers.AveragePooling2D(
pool_size=kernel_size,
strides=stride,
padding=padding,
name=name + 'downsample_avg_x{}'.format(stride)))
elif downsample_method == 'depthwise_conv':
layers.append(
tf.keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=stride,
padding=padding,
name=name + 'downsample_depthwise_x{}'.format(stride)))
layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=name + 'downsample_batchnorm'))
layers.append(
conv_hyperparams.build_activation_layer(name=name +
'downsample_activation'))
else:
raise ValueError('Unknown downsample method: {}'.format(downsample_method))
return layers
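# Illustrative usage sketch (not part of this module): 2x downsampling with
# max pooling. The 'max_pooling' path does not touch conv_hyperparams, so None
# is passed here; the input tensor is a dummy value.
def _example_create_downsample_ops():
  """Hedged example: halve the spatial resolution of a dummy feature map."""
  layers = create_downsample_feature_map_ops(
      scale=2, downsample_method='max_pooling', conv_hyperparams=None,
      is_training=False, freeze_batchnorm=True, name='example_')
  features = tf.zeros([1, 16, 16, 64])
  for layer in layers:
    features = layer(features)
  return features  # Shape [1, 8, 8, 64].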
def create_upsample_feature_map_ops(scale, use_native_resize_op, name):
"""Creates Keras layers for upsampling feature maps.
Args:
scale: Int. The scale factor by which to upsample input feature maps. For
example, in the case of a typical feature map pyramid, the scale factor
between level_i and level_i-1 is 2.
use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for
the upsampling process instead of reshape and broadcasting implementation.
name: String. The name used to prefix the constructed layers.
Returns:
A list of Keras layers which will upsample input feature maps by the
desired scale factor.
"""
layers = []
if use_native_resize_op:
def resize_nearest_neighbor(image):
image_shape = shape_utils.combined_static_and_dynamic_shape(image)
return tf.compat.v1.image.resize_nearest_neighbor(
image, [image_shape[1] * scale, image_shape[2] * scale])
layers.append(
tf.keras.layers.Lambda(
resize_nearest_neighbor,
name=name + 'nearest_neighbor_upsampling_x{}'.format(scale)))
else:
def nearest_neighbor_upsampling(image):
return ops.nearest_neighbor_upsampling(image, scale=scale)
layers.append(
tf.keras.layers.Lambda(
nearest_neighbor_upsampling,
name=name + 'nearest_neighbor_upsampling_x{}'.format(scale)))
return layers
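# Illustrative usage sketch (not part of this module): 2x nearest-neighbor
# upsampling of a dummy feature map.
def _example_create_upsample_ops():
  """Hedged example: double the spatial resolution of a dummy feature map."""
  layers = create_upsample_feature_map_ops(
      scale=2, use_native_resize_op=False, name='example_')
  features = tf.zeros([1, 8, 8, 64])
  for layer in layers:
    features = layer(features)
  return features  # Shape [1, 16, 16, 64].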
def create_resample_feature_map_ops(input_scale_factor, output_scale_factor,
downsample_method, use_native_resize_op,
conv_hyperparams, is_training,
freeze_batchnorm, name):
"""Creates Keras layers for downsampling or upsampling feature maps.
Args:
input_scale_factor: Int. Scale factor of the input feature map. For example,
for a feature pyramid where each successive level halves its spatial
resolution, the scale factor of a level is 2^level. The input and output
    scale factors are used to compute the scale for upsampling or downsampling,
so they should be evenly divisible.
output_scale_factor: Int. Scale factor of the output feature map. See
input_scale_factor for additional details.
downsample_method: String. The method used for downsampling. See
create_downsample_feature_map_ops for details on supported methods.
use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for
the upsampling process instead of reshape and broadcasting implementation.
See create_upsample_feature_map_ops for details.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Bool. Whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: String. The name used to prefix the constructed layers.
Returns:
A list of Keras layers which will downsample or upsample input feature maps
to match the desired output feature map scale.
"""
if input_scale_factor < output_scale_factor:
if output_scale_factor % input_scale_factor != 0:
      raise ValueError('Invalid scale factor: input scale 1/{} not divisible '
                       'by output scale 1/{}'.format(input_scale_factor,
                                                     output_scale_factor))
scale = output_scale_factor // input_scale_factor
return create_downsample_feature_map_ops(scale, downsample_method,
conv_hyperparams, is_training,
freeze_batchnorm, name)
elif input_scale_factor > output_scale_factor:
if input_scale_factor % output_scale_factor != 0:
      raise ValueError('Invalid scale factor: input scale 1/{} not a divisor '
                       'of output scale 1/{}'.format(input_scale_factor,
                                                     output_scale_factor))
scale = input_scale_factor // output_scale_factor
return create_upsample_feature_map_ops(scale, use_native_resize_op, name)
else:
return []
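# Illustrative usage sketch (not part of this module): resampling from a
# coarser pyramid level (scale factor 8) to a finer one (scale factor 4) only
# requires upsampling, so conv_hyperparams is unused and None is passed here.
def _example_create_resample_ops():
  """Hedged example: resample a level-3 feature map to level-2 resolution."""
  layers = create_resample_feature_map_ops(
      input_scale_factor=8, output_scale_factor=4,
      downsample_method='max_pooling', use_native_resize_op=False,
      conv_hyperparams=None, is_training=False, freeze_batchnorm=True,
      name='example_')
  features = tf.zeros([1, 4, 4, 64])
  for layer in layers:
    features = layer(features)
  return features  # Upsampled by 8 // 4 = 2, to shape [1, 8, 8, 64].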
# TODO(aom): Add tests for this module in a followup CL.
class BiFPNCombineLayer(tf.keras.layers.Layer):
"""Combines multiple input feature maps into a single output feature map.
A Keras layer which combines multiple input feature maps into a single output
feature map, according to the desired combination method. Options for
combining feature maps include simple summation, or several types of weighted
sums using learned weights for each input feature map. These include
'weighted_sum', 'attention', and 'fast_attention'. For more details, see the
  EfficientDet paper by Tan et al. (arxiv.org/abs/1911.09070).
Specifically, this layer takes a list of tensors as input, all of the same
shape, and returns a single tensor, also of the same shape.
"""
def __init__(self, combine_method, **kwargs):
"""Constructor.
Args:
combine_method: String. The method used to combine the input feature maps
into a single output feature map. One of 'sum', 'weighted_sum',
'attention', or 'fast_attention'.
**kwargs: Additional Keras layer arguments.
"""
super(BiFPNCombineLayer, self).__init__(**kwargs)
self.combine_method = combine_method
def _combine_weighted_sum(self, inputs):
return tf.squeeze(
tf.linalg.matmul(tf.stack(inputs, axis=-1), self.per_input_weights),
axis=[-1])
def _combine_attention(self, inputs):
normalized_weights = tf.nn.softmax(self.per_input_weights)
return tf.squeeze(
tf.linalg.matmul(tf.stack(inputs, axis=-1), normalized_weights),
axis=[-1])
def _combine_fast_attention(self, inputs):
weights_non_neg = tf.nn.relu(self.per_input_weights)
normalizer = tf.reduce_sum(weights_non_neg) + 0.0001
normalized_weights = weights_non_neg / normalizer
return tf.squeeze(
tf.linalg.matmul(tf.stack(inputs, axis=-1), normalized_weights),
axis=[-1])
def build(self, input_shape):
if not isinstance(input_shape, list):
raise ValueError('A BiFPN combine layer should be called '
'on a list of inputs.')
if len(input_shape) < 2:
raise ValueError('A BiFPN combine layer should be called '
'on a list of at least 2 inputs. '
'Got ' + str(len(input_shape)) + ' inputs.')
if self.combine_method == 'sum':
self._combine_op = tf.keras.layers.Add()
elif self.combine_method == 'weighted_sum':
self._combine_op = self._combine_weighted_sum
elif self.combine_method == 'attention':
self._combine_op = self._combine_attention
elif self.combine_method == 'fast_attention':
self._combine_op = self._combine_fast_attention
else:
raise ValueError('Unknown combine type: {}'.format(self.combine_method))
if self.combine_method in {'weighted_sum', 'attention', 'fast_attention'}:
self.per_input_weights = self.add_weight(
name='bifpn_combine_weights',
shape=(len(input_shape), 1),
initializer='ones',
trainable=True)
super(BiFPNCombineLayer, self).build(input_shape)
def call(self, inputs):
"""Combines multiple input feature maps into a single output feature map.
Executed when calling the `.__call__` method on input.
Args:
inputs: A list of tensors where all tensors have the same shape, [batch,
height_i, width_i, depth_i].
Returns:
A single tensor, with the same shape as the input tensors,
[batch, height_i, width_i, depth_i].
"""
return self._combine_op(inputs)
def compute_output_shape(self, input_shape):
output_shape = input_shape[0]
for i in range(1, len(input_shape)):
if input_shape[i] != output_shape:
raise ValueError(
'Inputs could not be combined. Shapes should match, '
'but input_shape[0] is {} while input_shape[{}] is {}'.format(
output_shape, i, input_shape[i]))
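# Illustrative usage sketch (not part of this module): combining two feature
# maps of identical shape with learned fast-attention weights. Values are
# dummies.
def _example_bifpn_combine():
  """Hedged example: fast-attention combination of two dummy feature maps."""
  combine = BiFPNCombineLayer(combine_method='fast_attention',
                              name='example_combine')
  node_a = tf.ones([1, 8, 8, 64])
  node_b = tf.zeros([1, 8, 8, 64])
  return combine([node_a, node_b])  # Same shape as each input: [1, 8, 8, 64].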
| 15,597 | 42.448468 | 80 | py |
models | models-master/research/object_detection/utils/object_detection_evaluation.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""object_detection_evaluation module.
ObjectDetectionEvaluation is a class which manages the ground truth information
of an object detection dataset and computes frequently used detection metrics
such as Precision, Recall, and CorLoc of the provided detection results.
It supports the following operations:
1) Add ground truth information of images sequentially.
2) Add detection result of images sequentially.
3) Evaluate detection metrics on already inserted detection results.
4) Write evaluation result into a pickle file for future processing or
visualization.
Note: This module operates on numpy boxes and box lists.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import collections
import logging
import unicodedata
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.utils import label_map_util
from object_detection.utils import metrics
from object_detection.utils import per_image_evaluation
class DetectionEvaluator(six.with_metaclass(ABCMeta, object)):
"""Interface for object detection evalution classes.
Example usage of the Evaluator:
------------------------------
evaluator = DetectionEvaluator(categories)
# Detections and groundtruth for image 1.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
# Detections and groundtruth for image 2.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
metrics_dict = evaluator.evaluate()
"""
def __init__(self, categories):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
"""
self._categories = categories
def observe_result_dict_for_single_example(self, eval_dict):
"""Observes an evaluation result dict for a single example.
When executing eagerly, once all observations have been observed by this
method you can use `.evaluate()` to get the final metrics.
When using `tf.estimator.Estimator` for evaluation this function is used by
`get_estimator_eval_metric_ops()` to construct the metric update op.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
None when executing eagerly, or an update_op that can be used to update
the eval metrics in `tf.estimator.EstimatorSpec`.
"""
raise NotImplementedError('Not implemented for this evaluator!')
@abstractmethod
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary of groundtruth numpy arrays required for
evaluations.
"""
pass
@abstractmethod
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary of detection numpy arrays required for
evaluation.
"""
pass
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns dict of metrics to use with `tf.estimator.EstimatorSpec`.
Note that this must only be implemented if performing evaluation with a
`tf.estimator.Estimator`.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
A dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in `tf.estimator.EstimatorSpec`.
"""
pass
@abstractmethod
def evaluate(self):
"""Evaluates detections and returns a dictionary of metrics."""
pass
@abstractmethod
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
pass
class ObjectDetectionEvaluator(DetectionEvaluator):
"""A class to evaluate detections."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
recall_lower_bound=0.0,
recall_upper_bound=1.0,
evaluate_corlocs=False,
evaluate_precision_recall=False,
metric_prefix=None,
use_weighted_mean_ap=False,
evaluate_masks=False,
group_of_weight=0.0,
nms_iou_threshold=1.0,
nms_max_output_boxes=10000):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
recall_lower_bound: lower bound of recall operating area.
recall_upper_bound: upper bound of recall operating area.
evaluate_corlocs: (optional) boolean which determines if corloc scores are
to be returned or not.
evaluate_precision_recall: (optional) boolean which determines if
precision and recall values are to be returned or not.
metric_prefix: (optional) string prefix for metric name; if None, no
prefix is used.
use_weighted_mean_ap: (optional) boolean which determines if the mean
average precision is computed directly from the scores and tp_fp_labels
of all classes.
evaluate_masks: If False, evaluation will be performed based on boxes. If
True, mask evaluation will be performed instead.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
      nms_iou_threshold: NMS IoU threshold.
nms_max_output_boxes: maximal number of boxes after NMS.
Raises:
ValueError: If the category ids are not 1-indexed.
"""
super(ObjectDetectionEvaluator, self).__init__(categories)
self._num_classes = max([cat['id'] for cat in categories])
if min(cat['id'] for cat in categories) < 1:
raise ValueError('Classes should be 1-indexed.')
self._matching_iou_threshold = matching_iou_threshold
self._recall_lower_bound = recall_lower_bound
self._recall_upper_bound = recall_upper_bound
self._use_weighted_mean_ap = use_weighted_mean_ap
self._label_id_offset = 1
self._evaluate_masks = evaluate_masks
self._group_of_weight = group_of_weight
self._nms_iou_threshold = nms_iou_threshold
self._nms_max_output_boxes = nms_max_output_boxes
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
recall_lower_bound=self._recall_lower_bound,
recall_upper_bound=self._recall_upper_bound,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset,
group_of_weight=self._group_of_weight,
nms_iou_threshold=self._nms_iou_threshold,
nms_max_output_boxes=self._nms_max_output_boxes)
self._image_ids = set([])
self._evaluate_corlocs = evaluate_corlocs
self._evaluate_precision_recall = evaluate_precision_recall
self._metric_prefix = (metric_prefix + '_') if metric_prefix else ''
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_difficult,
standard_fields.InputDataFields.groundtruth_instance_masks,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
standard_fields.DetectionResultFields.detection_masks
])
self._build_metric_names()
def get_internal_state(self):
"""Returns internal state and image ids that lead to the state.
Note that only evaluation results will be returned (e.g. not raw predictions
    or groundtruth).
"""
return self._evaluation.get_internal_state(), self._image_ids
def merge_internal_state(self, image_ids, state_tuple):
"""Merges internal state with the existing state of evaluation.
If image_id is already seen by evaluator, an error will be thrown.
Args:
image_ids: list of images whose state is stored in the tuple.
state_tuple: state.
"""
for image_id in image_ids:
if image_id in self._image_ids:
logging.warning('Image with id %s already added.', image_id)
self._image_ids.update(image_ids)
self._evaluation.merge_internal_state(state_tuple)
def _build_metric_names(self):
"""Builds a list with metric names."""
if self._recall_lower_bound > 0.0 or self._recall_upper_bound < 1.0:
self._metric_names = [
self._metric_prefix +
'Precision/mAP@{}IOU@[{:.1f},{:.1f}]Recall'.format(
self._matching_iou_threshold, self._recall_lower_bound,
self._recall_upper_bound)
]
else:
self._metric_names = [
self._metric_prefix +
'Precision/mAP@{}IOU'.format(self._matching_iou_threshold)
]
if self._evaluate_corlocs:
self._metric_names.append(
self._metric_prefix +
'Precision/meanCorLoc@{}IOU'.format(self._matching_iou_threshold))
category_index = label_map_util.create_category_index(self._categories)
for idx in range(self._num_classes):
if idx + self._label_id_offset in category_index:
category_name = category_index[idx + self._label_id_offset]['name']
try:
category_name = six.text_type(category_name, 'utf-8')
except TypeError:
pass
category_name = unicodedata.normalize('NFKD', category_name)
if six.PY2:
category_name = category_name.encode('ascii', 'ignore')
self._metric_names.append(
self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
if self._evaluate_corlocs:
self._metric_names.append(
self._metric_prefix +
'PerformanceByCategory/CorLoc@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_difficult: Optional length M
numpy boolean array denoting whether a ground truth box is a difficult
instance or not. This field is optional to support the case that no
boxes are difficult.
standard_fields.InputDataFields.groundtruth_instance_masks: Optional
numpy array of shape [num_boxes, height, width] with values in {0, 1}.
Raises:
ValueError: On adding groundtruth for an image more than once. Will also
raise error if instance masks are not in groundtruth dictionary.
"""
if image_id in self._image_ids:
logging.warning('Image with id %s already added.', image_id)
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
    # Use the groundtruth difficult flag from the dictionary only when the
    # field is present and non-empty (or when the image has no groundtruth
    # annotations at all); otherwise fall back to None.
if (standard_fields.InputDataFields.groundtruth_difficult
in six.viewkeys(groundtruth_dict) and
(groundtruth_dict[standard_fields.InputDataFields.groundtruth_difficult]
.size or not groundtruth_classes.size)):
groundtruth_difficult = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_difficult]
else:
groundtruth_difficult = None
if not len(self._image_ids) % 1000:
logging.warning(
'image %s does not have groundtruth difficult flag specified',
image_id)
groundtruth_masks = None
if self._evaluate_masks:
if (standard_fields.InputDataFields.groundtruth_instance_masks
not in groundtruth_dict):
raise ValueError('Instance masks not in groundtruth dictionary.')
groundtruth_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
self._evaluation.add_single_ground_truth_image_info(
image_key=image_id,
groundtruth_boxes=groundtruth_dict[
standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_class_labels=groundtruth_classes,
groundtruth_is_difficult_list=groundtruth_difficult,
groundtruth_masks=groundtruth_masks)
self._image_ids.update([image_id])
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
standard_fields.DetectionResultFields.detection_masks: uint8 numpy array
of shape [num_boxes, height, width] containing `num_boxes` masks of
values ranging between 0 and 1.
Raises:
ValueError: If detection masks are not in detections dictionary.
"""
detection_classes = (
detections_dict[standard_fields.DetectionResultFields.detection_classes]
- self._label_id_offset)
detection_masks = None
if self._evaluate_masks:
if (standard_fields.DetectionResultFields.detection_masks not in
detections_dict):
raise ValueError('Detection masks not in detections dictionary.')
detection_masks = detections_dict[
standard_fields.DetectionResultFields.detection_masks]
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detections_dict[
standard_fields.DetectionResultFields.detection_boxes],
detected_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores],
detected_class_labels=detection_classes,
detected_masks=detection_masks)
def evaluate(self):
"""Compute evaluation result.
Returns:
A dictionary of metrics with the following fields -
1. summary_metrics:
'<prefix if not empty>_Precision/mAP@<matching_iou_threshold>IOU': mean
average precision at the specified IOU threshold.
2. per_category_ap: category specific results with keys of the form
'<prefix if not empty>_PerformanceByCategory/
mAP@<matching_iou_threshold>IOU/category'.
"""
(per_class_ap, mean_ap, per_class_precision, per_class_recall,
per_class_corloc, mean_corloc) = (
self._evaluation.evaluate())
pascal_metrics = {self._metric_names[0]: mean_ap}
if self._evaluate_corlocs:
pascal_metrics[self._metric_names[1]] = mean_corloc
category_index = label_map_util.create_category_index(self._categories)
for idx in range(per_class_ap.size):
if idx + self._label_id_offset in category_index:
category_name = category_index[idx + self._label_id_offset]['name']
try:
category_name = six.text_type(category_name, 'utf-8')
except TypeError:
pass
category_name = unicodedata.normalize('NFKD', category_name)
if six.PY2:
category_name = category_name.encode('ascii', 'ignore')
display_name = (
self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_ap[idx]
# Optionally add precision and recall values
if self._evaluate_precision_recall:
display_name = (
self._metric_prefix +
'PerformanceByCategory/Precision@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_precision[idx]
display_name = (
self._metric_prefix +
'PerformanceByCategory/Recall@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_recall[idx]
        # Optionally add CorLoc metrics.
if self._evaluate_corlocs:
display_name = (
self._metric_prefix +
'PerformanceByCategory/CorLoc@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_corloc[idx]
return pascal_metrics
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset,
nms_iou_threshold=self._nms_iou_threshold,
nms_max_output_boxes=self._nms_max_output_boxes,
)
self._image_ids.clear()
def add_eval_dict(self, eval_dict):
"""Observes an evaluation result dict for a single example.
When executing eagerly, once all observations have been observed by this
method you can use `.evaluate()` to get the final metrics.
When using `tf.estimator.Estimator` for evaluation this function is used by
`get_estimator_eval_metric_ops()` to construct the metric update op.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
None when executing eagerly, or an update_op that can be used to update
the eval metrics in `tf.estimator.EstimatorSpec`.
"""
# remove unexpected fields
eval_dict_filtered = dict()
for key, value in eval_dict.items():
if key in self._expected_keys:
eval_dict_filtered[key] = value
eval_dict_keys = list(eval_dict_filtered.keys())
def update_op(image_id, *eval_dict_batched_as_list):
"""Update operation that adds batch of images to ObjectDetectionEvaluator.
Args:
image_id: image id (single id or an array)
*eval_dict_batched_as_list: the values of the dictionary of tensors.
"""
if np.isscalar(image_id):
single_example_dict = dict(
zip(eval_dict_keys, eval_dict_batched_as_list))
self.add_single_ground_truth_image_info(image_id, single_example_dict)
self.add_single_detected_image_info(image_id, single_example_dict)
else:
for unzipped_tuple in zip(*eval_dict_batched_as_list):
single_example_dict = dict(zip(eval_dict_keys, unzipped_tuple))
image_id = single_example_dict[standard_fields.InputDataFields.key]
self.add_single_ground_truth_image_info(image_id, single_example_dict)
self.add_single_detected_image_info(image_id, single_example_dict)
args = [eval_dict_filtered[standard_fields.InputDataFields.key]]
args.extend(six.itervalues(eval_dict_filtered))
return tf.py_func(update_op, args, [])
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns dict of metrics to use with `tf.estimator.EstimatorSpec`.
Note that this must only be implemented if performing evaluation with a
`tf.estimator.Estimator`.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example(). It must contain
standard_fields.InputDataFields.key.
Returns:
A dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in `tf.estimator.EstimatorSpec`.
"""
update_op = self.add_eval_dict(eval_dict)
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[self._metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
# Ensure that the metrics are only evaluated once.
first_value_op = tf.py_func(first_value_func, [], tf.float32)
eval_metric_ops = {self._metric_names[0]: (first_value_op, update_op)}
with tf.control_dependencies([first_value_op]):
for metric_name in self._metric_names[1:]:
eval_metric_ops[metric_name] = (tf.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
class PascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using PASCAL metrics."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
nms_iou_threshold=1.0,
nms_max_output_boxes=10000):
super(PascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='PascalBoxes',
use_weighted_mean_ap=False,
nms_iou_threshold=nms_iou_threshold,
nms_max_output_boxes=nms_max_output_boxes)
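# Illustrative usage sketch (not part of this module): the add-groundtruth /
# add-detections / evaluate flow with PascalDetectionEvaluator. The category
# list, boxes, classes and scores below are made up for the example.
def _example_pascal_evaluation():
  """Hedged example: evaluating a single image with one 'cat' detection."""
  categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
  evaluator = PascalDetectionEvaluator(categories, matching_iou_threshold=0.5)
  evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[10., 10., 50., 50.]], dtype=np.float32),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1], dtype=np.int32),
      })
  evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[10., 10., 50., 50.]], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([0.9], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1], dtype=np.int32),
      })
  # Returns a dict of metrics, e.g. 'PascalBoxes_Precision/mAP@0.5IOU': value.
  return evaluator.evaluate()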
class WeightedPascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using weighted PASCAL metrics.
Weighted PASCAL metrics computes the mean average precision as the average
precision given the scores and tp_fp_labels of all classes. In comparison,
PASCAL metrics computes the mean average precision as the mean of the
per-class average precisions.
This definition is very similar to the mean of the per-class average
precisions weighted by class frequency. However, they are typically not the
same as the average precision is not a linear function of the scores and
tp_fp_labels.
"""
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='WeightedPascalBoxes',
use_weighted_mean_ap=True)
class PrecisionAtRecallDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using precision@recall metrics."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
recall_lower_bound=0.0,
recall_upper_bound=1.0,
skip_predictions_for_unlabeled_class=False):
super(PrecisionAtRecallDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
recall_lower_bound=recall_lower_bound,
recall_upper_bound=recall_upper_bound,
evaluate_corlocs=False,
metric_prefix='PrecisionAtRecallBoxes',
use_weighted_mean_ap=False)
self._skip_predictions_for_unlabeled_class = skip_predictions_for_unlabeled_class
self._expected_keys.update(
[standard_fields.InputDataFields.groundtruth_labeled_classes])
self.groundtruth_labeled_classes = {}
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
If the labeled classes field is present, a map of image_id to
groundtruth_labeled_classes is populated with the one-hot labeled classes.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_labeled_classes: Optional
numpy one-hot integer array of shape [num_classes+1] containing 1
for classes that are labeled in the image and 0 otherwise.
Raises:
ValueError: If shape of labeled classes field is not as expected.
"""
super(PrecisionAtRecallDetectionEvaluator,
self).add_single_ground_truth_image_info(image_id, groundtruth_dict)
labeled_classes = groundtruth_dict.get(
standard_fields.InputDataFields.groundtruth_labeled_classes, None)
if self._skip_predictions_for_unlabeled_class and labeled_classes is not None:
if labeled_classes.shape != (self._num_classes + 1,):
raise ValueError('Invalid shape for groundtruth labeled classes: {}, '
'num_categories_including_background: {}'.format(
labeled_classes, self._num_classes + 1))
labeled_classes = np.flatnonzero(labeled_classes == 1).tolist()
self.groundtruth_labeled_classes[image_id] = labeled_classes
else:
self.groundtruth_labeled_classes[image_id] = None
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
If the labeled classes field has been populated for the given image_id,
the detections for classes that are not in the labeled classes are
filtered out.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
"""
groundtruth_labeled_classes = self.groundtruth_labeled_classes[image_id]
if groundtruth_labeled_classes is not None:
detection_classes_key = standard_fields.DetectionResultFields.detection_classes
detected_boxes_key = standard_fields.DetectionResultFields.detection_boxes
detected_scores_key = standard_fields.DetectionResultFields.detection_scores
# Only keep detection if label is in groundtruth_labeled_classes.
allowed = np.isin(detections_dict[detection_classes_key],
groundtruth_labeled_classes)
detections_dict[detection_classes_key] = detections_dict[
detection_classes_key][allowed]
detections_dict[detected_boxes_key] = detections_dict[detected_boxes_key][
allowed]
detections_dict[detected_scores_key] = detections_dict[
detected_scores_key][allowed]
super(PrecisionAtRecallDetectionEvaluator,
self).add_single_detected_image_info(image_id, detections_dict)
class PascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate instance masks using PASCAL metrics."""
def __init__(self, categories, matching_iou_threshold=0.5):
super(PascalInstanceSegmentationEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='PascalMasks',
use_weighted_mean_ap=False,
evaluate_masks=True)
class WeightedPascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate instance masks using weighted PASCAL metrics.
Weighted PASCAL metrics computes the mean average precision as the average
precision given the scores and tp_fp_labels of all classes. In comparison,
PASCAL metrics computes the mean average precision as the mean of the
per-class average precisions.
This definition is very similar to the mean of the per-class average
precisions weighted by class frequency. However, they are typically not the
same as the average precision is not a linear function of the scores and
tp_fp_labels.
"""
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalInstanceSegmentationEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='WeightedPascalMasks',
use_weighted_mean_ap=True,
evaluate_masks=True)
class OpenImagesDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using Open Images V2 metrics.
  Open Images V2 introduces the group-of type of bounding boxes, and this
  metric handles those boxes appropriately.
"""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_masks=False,
evaluate_corlocs=False,
metric_prefix='OpenImagesV2',
group_of_weight=0.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_masks: if True, evaluator evaluates masks.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
metric_prefix: Prefix name of the metric.
group_of_weight: Weight of the group-of bounding box. If set to 0 (default
for Open Images V2 detection protocol), detections of the correct class
within a group-of box are ignored. If weight is > 0, then if at least
one detection falls within a group-of box with matching_iou_threshold,
weight group_of_weight is added to true positives. Consequently, if no
detection falls within a group-of box, weight group_of_weight is added
to false negatives.
"""
super(OpenImagesDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold,
evaluate_corlocs,
metric_prefix=metric_prefix,
group_of_weight=group_of_weight,
evaluate_masks=evaluate_masks)
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_group_of,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
])
if evaluate_masks:
self._expected_keys.add(
standard_fields.InputDataFields.groundtruth_instance_masks)
self._expected_keys.add(
standard_fields.DetectionResultFields.detection_masks)
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_group_of: Optional length M
numpy boolean array denoting whether a groundtruth box contains a
group of instances.
Raises:
ValueError: On adding groundtruth for an image more than once.
"""
if image_id in self._image_ids:
logging.warning('Image with id %s already added.', image_id)
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
    # Use the groundtruth group_of flag from the dictionary only when the
    # field is present and non-empty (or when the image has no groundtruth
    # annotations at all); otherwise fall back to None.
if (standard_fields.InputDataFields.groundtruth_group_of in six.viewkeys(
groundtruth_dict) and
(groundtruth_dict[standard_fields.InputDataFields.groundtruth_group_of]
.size or not groundtruth_classes.size)):
groundtruth_group_of = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_group_of]
else:
groundtruth_group_of = None
if not len(self._image_ids) % 1000:
logging.warning(
'image %s does not have groundtruth group_of flag specified',
image_id)
if self._evaluate_masks:
groundtruth_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
else:
groundtruth_masks = None
self._evaluation.add_single_ground_truth_image_info(
image_id,
groundtruth_dict[standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_classes,
groundtruth_is_difficult_list=None,
groundtruth_is_group_of_list=groundtruth_group_of,
groundtruth_masks=groundtruth_masks)
self._image_ids.update([image_id])
class OpenImagesChallengeEvaluator(OpenImagesDetectionEvaluator):
"""A class implements Open Images Challenge metrics.
Both Detection and Instance Segmentation evaluation metrics are implemented.
Open Images Challenge Detection metric has two major changes in comparison
with Open Images V2 detection metric:
- a custom weight might be specified for detecting an object contained in
a group-of box.
  - verified image-level labels should be explicitly provided for
  evaluation: in case an image has neither positive nor negative image-level
label of class c, all detections of this class on this image will be
ignored.
  Open Images Challenge Instance Segmentation metric allows measuring the
  performance of models in the case of incomplete annotations: some instances
  are annotated only at the box level and some only at the image level. In
  addition, image-level labels are taken into account as in the detection
  metric.
Open Images Challenge Detection metric default parameters:
evaluate_masks = False
group_of_weight = 1.0
Open Images Challenge Instance Segmentation metric default parameters:
evaluate_masks = True
(group_of_weight will not matter)
"""
def __init__(self,
categories,
evaluate_masks=False,
matching_iou_threshold=0.5,
evaluate_corlocs=False,
group_of_weight=1.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
evaluate_masks: set to true for instance segmentation metric and to false
for detection metric.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
"""
if not evaluate_masks:
metrics_prefix = 'OpenImagesDetectionChallenge'
else:
metrics_prefix = 'OpenImagesInstanceSegmentationChallenge'
super(OpenImagesChallengeEvaluator, self).__init__(
categories,
matching_iou_threshold,
evaluate_masks=evaluate_masks,
evaluate_corlocs=evaluate_corlocs,
group_of_weight=group_of_weight,
metric_prefix=metrics_prefix)
self._evaluatable_labels = {}
# Only one of the two has to be provided, but both options are given
# for compatibility with previous codebase.
self._expected_keys.update([
standard_fields.InputDataFields.groundtruth_image_classes,
standard_fields.InputDataFields.groundtruth_labeled_classes])
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_image_classes: integer 1D
numpy array containing all classes for which labels are verified.
standard_fields.InputDataFields.groundtruth_group_of: Optional length M
numpy boolean array denoting whether a groundtruth box contains a
group of instances.
Raises:
ValueError: On adding groundtruth for an image more than once.
"""
super(OpenImagesChallengeEvaluator,
self).add_single_ground_truth_image_info(image_id, groundtruth_dict)
input_fields = standard_fields.InputDataFields
groundtruth_classes = (
groundtruth_dict[input_fields.groundtruth_classes] -
self._label_id_offset)
image_classes = np.array([], dtype=int)
if input_fields.groundtruth_image_classes in groundtruth_dict:
image_classes = groundtruth_dict[input_fields.groundtruth_image_classes]
elif input_fields.groundtruth_labeled_classes in groundtruth_dict:
image_classes = groundtruth_dict[input_fields.groundtruth_labeled_classes]
else:
logging.warning('No image classes field found for image with id %s!',
image_id)
image_classes -= self._label_id_offset
self._evaluatable_labels[image_id] = np.unique(
np.concatenate((image_classes, groundtruth_classes)))
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
Raises:
ValueError: If detection masks are not in detections dictionary.
"""
if image_id not in self._image_ids:
      # The evaluator assumes that groundtruth is added before detections for
      # an image; if it was not, register the image with an empty set of
      # evaluatable labels so that all its detections are ignored.
self._image_ids.update([image_id])
self._evaluatable_labels[image_id] = np.array([])
detection_classes = (
detections_dict[standard_fields.DetectionResultFields.detection_classes]
- self._label_id_offset)
allowed_classes = np.where(
np.isin(detection_classes, self._evaluatable_labels[image_id]))
detection_classes = detection_classes[allowed_classes]
detected_boxes = detections_dict[
standard_fields.DetectionResultFields.detection_boxes][allowed_classes]
detected_scores = detections_dict[
standard_fields.DetectionResultFields.detection_scores][allowed_classes]
if self._evaluate_masks:
detection_masks = detections_dict[standard_fields.DetectionResultFields
.detection_masks][allowed_classes]
else:
detection_masks = None
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detection_classes,
detected_masks=detection_masks)
def clear(self):
"""Clears stored data."""
super(OpenImagesChallengeEvaluator, self).clear()
self._evaluatable_labels.clear()
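# The following sketch is illustrative only and not part of the original API
# surface: it shows one plausible way to drive OpenImagesChallengeEvaluator
# end to end. The image id, category list, boxes and scores are made-up
# values; only class 1 is verified at image level, so detections of any other
# class for this image would be ignored.
def _example_open_images_challenge_usage():
  categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
  evaluator = OpenImagesChallengeEvaluator(categories, evaluate_masks=False)
  evaluator.add_single_ground_truth_image_info(
      'image1', {
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[10., 10., 50., 50.]], dtype=np.float32),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1], dtype=np.int32),
          standard_fields.InputDataFields.groundtruth_image_classes:
              np.array([1], dtype=np.int32),
      })
  evaluator.add_single_detected_image_info(
      'image1', {
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[10., 10., 50., 50.]], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([0.9], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1], dtype=np.int32),
      })
  return evaluator.evaluate()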
ObjectDetectionEvalMetrics = collections.namedtuple(
'ObjectDetectionEvalMetrics', [
'average_precisions', 'mean_ap', 'precisions', 'recalls', 'corlocs',
'mean_corloc'
])
class OpenImagesDetectionChallengeEvaluator(OpenImagesChallengeEvaluator):
"""A class implements Open Images Detection Challenge metric."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
"""
super(OpenImagesDetectionChallengeEvaluator, self).__init__(
categories=categories,
evaluate_masks=False,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
group_of_weight=1.0)
class OpenImagesInstanceSegmentationChallengeEvaluator(
OpenImagesChallengeEvaluator):
"""A class implements Open Images Instance Segmentation Challenge metric."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
"""
super(OpenImagesInstanceSegmentationChallengeEvaluator, self).__init__(
categories=categories,
evaluate_masks=True,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
group_of_weight=0.0)
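# Illustrative note, not in the original file: the two thin wrappers above
# differ only in the arguments they forward to OpenImagesChallengeEvaluator.
# The detection variant uses evaluate_masks=False with group_of_weight=1.0,
# while the instance segmentation variant uses evaluate_masks=True (where
# group_of_weight has no effect). A minimal, assumed instantiation:
#
#   detection_evaluator = OpenImagesDetectionChallengeEvaluator(
#       categories=[{'id': 1, 'name': 'cat'}],
#       matching_iou_threshold=0.5)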
ObjectDetectionEvaluationState = collections.namedtuple(
'ObjectDetectionEvaluationState', [
'num_gt_instances_per_class',
'scores_per_class',
'tp_fp_labels_per_class',
'num_gt_imgs_per_class',
'num_images_correctly_detected_per_class',
])
class ObjectDetectionEvaluation(object):
"""Internal implementation of Pascal object detection metrics."""
def __init__(self,
num_groundtruth_classes,
matching_iou_threshold=0.5,
nms_iou_threshold=1.0,
nms_max_output_boxes=10000,
recall_lower_bound=0.0,
recall_upper_bound=1.0,
use_weighted_mean_ap=False,
label_id_offset=0,
group_of_weight=0.0,
per_image_eval_class=per_image_evaluation.PerImageEvaluation):
"""Constructor.
Args:
num_groundtruth_classes: Number of ground-truth classes.
matching_iou_threshold: IOU threshold used for matching detected boxes to
ground-truth boxes.
nms_iou_threshold: IOU threshold used for non-maximum suppression.
nms_max_output_boxes: Maximum number of boxes returned by non-maximum
suppression.
recall_lower_bound: lower bound of recall operating area
recall_upper_bound: upper bound of recall operating area
use_weighted_mean_ap: (optional) boolean which determines if the mean
average precision is computed directly from the scores and tp_fp_labels
of all classes.
label_id_offset: The label id offset.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
per_image_eval_class: The class that contains functions for computing per
image metrics.
Raises:
ValueError: if num_groundtruth_classes is smaller than 1.
"""
if num_groundtruth_classes < 1:
raise ValueError('Need at least 1 groundtruth class for evaluation.')
self.per_image_eval = per_image_eval_class(
num_groundtruth_classes=num_groundtruth_classes,
matching_iou_threshold=matching_iou_threshold,
nms_iou_threshold=nms_iou_threshold,
nms_max_output_boxes=nms_max_output_boxes,
group_of_weight=group_of_weight)
self.recall_lower_bound = recall_lower_bound
self.recall_upper_bound = recall_upper_bound
self.group_of_weight = group_of_weight
self.num_class = num_groundtruth_classes
self.use_weighted_mean_ap = use_weighted_mean_ap
self.label_id_offset = label_id_offset
self.groundtruth_boxes = {}
self.groundtruth_class_labels = {}
self.groundtruth_masks = {}
self.groundtruth_is_difficult_list = {}
self.groundtruth_is_group_of_list = {}
self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=float)
self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)
self._initialize_detections()
def _initialize_detections(self):
"""Initializes internal data structures."""
self.detection_keys = set()
self.scores_per_class = [[] for _ in range(self.num_class)]
self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
self.average_precision_per_class = np.empty(self.num_class, dtype=float)
self.average_precision_per_class.fill(np.nan)
self.precisions_per_class = [np.nan] * self.num_class
self.recalls_per_class = [np.nan] * self.num_class
self.sum_tp_class = [np.nan] * self.num_class
self.corloc_per_class = np.ones(self.num_class, dtype=float)
def clear_detections(self):
self._initialize_detections()
def get_internal_state(self):
"""Returns internal state of the evaluation.
    NOTE: only evaluation results will be returned
(e.g. no raw predictions or groundtruth).
Returns:
internal state of the evaluation.
"""
return ObjectDetectionEvaluationState(
self.num_gt_instances_per_class, self.scores_per_class,
self.tp_fp_labels_per_class, self.num_gt_imgs_per_class,
self.num_images_correctly_detected_per_class)
def merge_internal_state(self, state_tuple):
"""Merges internal state of the evaluation with the current state.
Args:
state_tuple: state tuple representing evaluation state: should be of type
ObjectDetectionEvaluationState.
"""
(num_gt_instances_per_class, scores_per_class, tp_fp_labels_per_class,
num_gt_imgs_per_class, num_images_correctly_detected_per_class) = (
state_tuple)
assert self.num_class == len(num_gt_instances_per_class)
assert self.num_class == len(scores_per_class)
assert self.num_class == len(tp_fp_labels_per_class)
for i in range(self.num_class):
self.scores_per_class[i].extend(scores_per_class[i])
self.tp_fp_labels_per_class[i].extend(tp_fp_labels_per_class[i])
self.num_gt_instances_per_class[i] += num_gt_instances_per_class[i]
self.num_gt_imgs_per_class[i] += num_gt_imgs_per_class[i]
self.num_images_correctly_detected_per_class[
i] += num_images_correctly_detected_per_class[i]
def add_single_ground_truth_image_info(self,
image_key,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_list=None,
groundtruth_is_group_of_list=None,
groundtruth_masks=None):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
groundtruth_boxes: float32 numpy array of shape [num_boxes, 4] containing
`num_boxes` groundtruth boxes of the format [ymin, xmin, ymax, xmax] in
absolute image coordinates.
groundtruth_class_labels: integer numpy array of shape [num_boxes]
containing 0-indexed groundtruth classes for the boxes.
groundtruth_is_difficult_list: A length M numpy boolean array denoting
whether a ground truth box is a difficult instance or not. To support
the case that no boxes are difficult, it is by default set as None.
groundtruth_is_group_of_list: A length M numpy boolean array denoting
whether a ground truth box is a group-of box or not. To support the case
that no boxes are groups-of, it is by default set as None.
groundtruth_masks: uint8 numpy array of shape [num_boxes, height, width]
containing `num_boxes` groundtruth masks. The mask values range from 0
to 1.
"""
if image_key in self.groundtruth_boxes:
logging.warning(
'image %s has already been added to the ground truth database.',
image_key)
return
self.groundtruth_boxes[image_key] = groundtruth_boxes
self.groundtruth_class_labels[image_key] = groundtruth_class_labels
self.groundtruth_masks[image_key] = groundtruth_masks
if groundtruth_is_difficult_list is None:
num_boxes = groundtruth_boxes.shape[0]
groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool)
self.groundtruth_is_difficult_list[
image_key] = groundtruth_is_difficult_list.astype(dtype=bool)
if groundtruth_is_group_of_list is None:
num_boxes = groundtruth_boxes.shape[0]
groundtruth_is_group_of_list = np.zeros(num_boxes, dtype=bool)
if groundtruth_masks is None:
num_boxes = groundtruth_boxes.shape[0]
mask_presence_indicator = np.zeros(num_boxes, dtype=bool)
else:
mask_presence_indicator = (np.sum(groundtruth_masks,
axis=(1, 2)) == 0).astype(dtype=bool)
self.groundtruth_is_group_of_list[
image_key] = groundtruth_is_group_of_list.astype(dtype=bool)
self._update_ground_truth_statistics(
groundtruth_class_labels,
groundtruth_is_difficult_list.astype(dtype=bool)
| mask_presence_indicator, # ignore boxes without masks
groundtruth_is_group_of_list.astype(dtype=bool))
def add_single_detected_image_info(self,
image_key,
detected_boxes,
detected_scores,
detected_class_labels,
detected_masks=None):
"""Adds detections for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
detected_boxes: float32 numpy array of shape [num_boxes, 4] containing
`num_boxes` detection boxes of the format [ymin, xmin, ymax, xmax] in
absolute image coordinates.
detected_scores: float32 numpy array of shape [num_boxes] containing
detection scores for the boxes.
detected_class_labels: integer numpy array of shape [num_boxes] containing
0-indexed detection classes for the boxes.
detected_masks: np.uint8 numpy array of shape [num_boxes, height, width]
containing `num_boxes` detection masks with values ranging between 0 and
1.
Raises:
ValueError: if the number of boxes, scores and class labels differ in
length.
"""
if (len(detected_boxes) != len(detected_scores) or
len(detected_boxes) != len(detected_class_labels)):
      raise ValueError(
          'detected_boxes, detected_scores and '
          'detected_class_labels should all have same lengths. Got '
          '[%d, %d, %d]' % (len(detected_boxes), len(detected_scores),
                            len(detected_class_labels)))
if image_key in self.detection_keys:
logging.warning(
'image %s has already been added to the detection result database',
image_key)
return
self.detection_keys.add(image_key)
if image_key in self.groundtruth_boxes:
groundtruth_boxes = self.groundtruth_boxes[image_key]
groundtruth_class_labels = self.groundtruth_class_labels[image_key]
      # Masks are popped instead of looked up. The reason is that we do not want
# to keep all masks in memory which can cause memory overflow.
groundtruth_masks = self.groundtruth_masks.pop(image_key)
groundtruth_is_difficult_list = self.groundtruth_is_difficult_list[
image_key]
groundtruth_is_group_of_list = self.groundtruth_is_group_of_list[
image_key]
else:
groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
groundtruth_class_labels = np.array([], dtype=int)
if detected_masks is None:
groundtruth_masks = None
else:
groundtruth_masks = np.empty(shape=[0, 1, 1], dtype=float)
groundtruth_is_difficult_list = np.array([], dtype=bool)
groundtruth_is_group_of_list = np.array([], dtype=bool)
scores, tp_fp_labels, is_class_correctly_detected_in_image = (
self.per_image_eval.compute_object_detection_metrics(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
groundtruth_is_difficult_list=groundtruth_is_difficult_list,
groundtruth_is_group_of_list=groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks))
for i in range(self.num_class):
if scores[i].shape[0] > 0:
self.scores_per_class[i].append(scores[i])
self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
(self.num_images_correctly_detected_per_class
) += is_class_correctly_detected_in_image
def _update_ground_truth_statistics(self, groundtruth_class_labels,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list):
"""Update grouth truth statitistics.
1. Difficult boxes are ignored when counting the number of ground truth
instances as done in Pascal VOC devkit.
2. Difficult boxes are treated as normal boxes when computing CorLoc related
      statistics.
Args:
groundtruth_class_labels: An integer numpy array of length M, representing
M class labels of object instances in ground truth
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box is a group-of box or not
"""
for class_index in range(self.num_class):
num_gt_instances = np.sum(groundtruth_class_labels[
~groundtruth_is_difficult_list
& ~groundtruth_is_group_of_list] == class_index)
num_groupof_gt_instances = self.group_of_weight * np.sum(
groundtruth_class_labels[
groundtruth_is_group_of_list
& ~groundtruth_is_difficult_list] == class_index)
self.num_gt_instances_per_class[
class_index] += num_gt_instances + num_groupof_gt_instances
if np.any(groundtruth_class_labels == class_index):
self.num_gt_imgs_per_class[class_index] += 1
def evaluate(self):
"""Compute evaluation result.
Returns:
A named tuple with the following fields -
average_precision: float numpy array of average precision for
each class.
mean_ap: mean average precision of all classes, float scalar
precisions: List of precisions, each precision is a float numpy
array
recalls: List of recalls, each recall is a float numpy array
corloc: numpy float array
mean_corloc: Mean CorLoc score for each class, float scalar
"""
if (self.num_gt_instances_per_class == 0).any():
logging.warning(
'The following classes have no ground truth examples: %s',
np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
self.label_id_offset)
if self.use_weighted_mean_ap:
all_scores = np.array([], dtype=float)
all_tp_fp_labels = np.array([], dtype=bool)
for class_index in range(self.num_class):
if self.num_gt_instances_per_class[class_index] == 0:
continue
if not self.scores_per_class[class_index]:
scores = np.array([], dtype=float)
tp_fp_labels = np.array([], dtype=float)
else:
scores = np.concatenate(self.scores_per_class[class_index])
tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
if self.use_weighted_mean_ap:
all_scores = np.append(all_scores, scores)
all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
precision, recall = metrics.compute_precision_recall(
scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
recall_within_bound_indices = [
index for index, value in enumerate(recall) if
value >= self.recall_lower_bound and value <= self.recall_upper_bound
]
recall_within_bound = recall[recall_within_bound_indices]
precision_within_bound = precision[recall_within_bound_indices]
self.precisions_per_class[class_index] = precision_within_bound
self.recalls_per_class[class_index] = recall_within_bound
self.sum_tp_class[class_index] = tp_fp_labels.sum()
average_precision = metrics.compute_average_precision(
precision_within_bound, recall_within_bound)
self.average_precision_per_class[class_index] = average_precision
logging.info(
'class %d average_precision: %f', class_index, average_precision)
self.corloc_per_class = metrics.compute_cor_loc(
self.num_gt_imgs_per_class,
self.num_images_correctly_detected_per_class)
if self.use_weighted_mean_ap:
num_gt_instances = np.sum(self.num_gt_instances_per_class)
precision, recall = metrics.compute_precision_recall(
all_scores, all_tp_fp_labels, num_gt_instances)
recall_within_bound_indices = [
index for index, value in enumerate(recall) if
value >= self.recall_lower_bound and value <= self.recall_upper_bound
]
recall_within_bound = recall[recall_within_bound_indices]
precision_within_bound = precision[recall_within_bound_indices]
mean_ap = metrics.compute_average_precision(precision_within_bound,
recall_within_bound)
else:
mean_ap = np.nanmean(self.average_precision_per_class)
mean_corloc = np.nanmean(self.corloc_per_class)
return ObjectDetectionEvalMetrics(self.average_precision_per_class, mean_ap,
self.precisions_per_class,
self.recalls_per_class,
self.corloc_per_class, mean_corloc)
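# Illustrative sketch, not part of the original module: one plausible way to
# use ObjectDetectionEvaluation directly and to merge state accumulated by two
# workers before computing metrics. All boxes, labels and scores are made up.
def _example_sharded_object_detection_evaluation():
  worker_a = ObjectDetectionEvaluation(num_groundtruth_classes=1)
  worker_a.add_single_ground_truth_image_info(
      'img_a', np.array([[0., 0., 10., 10.]], dtype=np.float32),
      np.array([0], dtype=np.int32))
  worker_a.add_single_detected_image_info(
      'img_a', np.array([[0., 0., 10., 10.]], dtype=np.float32),
      np.array([0.8], dtype=np.float32), np.array([0], dtype=np.int32))
  worker_b = ObjectDetectionEvaluation(num_groundtruth_classes=1)
  worker_b.add_single_ground_truth_image_info(
      'img_b', np.array([[5., 5., 20., 20.]], dtype=np.float32),
      np.array([0], dtype=np.int32))
  worker_b.add_single_detected_image_info(
      'img_b', np.array([[0., 0., 1., 1.]], dtype=np.float32),
      np.array([0.3], dtype=np.float32), np.array([0], dtype=np.int32))
  # Fold worker_b's accumulated statistics into worker_a, then evaluate once.
  worker_a.merge_internal_state(worker_b.get_internal_state())
  return worker_a.evaluate()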
| 63,233 | 42.851595 | 85 | py |
models | models-master/research/object_detection/utils/tf_version.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to check TensorFlow Version."""
from tensorflow.python import tf2 # pylint: disable=import-outside-toplevel
def is_tf1():
"""Whether current TensorFlow Version is 1.X."""
return not tf2.enabled()
def is_tf2():
"""Whether current TensorFlow Version is 2.X."""
return tf2.enabled()
| 996 | 34.607143 | 80 | py |
models | models-master/research/object_detection/utils/np_box_list_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_list_test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_box_list
class BoxListTest(tf.test.TestCase):
def test_invalid_box_data(self):
with self.assertRaises(ValueError):
np_box_list.BoxList([0, 0, 1, 1])
with self.assertRaises(ValueError):
np_box_list.BoxList(np.array([[0, 0, 1, 1]], dtype=int))
with self.assertRaises(ValueError):
np_box_list.BoxList(np.array([0, 1, 1, 3, 4], dtype=float))
with self.assertRaises(ValueError):
np_box_list.BoxList(np.array([[0, 1, 1, 3], [3, 1, 1, 5]], dtype=float))
def test_has_field_with_existed_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
self.assertTrue(boxlist.has_field('boxes'))
def test_has_field_with_nonexisted_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
self.assertFalse(boxlist.has_field('scores'))
def test_get_field_with_existed_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
self.assertTrue(np.allclose(boxlist.get_field('boxes'), boxes))
def test_get_field_with_nonexited_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
with self.assertRaises(ValueError):
boxlist.get_field('scores')
class AddExtraFieldTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist = np_box_list.BoxList(boxes)
def test_add_already_existed_field(self):
with self.assertRaises(ValueError):
self.boxlist.add_field('boxes', np.array([[0, 0, 0, 1, 0]], dtype=float))
def test_add_invalid_field_data(self):
with self.assertRaises(ValueError):
self.boxlist.add_field('scores', np.array([0.5, 0.7], dtype=float))
with self.assertRaises(ValueError):
self.boxlist.add_field('scores',
np.array([0.5, 0.7, 0.9, 0.1], dtype=float))
def test_add_single_dimensional_field_data(self):
boxlist = self.boxlist
scores = np.array([0.5, 0.7, 0.9], dtype=float)
boxlist.add_field('scores', scores)
self.assertTrue(np.allclose(scores, self.boxlist.get_field('scores')))
def test_add_multi_dimensional_field_data(self):
boxlist = self.boxlist
labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]],
dtype=int)
boxlist.add_field('labels', labels)
self.assertTrue(np.allclose(labels, self.boxlist.get_field('labels')))
def test_get_extra_fields(self):
boxlist = self.boxlist
self.assertItemsEqual(boxlist.get_extra_fields(), [])
scores = np.array([0.5, 0.7, 0.9], dtype=float)
boxlist.add_field('scores', scores)
self.assertItemsEqual(boxlist.get_extra_fields(), ['scores'])
labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]],
dtype=int)
boxlist.add_field('labels', labels)
self.assertItemsEqual(boxlist.get_extra_fields(), ['scores', 'labels'])
def test_get_coordinates(self):
y_min, x_min, y_max, x_max = self.boxlist.get_coordinates()
expected_y_min = np.array([3.0, 14.0, 0.0], dtype=float)
expected_x_min = np.array([4.0, 14.0, 0.0], dtype=float)
expected_y_max = np.array([6.0, 15.0, 20.0], dtype=float)
expected_x_max = np.array([8.0, 15.0, 20.0], dtype=float)
self.assertTrue(np.allclose(y_min, expected_y_min))
self.assertTrue(np.allclose(x_min, expected_x_min))
self.assertTrue(np.allclose(y_max, expected_y_max))
self.assertTrue(np.allclose(x_max, expected_x_max))
def test_num_boxes(self):
boxes = np.array([[0., 0., 100., 100.], [10., 30., 50., 70.]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
expected_num_boxes = 2
    self.assertEqual(boxlist.num_boxes(), expected_num_boxes)
if __name__ == '__main__':
tf.test.main()
| 5,297 | 36.842857 | 80 | py |
models | models-master/research/object_detection/utils/spatial_transform_ops.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spatial transformation ops like RoIAlign, CropAndResize."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
def _coordinate_vector_1d(start, end, size, align_endpoints):
"""Generates uniformly spaced coordinate vector.
Args:
start: A float tensor of shape [batch, num_boxes] indicating start values.
end: A float tensor of shape [batch, num_boxes] indicating end values.
size: Number of points in coordinate vector.
align_endpoints: Whether to align first and last points exactly to
endpoints.
Returns:
A 3D float tensor of shape [batch, num_boxes, size] containing grid
coordinates.
"""
start = tf.expand_dims(start, -1)
end = tf.expand_dims(end, -1)
length = end - start
if align_endpoints:
relative_grid_spacing = tf.linspace(0.0, 1.0, size)
offset = 0 if size > 1 else length / 2
else:
relative_grid_spacing = tf.linspace(0.0, 1.0, size + 1)[:-1]
offset = length / (2 * size)
relative_grid_spacing = tf.reshape(relative_grid_spacing, [1, 1, size])
relative_grid_spacing = tf.cast(relative_grid_spacing, dtype=start.dtype)
absolute_grid = start + offset + relative_grid_spacing * length
return absolute_grid
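# Worked example, added for illustration (not in the original file): with
# start=0.0, end=10.0 and size=2,
#   align_endpoints=False gives offset = 10 / (2 * 2) = 2.5 and relative
#     spacing [0.0, 0.5], i.e. cell-center coordinates [2.5, 7.5];
#   align_endpoints=True gives offset = 0 and relative spacing [0.0, 1.0],
#     i.e. endpoint-aligned coordinates [0.0, 10.0].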
def box_grid_coordinate_vectors(boxes, size_y, size_x, align_corners=False):
"""Generates coordinate vectors for a `size x size` grid in boxes.
Each box is subdivided uniformly into a grid consisting of size x size
rectangular cells. This function returns coordinate vectors describing
the center of each cell.
If `align_corners` is true, grid points are uniformly spread such that the
corner points on the grid exactly overlap corners of the boxes.
Note that output coordinates are expressed in the same coordinate frame as
input boxes.
Args:
boxes: A float tensor of shape [batch, num_boxes, 4] containing boxes of the
form [ymin, xmin, ymax, xmax].
size_y: Size of the grid in y axis.
size_x: Size of the grid in x axis.
align_corners: Whether to align the corner grid points exactly with box
corners.
Returns:
box_grid_y: A float tensor of shape [batch, num_boxes, size_y] containing y
coordinates for grid points.
box_grid_x: A float tensor of shape [batch, num_boxes, size_x] containing x
coordinates for grid points.
"""
ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=-1)
box_grid_y = _coordinate_vector_1d(ymin, ymax, size_y, align_corners)
box_grid_x = _coordinate_vector_1d(xmin, xmax, size_x, align_corners)
return box_grid_y, box_grid_x
def feature_grid_coordinate_vectors(box_grid_y, box_grid_x):
"""Returns feature grid point coordinate vectors for bilinear interpolation.
Box grid is specified in absolute coordinate system with origin at left top
(0, 0). The returned coordinate vectors contain 0-based feature point indices.
This function snaps each point in the box grid to nearest 4 points on the
feature map.
In this function we also follow the convention of treating feature pixels as
point objects with no spatial extent.
Args:
box_grid_y: A float tensor of shape [batch, num_boxes, size] containing y
coordinate vector of the box grid.
box_grid_x: A float tensor of shape [batch, num_boxes, size] containing x
coordinate vector of the box grid.
Returns:
feature_grid_y0: An int32 tensor of shape [batch, num_boxes, size]
containing y coordinate vector for the top neighbors.
    feature_grid_x0: An int32 tensor of shape [batch, num_boxes, size]
      containing x coordinate vector for the left neighbors.
    feature_grid_y1: An int32 tensor of shape [batch, num_boxes, size]
      containing y coordinate vector for the bottom neighbors.
    feature_grid_x1: An int32 tensor of shape [batch, num_boxes, size]
      containing x coordinate vector for the right neighbors.
"""
feature_grid_y0 = tf.floor(box_grid_y)
feature_grid_x0 = tf.floor(box_grid_x)
feature_grid_y1 = tf.floor(box_grid_y + 1)
feature_grid_x1 = tf.floor(box_grid_x + 1)
feature_grid_y0 = tf.cast(feature_grid_y0, dtype=tf.int32)
feature_grid_y1 = tf.cast(feature_grid_y1, dtype=tf.int32)
feature_grid_x0 = tf.cast(feature_grid_x0, dtype=tf.int32)
feature_grid_x1 = tf.cast(feature_grid_x1, dtype=tf.int32)
return (feature_grid_y0, feature_grid_x0, feature_grid_y1, feature_grid_x1)
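# Example, for illustration only: a box-grid coordinate of 2.3 snaps to the
# feature rows y0 = floor(2.3) = 2 and y1 = 3; the fractional part 0.3 later
# becomes the bilinear weight ly in multilevel_roi_align (hy = 1 - ly = 0.7).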
def _valid_indicator(feature_grid_y, feature_grid_x, true_feature_shapes):
"""Computes a indicator vector for valid indices.
Computes an indicator vector which is true for points on feature map and
false for points off feature map.
Args:
feature_grid_y: An int32 tensor of shape [batch, num_boxes, size_y]
containing y coordinate vector.
feature_grid_x: An int32 tensor of shape [batch, num_boxes, size_x]
containing x coordinate vector.
    true_feature_shapes: An int32 tensor of shape [batch, num_boxes, 2]
containing valid height and width of feature maps. Feature maps are
assumed to be aligned to the left top corner.
Returns:
indices: A 1D bool tensor indicating valid feature indices.
"""
height = tf.cast(true_feature_shapes[:, :, 0:1], dtype=feature_grid_y.dtype)
width = tf.cast(true_feature_shapes[:, :, 1:2], dtype=feature_grid_x.dtype)
valid_indicator = tf.logical_and(
tf.expand_dims(
tf.logical_and(feature_grid_y >= 0, tf.less(feature_grid_y, height)),
3),
tf.expand_dims(
tf.logical_and(feature_grid_x >= 0, tf.less(feature_grid_x, width)),
2))
return tf.reshape(valid_indicator, [-1])
def ravel_indices(feature_grid_y, feature_grid_x, num_levels, height, width,
box_levels):
"""Returns grid indices in a flattened feature map of shape [-1, channels].
The returned 1-D array can be used to gather feature grid points from a
feature map that has been flattened from [batch, num_levels, max_height,
max_width, channels] to [batch * num_levels * max_height * max_width,
channels].
Args:
feature_grid_y: An int32 tensor of shape [batch, num_boxes, size_y]
containing y coordinate vector.
feature_grid_x: An int32 tensor of shape [batch, num_boxes, size_x]
containing x coordinate vector.
num_levels: Number of feature levels.
height: An integer indicating the padded height of feature maps.
width: An integer indicating the padded width of feature maps.
box_levels: An int32 tensor of shape [batch, num_boxes] indicating
feature level assigned to each box.
Returns:
indices: A 1D int32 tensor containing feature point indices in a flattened
feature grid.
"""
num_boxes = tf.shape(feature_grid_y)[1]
batch_size = tf.shape(feature_grid_y)[0]
size_y = tf.shape(feature_grid_y)[2]
size_x = tf.shape(feature_grid_x)[2]
height_dim_offset = width
level_dim_offset = height * height_dim_offset
batch_dim_offset = num_levels * level_dim_offset
batch_dim_indices = (
tf.reshape(
tf.range(batch_size) * batch_dim_offset, [batch_size, 1, 1, 1]) *
tf.ones([1, num_boxes, size_y, size_x], dtype=tf.int32))
box_level_indices = (
tf.reshape(box_levels * level_dim_offset, [batch_size, num_boxes, 1, 1]) *
tf.ones([1, 1, size_y, size_x], dtype=tf.int32))
height_indices = (
tf.reshape(feature_grid_y * height_dim_offset,
[batch_size, num_boxes, size_y, 1]) *
tf.ones([1, 1, 1, size_x], dtype=tf.int32))
width_indices = (
tf.reshape(feature_grid_x, [batch_size, num_boxes, 1, size_x])
* tf.ones([1, 1, size_y, 1], dtype=tf.int32))
indices = (
batch_dim_indices + box_level_indices + height_indices + width_indices)
flattened_indices = tf.reshape(indices, [-1])
return flattened_indices
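# Worked example, for illustration only: with num_levels=2 and feature maps
# padded to height=4, width=5, the flattened index of point (y=3, x=1) on
# level 1 of batch element 0 is
#   batch * (2 * 4 * 5) + level * (4 * 5) + y * 5 + x = 0 + 20 + 15 + 1 = 36,
# i.e. a row offset into the feature tensor reshaped to [-1, channels].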
def pad_to_max_size(features):
"""Pads features to max height and max width and stacks them up.
Args:
features: A list of num_levels 4D float tensors of shape [batch, height_i,
width_i, channels] containing feature maps.
Returns:
stacked_features: A 5D float tensor of shape [batch, num_levels, max_height,
max_width, channels] containing stacked features.
true_feature_shapes: A 2D int32 tensor of shape [num_levels, 2] containing
height and width of the feature maps before padding.
"""
if len(features) == 1:
return tf.expand_dims(features[0],
1), tf.expand_dims(tf.shape(features[0])[1:3], 0)
if all([feature.shape.is_fully_defined() for feature in features]):
heights = [feature.shape[1] for feature in features]
widths = [feature.shape[2] for feature in features]
max_height = max(heights)
max_width = max(widths)
else:
heights = [tf.shape(feature)[1] for feature in features]
widths = [tf.shape(feature)[2] for feature in features]
max_height = tf.reduce_max(heights)
max_width = tf.reduce_max(widths)
features_all = [
tf.image.pad_to_bounding_box(feature, 0, 0, max_height,
max_width) for feature in features
]
features_all = tf.stack(features_all, axis=1)
true_feature_shapes = tf.stack([tf.shape(feature)[1:3]
for feature in features])
return features_all, true_feature_shapes
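# Example, for illustration only: padding feature maps of shapes
# [2, 32, 32, 8] and [2, 16, 16, 8] yields stacked_features of shape
# [2, 2, 32, 32, 8] and true_feature_shapes equal to [[32, 32], [16, 16]].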
def _gather_valid_indices(tensor, indices, padding_value=0.0):
"""Gather values for valid indices.
TODO(rathodv): We can't use ops.gather_with_padding_values due to cyclic
dependency. Start using it after migrating all users of spatial ops to import
this module directly rather than util/ops.py
Args:
tensor: A tensor to gather valid values from.
indices: A 1-D int32 tensor containing indices along axis 0 of `tensor`.
Invalid indices must be marked with -1.
padding_value: Value to return for invalid indices.
Returns:
A tensor sliced based on indices. For indices that are equal to -1, returns
rows of padding value.
"""
padded_tensor = tf.concat(
[
padding_value *
tf.ones([1, tf.shape(tensor)[-1]], dtype=tensor.dtype), tensor
],
axis=0,
)
# tf.concat gradient op uses tf.where(condition) (which is not
# supported on TPU) when the inputs to it are tf.IndexedSlices instead of
# tf.Tensor. Since gradient op for tf.gather returns tf.IndexedSlices,
# we add a dummy op inbetween tf.concat and tf.gather to ensure tf.concat
# gradient function gets tf.Tensor inputs and not tf.IndexedSlices.
padded_tensor *= 1.0
return tf.gather(padded_tensor, indices + 1)
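# Example, for illustration only: with tensor=[[1., 2.], [3., 4.]] and
# indices=[1, -1], the result is [[3., 4.], [0., 0.]]: the -1 entry selects
# the prepended padding row of zeros.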
def multilevel_roi_align(features, boxes, box_levels, output_size,
num_samples_per_cell_y=1, num_samples_per_cell_x=1,
align_corners=False, extrapolation_value=0.0,
scope=None):
"""Applies RoI Align op and returns feature for boxes.
Given multiple features maps indexed by different levels, and a set of boxes
where each box is mapped to a certain level, this function selectively crops
and resizes boxes from the corresponding feature maps.
We follow the RoI Align technique in https://arxiv.org/pdf/1703.06870.pdf
figure 3. Specifically, each box is subdivided uniformly into a grid
consisting of output_size[0] x output_size[1] rectangular cells. Within each
cell we select `num_points` points uniformly and compute feature values using
bilinear interpolation. Finally, we average pool the interpolated values in
each cell to obtain a [output_size[0], output_size[1], channels] feature.
If `align_corners` is true, sampling points are uniformly spread such that
corner points exactly overlap corners of the boxes.
In this function we also follow the convention of treating feature pixels as
point objects with no spatial extent.
Args:
features: A list of 4D float tensors of shape [batch_size, max_height,
max_width, channels] containing features. Note that each feature map must
have the same number of channels.
boxes: A 3D float tensor of shape [batch_size, num_boxes, 4] containing
boxes of the form [ymin, xmin, ymax, xmax] in normalized coordinates.
    box_levels: A 2D int32 tensor of shape [batch_size, num_boxes]
representing the feature level index for each box.
    output_size: A list of two integers [size_y, size_x] indicating the output
feature size for each box.
num_samples_per_cell_y: Number of grid points to sample along y axis in each
cell.
num_samples_per_cell_x: Number of grid points to sample along x axis in each
cell.
align_corners: Whether to align the corner grid points exactly with box
corners.
extrapolation_value: a float value to use for extrapolation.
scope: Scope name to use for this op.
Returns:
A 5D float tensor of shape [batch_size, num_boxes, output_size[0],
output_size[1], channels] representing the cropped features.
"""
with tf.name_scope(scope, 'MultiLevelRoIAlign'):
features, true_feature_shapes = pad_to_max_size(features)
batch_size = shape_utils.combined_static_and_dynamic_shape(features)[0]
num_levels = features.get_shape().as_list()[1]
max_feature_height = tf.shape(features)[2]
max_feature_width = tf.shape(features)[3]
num_filters = features.get_shape().as_list()[4]
num_boxes = tf.shape(boxes)[1]
# Convert boxes to absolute co-ordinates.
true_feature_shapes = tf.cast(true_feature_shapes, dtype=boxes.dtype)
true_feature_shapes = tf.gather(true_feature_shapes, box_levels)
boxes *= tf.concat([true_feature_shapes - 1] * 2, axis=-1)
size_y = output_size[0] * num_samples_per_cell_y
size_x = output_size[1] * num_samples_per_cell_x
box_grid_y, box_grid_x = box_grid_coordinate_vectors(
boxes, size_y=size_y, size_x=size_x, align_corners=align_corners)
(feature_grid_y0, feature_grid_x0, feature_grid_y1,
feature_grid_x1) = feature_grid_coordinate_vectors(box_grid_y, box_grid_x)
feature_grid_y = tf.reshape(
tf.stack([feature_grid_y0, feature_grid_y1], axis=3),
[batch_size, num_boxes, -1])
feature_grid_x = tf.reshape(
tf.stack([feature_grid_x0, feature_grid_x1], axis=3),
[batch_size, num_boxes, -1])
feature_coordinates = ravel_indices(feature_grid_y, feature_grid_x,
num_levels, max_feature_height,
max_feature_width, box_levels)
valid_indices = _valid_indicator(feature_grid_y, feature_grid_x,
true_feature_shapes)
feature_coordinates = tf.where(valid_indices, feature_coordinates,
-1 * tf.ones_like(feature_coordinates))
flattened_features = tf.reshape(features, [-1, num_filters])
flattened_feature_values = _gather_valid_indices(flattened_features,
feature_coordinates,
extrapolation_value)
features_per_box = tf.reshape(
flattened_feature_values,
[batch_size, num_boxes, size_y * 2, size_x * 2, num_filters])
# Cast tensors into dtype of features.
box_grid_y = tf.cast(box_grid_y, dtype=features_per_box.dtype)
box_grid_x = tf.cast(box_grid_x, dtype=features_per_box.dtype)
feature_grid_y0 = tf.cast(feature_grid_y0, dtype=features_per_box.dtype)
feature_grid_x0 = tf.cast(feature_grid_x0, dtype=features_per_box.dtype)
# RoI Align operation is a bilinear interpolation of four
# neighboring feature points f0, f1, f2, and f3 onto point y, x given by
# f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T
# [f10, f11]]
#
# Unrolling the matrix multiplies gives us:
# f(y, x) = (hy * hx) f00 + (hy * lx) f01 + (ly * hx) f10 + (lx * ly) f11
# f(y, x) = w00 * f00 + w01 * f01 + w10 * f10 + w11 * f11
#
# This can be computed by applying pointwise multiplication and sum_pool in
# a 2x2 window.
ly = box_grid_y - feature_grid_y0
lx = box_grid_x - feature_grid_x0
hy = 1.0 - ly
hx = 1.0 - lx
kernel_y = tf.reshape(
tf.stack([hy, ly], axis=3), [batch_size, num_boxes, size_y * 2, 1])
kernel_x = tf.reshape(
tf.stack([hx, lx], axis=3), [batch_size, num_boxes, 1, size_x * 2])
# Multiplier 4 is to make tf.nn.avg_pool behave like sum_pool.
interpolation_kernel = kernel_y * kernel_x * 4
# Interpolate the gathered features with computed interpolation kernels.
    features_per_box *= tf.expand_dims(interpolation_kernel, axis=4)
features_per_box = tf.reshape(
features_per_box,
[batch_size * num_boxes, size_y * 2, size_x * 2, num_filters])
# This combines the two pooling operations - sum_pool to perform bilinear
# interpolation and avg_pool to pool the values in each bin.
features_per_box = tf.nn.avg_pool(
features_per_box,
[1, num_samples_per_cell_y * 2, num_samples_per_cell_x * 2, 1],
[1, num_samples_per_cell_y * 2, num_samples_per_cell_x * 2, 1], 'VALID')
features_per_box = tf.reshape(
features_per_box,
[batch_size, num_boxes, output_size[0], output_size[1], num_filters])
return features_per_box
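# Illustrative sketch, not part of the original module: a plausible call to
# multilevel_roi_align with two feature levels and one box per image. All
# shapes and values below are made up; boxes are in normalized coordinates.
def _example_multilevel_roi_align():
  features = [
      tf.ones([2, 32, 32, 8], dtype=tf.float32),  # level 0
      tf.ones([2, 16, 16, 8], dtype=tf.float32),  # level 1
  ]
  boxes = tf.constant([[[0.1, 0.1, 0.5, 0.5]],
                       [[0.2, 0.2, 0.9, 0.9]]], dtype=tf.float32)
  box_levels = tf.constant([[0], [1]], dtype=tf.int32)
  # The result has shape [2, 1, 7, 7, 8].
  return multilevel_roi_align(features, boxes, box_levels, output_size=[7, 7])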
def multilevel_native_crop_and_resize(images, boxes, box_levels,
crop_size, scope=None):
"""Multilevel native crop and resize.
Same as `multilevel_matmul_crop_and_resize` but uses tf.image.crop_and_resize.
Args:
images: A list of 4-D tensor of shape
[batch, image_height, image_width, depth] representing features of
different size.
boxes: A `Tensor` of type `float32`.
A 3-D tensor of shape `[batch, num_boxes, 4]`. The boxes are specified in
normalized coordinates and are of the form `[y1, x1, y2, x2]`. A
normalized coordinate value of `y` is mapped to the image coordinate at
`y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image
      height is mapped to `[0, image_height - 1]` in image height coordinates.
We do allow y1 > y2, in which case the sampled crop is an up-down flipped
version of the original image. The width dimension is treated similarly.
Normalized coordinates outside the `[0, 1]` range are allowed, in which
case we use `extrapolation_value` to extrapolate the input image values.
box_levels: A 2-D tensor of shape [batch, num_boxes] representing the level
of the box.
crop_size: A list of two integers `[crop_height, crop_width]`. All
cropped image patches are resized to this size. The aspect ratio of the
image content is not preserved. Both `crop_height` and `crop_width` need
to be positive.
scope: A name for the operation (optional).
Returns:
A 5-D float tensor of shape `[batch, num_boxes, crop_height, crop_width,
depth]`
"""
if box_levels is None:
return native_crop_and_resize(images[0], boxes, crop_size, scope)
with tf.name_scope('MultiLevelNativeCropAndResize'):
cropped_feature_list = []
for level, image in enumerate(images):
# For each level, crop the feature according to all boxes
# set the cropped feature not at this level to 0 tensor.
# Consider more efficient way of computing cropped features.
cropped = native_crop_and_resize(image, boxes, crop_size, scope)
cond = tf.tile(
tf.equal(box_levels, level)[:, :, tf.newaxis],
[1, 1] + [tf.math.reduce_prod(cropped.shape.as_list()[2:])])
cond = tf.reshape(cond, cropped.shape)
cropped_final = tf.where(cond, cropped, tf.zeros_like(cropped))
cropped_feature_list.append(cropped_final)
return tf.math.reduce_sum(cropped_feature_list, axis=0)
def native_crop_and_resize(image, boxes, crop_size, scope=None):
"""Same as `matmul_crop_and_resize` but uses tf.image.crop_and_resize."""
def get_box_inds(proposals):
proposals_shape = proposals.shape.as_list()
if any(dim is None for dim in proposals_shape):
proposals_shape = tf.shape(proposals)
ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
multiplier = tf.expand_dims(
tf.range(start=0, limit=proposals_shape[0]), 1)
return tf.reshape(ones_mat * multiplier, [-1])
with tf.name_scope(scope, 'CropAndResize'):
cropped_regions = tf.image.crop_and_resize(
image, tf.reshape(boxes, [-1] + boxes.shape.as_list()[2:]),
get_box_inds(boxes), crop_size)
final_shape = tf.concat([tf.shape(boxes)[:2],
tf.shape(cropped_regions)[1:]], axis=0)
return tf.reshape(cropped_regions, final_shape)
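# Example, for illustration only: for proposals with batch size 2 and 3 boxes
# per image, get_box_inds above yields [0, 0, 0, 1, 1, 1], matching the
# flattened [batch * num_boxes, 4] box layout that tf.image.crop_and_resize
# expects.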
def multilevel_matmul_crop_and_resize(images, boxes, box_levels, crop_size,
extrapolation_value=0.0, scope=None):
"""Multilevel matmul crop and resize.
Same as `matmul_crop_and_resize` but crop images according to box levels.
Args:
images: A list of 4-D tensor of shape
[batch, image_height, image_width, depth] representing features of
different size.
boxes: A `Tensor` of type `float32` or 'bfloat16'.
A 3-D tensor of shape `[batch, num_boxes, 4]`. The boxes are specified in
normalized coordinates and are of the form `[y1, x1, y2, x2]`. A
normalized coordinate value of `y` is mapped to the image coordinate at
`y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image
      height is mapped to `[0, image_height - 1]` in image height coordinates.
We do allow y1 > y2, in which case the sampled crop is an up-down flipped
version of the original image. The width dimension is treated similarly.
Normalized coordinates outside the `[0, 1]` range are allowed, in which
case we use `extrapolation_value` to extrapolate the input image values.
box_levels: A 2-D tensor of shape [batch, num_boxes] representing the level
of the box.
crop_size: A list of two integers `[crop_height, crop_width]`. All
cropped image patches are resized to this size. The aspect ratio of the
image content is not preserved. Both `crop_height` and `crop_width` need
to be positive.
extrapolation_value: A float value to use for extrapolation.
scope: A name for the operation (optional).
Returns:
A 5-D float tensor of shape `[batch, num_boxes, crop_height, crop_width,
depth]`
"""
with tf.name_scope(scope, 'MultiLevelMatMulCropAndResize'):
if box_levels is None:
box_levels = tf.zeros(tf.shape(boxes)[:2], dtype=tf.int32)
return multilevel_roi_align(images,
boxes,
box_levels,
crop_size,
align_corners=True,
extrapolation_value=extrapolation_value)
def matmul_crop_and_resize(image, boxes, crop_size, extrapolation_value=0.0,
scope=None):
"""Matrix multiplication based implementation of the crop and resize op.
Extracts crops from the input image tensor and bilinearly resizes them
(possibly with aspect ratio change) to a common output size specified by
crop_size. This is more general than the crop_to_bounding_box op which
extracts a fixed size slice from the input image and does not allow
resizing or aspect ratio change.
Returns a tensor with crops from the input image at positions defined at
the bounding box locations in boxes. The cropped boxes are all resized
(with bilinear interpolation) to a fixed size = `[crop_height, crop_width]`.
The result is a 5-D tensor `[batch, num_boxes, crop_height, crop_width,
depth]`.
Note that this operation is meant to replicate the behavior of the standard
tf.image.crop_and_resize operation but there are a few differences.
Specifically:
1) There is no `box_indices` argument --- to run this op on multiple images,
one must currently call this op independently on each image.
2) The `crop_size` parameter is assumed to be statically defined.
Moreover, the number of boxes must be strictly nonzero.
Args:
image: A `Tensor`. Must be one of the following types: `uint8`, `int8`,
`int16`, `int32`, `int64`, `half`, 'bfloat16', `float32`, `float64`.
A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
Both `image_height` and `image_width` need to be positive.
boxes: A `Tensor` of type `float32` or 'bfloat16'.
A 3-D tensor of shape `[batch, num_boxes, 4]`. The boxes are specified in
normalized coordinates and are of the form `[y1, x1, y2, x2]`. A
normalized coordinate value of `y` is mapped to the image coordinate at
`y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image
      height is mapped to `[0, image_height - 1]` in image height coordinates.
We do allow y1 > y2, in which case the sampled crop is an up-down flipped
version of the original image. The width dimension is treated similarly.
Normalized coordinates outside the `[0, 1]` range are allowed, in which
case we use `extrapolation_value` to extrapolate the input image values.
crop_size: A list of two integers `[crop_height, crop_width]`. All
cropped image patches are resized to this size. The aspect ratio of the
image content is not preserved. Both `crop_height` and `crop_width` need
to be positive.
extrapolation_value: a float value to use for extrapolation.
scope: A name for the operation (optional).
Returns:
A 5-D tensor of shape `[batch, num_boxes, crop_height, crop_width, depth]`
"""
with tf.name_scope(scope, 'MatMulCropAndResize'):
box_levels = tf.zeros(tf.shape(boxes)[:2], dtype=tf.int32)
return multilevel_roi_align([image],
boxes,
box_levels,
crop_size,
align_corners=True,
extrapolation_value=extrapolation_value)
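# Illustrative sketch, not part of the original module: matmul_crop_and_resize
# keeps the [batch, num_boxes, ...] structure in its output, unlike
# tf.image.crop_and_resize which returns a flattened batch of crops. The image
# and box values below are made up.
def _example_matmul_crop_and_resize():
  image = tf.ones([1, 8, 8, 3], dtype=tf.float32)
  boxes = tf.constant([[[0.0, 0.0, 0.5, 0.5]]], dtype=tf.float32)
  # The result has shape [1, 1, 4, 4, 3].
  return matmul_crop_and_resize(image, boxes, crop_size=[4, 4])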
| 26,797 | 44.730375 | 80 | py |
models | models-master/research/object_detection/utils/test_case.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A convenience wrapper around tf.test.TestCase to test with TPU, TF1, TF2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
import tensorflow.compat.v1 as tf
from tensorflow.python import tf2 # pylint: disable=import-outside-toplevel
from object_detection.utils import tf_version
if not tf2.enabled():
from tensorflow.contrib import tpu as contrib_tpu # pylint: disable=g-import-not-at-top, line-too-long
flags = tf.app.flags
flags.DEFINE_bool('tpu_test', False, 'Deprecated Flag.')
FLAGS = flags.FLAGS
class TestCase(tf.test.TestCase):
"""Base Test class to handle execution under {TF1.X, TF2.X} x {TPU, CPU}.
This class determines the TF version and availability of TPUs to set up
tests appropriately.
"""
def maybe_extract_single_output(self, outputs):
if isinstance(outputs, list) or isinstance(outputs, tuple):
if isinstance(outputs[0], tf.Tensor):
outputs_np = [output.numpy() for output in outputs]
else:
outputs_np = outputs
if len(outputs_np) == 1:
return outputs_np[0]
else:
return outputs_np
else:
if isinstance(outputs, tf.Tensor):
return outputs.numpy()
else:
return outputs
def has_tpu(self):
"""Returns whether there are any logical TPU devices."""
return bool(tf.config.experimental.list_logical_devices(device_type='TPU'))
def is_tf2(self):
"""Returns whether TF2 is enabled."""
return tf_version.is_tf2()
def execute_tpu_tf1(self, compute_fn, inputs, graph=None):
"""Executes compute_fn on TPU with Tensorflow 1.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single numpy array.
"""
with self.session(graph=(graph or tf.Graph())) as sess:
placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
def wrap_graph_fn(*args, **kwargs):
results = compute_fn(*args, **kwargs)
if (not (isinstance(results, dict) or isinstance(results, tf.Tensor))
and hasattr(results, '__iter__')):
results = list(results)
return results
tpu_computation = contrib_tpu.rewrite(wrap_graph_fn, placeholders)
sess.run(contrib_tpu.initialize_system())
sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
tf.local_variables_initializer()])
materialized_results = sess.run(tpu_computation,
feed_dict=dict(zip(placeholders, inputs)))
sess.run(contrib_tpu.shutdown_system())
return self.maybe_extract_single_output(materialized_results)
def execute_tpu_tf2(self, compute_fn, inputs):
"""Executes compute_fn on TPU with Tensorflow 2.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single numpy array.
"""
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology, num_replicas=1)
strategy = tf.distribute.experimental.TPUStrategy(
resolver, device_assignment=device_assignment)
@tf.function
def run():
tf_inputs = [tf.constant(input_t) for input_t in inputs]
return strategy.run(compute_fn, args=tf_inputs)
outputs = run()
tf.tpu.experimental.shutdown_tpu_system()
return self.maybe_extract_single_output(outputs)
def execute_cpu_tf1(self, compute_fn, inputs, graph=None):
"""Executes compute_fn on CPU with Tensorflow 1.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single numpy array.
"""
if self.is_tf2():
      raise ValueError('Required version Tensorflow 1.X is not available.')
with self.session(graph=(graph or tf.Graph())) as sess:
placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
results = compute_fn(*placeholders)
if (not (isinstance(results, dict) or isinstance(results, tf.Tensor)) and
hasattr(results, '__iter__')):
results = list(results)
sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
tf.local_variables_initializer()])
materialized_results = sess.run(results, feed_dict=dict(zip(placeholders,
inputs)))
return self.maybe_extract_single_output(materialized_results)
def execute_cpu_tf2(self, compute_fn, inputs):
"""Executes compute_fn on CPU with Tensorflow 2.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single numpy array.
"""
if not self.is_tf2():
raise ValueError('Required version TensorFlow 2.0 is not available.')
@tf.function
def run():
tf_inputs = [tf.constant(input_t) for input_t in inputs]
return compute_fn(*tf_inputs)
return self.maybe_extract_single_output(run())
def execute_cpu(self, compute_fn, inputs, graph=None):
"""Executes compute_fn on CPU.
Depending on the underlying TensorFlow installation (build deps) runs in
either TF 1.X or TF 2.X style.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single tensor.
"""
if self.is_tf2():
return self.execute_cpu_tf2(compute_fn, inputs)
else:
return self.execute_cpu_tf1(compute_fn, inputs, graph)
def execute_tpu(self, compute_fn, inputs, graph=None):
"""Executes compute_fn on TPU.
Depending on the underlying TensorFlow installation (build deps) runs in
either TF 1.X or TF 2.X style.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single tensor.
"""
if not self.has_tpu():
raise ValueError('No TPU Device found.')
if self.is_tf2():
return self.execute_tpu_tf2(compute_fn, inputs)
else:
return self.execute_tpu_tf1(compute_fn, inputs, graph)
def execute_tf2(self, compute_fn, inputs):
"""Runs compute_fn with TensorFlow 2.0.
Executes on TPU if available, otherwise executes on CPU.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single tensor.
"""
if not self.is_tf2():
raise ValueError('Required version TensorFlow 2.0 is not available.')
if self.has_tpu():
return self.execute_tpu_tf2(compute_fn, inputs)
else:
return self.execute_cpu_tf2(compute_fn, inputs)
def execute_tf1(self, compute_fn, inputs, graph=None):
"""Runs compute_fn with TensorFlow 1.X.
Executes on TPU if available, otherwise executes on CPU.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single tensor.
"""
if self.is_tf2():
      raise ValueError('Required version TensorFlow 1.X is not available.')
if self.has_tpu():
return self.execute_tpu_tf1(compute_fn, inputs, graph)
else:
return self.execute_cpu_tf1(compute_fn, inputs, graph)
def execute(self, compute_fn, inputs, graph=None):
"""Runs compute_fn with inputs and returns results.
* Executes in either TF1.X or TF2.X style based on the TensorFlow version.
* Executes on TPU if available, otherwise executes on CPU.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single tensor.
"""
if self.has_tpu() and tf2.enabled():
return self.execute_tpu_tf2(compute_fn, inputs)
elif not self.has_tpu() and tf2.enabled():
return self.execute_cpu_tf2(compute_fn, inputs)
elif self.has_tpu() and not tf2.enabled():
return self.execute_tpu_tf1(compute_fn, inputs, graph)
else:
return self.execute_cpu_tf1(compute_fn, inputs, graph)
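  # Usage sketch (illustrative only; `graph_fn` and the inputs below are
  # hypothetical, not part of this module): a test wraps its TensorFlow
  # computation in a function and lets `execute` pick the appropriate
  # TF1.X/TF2.X and CPU/TPU path.
  #
  #   def test_elementwise_sum(self):
  #     def graph_fn(a, b):
  #       return a + b
  #     inputs = [np.ones((2, 3), np.float32), 2 * np.ones((2, 3), np.float32)]
  #     result = self.execute(graph_fn, inputs)
  #     self.assertAllClose(result, 3 * np.ones((2, 3), np.float32))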
| 11,265 | 38.391608 | 105 | py |
models | models-master/research/object_detection/utils/np_box_ops.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for [N, 4] numpy arrays representing bounding boxes.
Example box operations that are supported:
* Areas: compute bounding box areas
* IOU: pairwise intersection-over-union scores
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def area(boxes):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
    a numpy array with shape [N] representing box areas
"""
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def intersection(boxes1, boxes2):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxes1, boxes2):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2)
area1 = area(boxes1)
area2 = area(boxes2)
union = np.expand_dims(area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union
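# Example (a minimal sketch, with made-up coordinates): a unit box overlapping
# another unit box by half gives intersection 0.5 and union 1.5, i.e. an IOU
# of 1/3.
#
#   boxes1 = np.array([[0., 0., 1., 1.]], dtype=np.float32)
#   boxes2 = np.array([[0., 0.5, 1., 1.5]], dtype=np.float32)
#   iou(boxes1, boxes2)  # ~[[0.3333]]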
def ioa(boxes1, boxes2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, IOA(box1, box2) != IOA(box2, box1).
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
"""
intersect = intersection(boxes1, boxes2)
areas = np.expand_dims(area(boxes2), axis=0)
return intersect / areas
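# Example (a minimal sketch, with made-up coordinates) showing the asymmetry of
# ioa: for a unit box and a quarter-sized box fully contained in it,
# ioa(big, small) is 1.0 while ioa(small, big) is 0.25.
#
#   big = np.array([[0., 0., 1., 1.]], dtype=np.float32)
#   small = np.array([[0., 0., 0.5, 0.5]], dtype=np.float32)
#   ioa(big, small)    # [[1.0]]
#   ioa(small, big)    # [[0.25]]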
| 3,431 | 32.320388 | 80 | py |
models | models-master/research/object_detection/utils/test_utils.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions which are convenient for unit testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import anchor_generator
from object_detection.core import box_coder
from object_detection.core import box_list
from object_detection.core import box_predictor
from object_detection.core import matcher
from object_detection.utils import shape_utils
from object_detection.utils import tf_version
# Default size (both width and height) used for testing mask predictions.
DEFAULT_MASK_SIZE = 5
class MockBoxCoder(box_coder.BoxCoder):
"""Simple `difference` BoxCoder."""
@property
def code_size(self):
return 4
def _encode(self, boxes, anchors):
return boxes.get() - anchors.get()
def _decode(self, rel_codes, anchors):
return box_list.BoxList(rel_codes + anchors.get())
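  # Example (a minimal sketch, values hypothetical): encoding subtracts the
  # anchor coordinates from the box coordinates and decoding adds them back.
  #
  #   boxes = box_list.BoxList(tf.constant([[0., 0., 2., 2.]]))
  #   anchors = box_list.BoxList(tf.constant([[1., 1., 1., 1.]]))
  #   MockBoxCoder().encode(boxes, anchors)  # [[-1., -1., 1., 1.]]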
class MockMaskHead(object):
"""Simple maskhead that returns all zeros as mask predictions."""
def __init__(self, num_classes):
self._num_classes = num_classes
def predict(self, features):
batch_size = tf.shape(features)[0]
return tf.zeros((batch_size, 1, self._num_classes, DEFAULT_MASK_SIZE,
DEFAULT_MASK_SIZE),
dtype=tf.float32)
class MockBoxPredictor(box_predictor.BoxPredictor):
"""Simple box predictor that ignores inputs and outputs all zeros."""
def __init__(self, is_training, num_classes, add_background_class=True):
super(MockBoxPredictor, self).__init__(is_training, num_classes)
self._add_background_class = add_background_class
def _predict(self, image_features, num_predictions_per_location):
image_feature = image_features[0]
combined_feature_shape = shape_utils.combined_static_and_dynamic_shape(
image_feature)
batch_size = combined_feature_shape[0]
num_anchors = (combined_feature_shape[1] * combined_feature_shape[2])
code_size = 4
zero = tf.reduce_sum(0 * image_feature)
num_class_slots = self.num_classes
if self._add_background_class:
num_class_slots = num_class_slots + 1
box_encodings = zero + tf.zeros(
(batch_size, num_anchors, 1, code_size), dtype=tf.float32)
class_predictions_with_background = zero + tf.zeros(
(batch_size, num_anchors, num_class_slots), dtype=tf.float32)
predictions_dict = {
box_predictor.BOX_ENCODINGS:
box_encodings,
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND:
class_predictions_with_background
}
return predictions_dict
class MockKerasBoxPredictor(box_predictor.KerasBoxPredictor):
"""Simple box predictor that ignores inputs and outputs all zeros."""
def __init__(self, is_training, num_classes, add_background_class=True):
super(MockKerasBoxPredictor, self).__init__(
is_training, num_classes, False, False)
self._add_background_class = add_background_class
# Dummy variable so that box predictor registers some variables.
self._dummy_var = tf.Variable(0.0, trainable=True,
name='box_predictor_var')
def _predict(self, image_features, **kwargs):
image_feature = image_features[0]
combined_feature_shape = shape_utils.combined_static_and_dynamic_shape(
image_feature)
batch_size = combined_feature_shape[0]
num_anchors = (combined_feature_shape[1] * combined_feature_shape[2])
code_size = 4
zero = tf.reduce_sum(0 * image_feature)
num_class_slots = self.num_classes
if self._add_background_class:
num_class_slots = num_class_slots + 1
box_encodings = zero + tf.zeros(
(batch_size, num_anchors, 1, code_size), dtype=tf.float32)
class_predictions_with_background = zero + tf.zeros(
(batch_size, num_anchors, num_class_slots), dtype=tf.float32)
predictions_dict = {
box_predictor.BOX_ENCODINGS:
box_encodings,
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND:
class_predictions_with_background
}
return predictions_dict
class MockAnchorGenerator(anchor_generator.AnchorGenerator):
"""Mock anchor generator."""
def name_scope(self):
return 'MockAnchorGenerator'
def num_anchors_per_location(self):
return [1]
def _generate(self, feature_map_shape_list):
num_anchors = sum([shape[0] * shape[1] for shape in feature_map_shape_list])
return box_list.BoxList(tf.zeros((num_anchors, 4), dtype=tf.float32))
class MockMatcher(matcher.Matcher):
"""Simple matcher that matches first anchor to first groundtruth box."""
def _match(self, similarity_matrix, valid_rows):
return tf.constant([0, -1, -1, -1], dtype=tf.int32)
def create_diagonal_gradient_image(height, width, depth):
"""Creates pyramid image. Useful for testing.
For example, pyramid_image(5, 6, 1) looks like:
# [[[ 5. 4. 3. 2. 1. 0.]
# [ 6. 5. 4. 3. 2. 1.]
# [ 7. 6. 5. 4. 3. 2.]
# [ 8. 7. 6. 5. 4. 3.]
# [ 9. 8. 7. 6. 5. 4.]]]
Args:
height: height of image
width: width of image
depth: depth of image
Returns:
pyramid image
"""
row = np.arange(height)
col = np.arange(width)[::-1]
image_layer = np.expand_dims(row, 1) + col
image_layer = np.expand_dims(image_layer, 2)
image = image_layer
for i in range(1, depth):
image = np.concatenate((image, image_layer * pow(10, i)), 2)
return image.astype(np.float32)
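# Note (a minimal sketch): for depth > 1, each successive channel is the base
# gradient scaled by a further factor of 10, e.g.
#
#   create_diagonal_gradient_image(2, 2, 2)[..., 1]  # [[10., 0.], [20., 10.]]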
def create_random_boxes(num_boxes, max_height, max_width):
"""Creates random bounding boxes of specific maximum height and width.
Args:
num_boxes: number of boxes.
max_height: maximum height of boxes.
max_width: maximum width of boxes.
Returns:
boxes: numpy array of shape [num_boxes, 4]. Each row is in form
[y_min, x_min, y_max, x_max].
"""
y_1 = np.random.uniform(size=(1, num_boxes)) * max_height
y_2 = np.random.uniform(size=(1, num_boxes)) * max_height
x_1 = np.random.uniform(size=(1, num_boxes)) * max_width
x_2 = np.random.uniform(size=(1, num_boxes)) * max_width
boxes = np.zeros(shape=(num_boxes, 4))
boxes[:, 0] = np.minimum(y_1, y_2)
boxes[:, 1] = np.minimum(x_1, x_2)
boxes[:, 2] = np.maximum(y_1, y_2)
boxes[:, 3] = np.maximum(x_1, x_2)
return boxes.astype(np.float32)
def first_rows_close_as_set(a, b, k=None, rtol=1e-6, atol=1e-6):
"""Checks if first K entries of two lists are close, up to permutation.
Inputs to this assert are lists of items which can be compared via
numpy.allclose(...) and can be sorted.
Args:
a: list of items which can be compared via numpy.allclose(...) and are
sortable.
b: list of items which can be compared via numpy.allclose(...) and are
sortable.
k: a non-negative integer. If not provided, k is set to be len(a).
rtol: relative tolerance.
atol: absolute tolerance.
Returns:
boolean, True if input lists a and b have the same length and
the first k entries of the inputs satisfy numpy.allclose() after
sorting entries.
"""
if not isinstance(a, list) or not isinstance(b, list) or len(a) != len(b):
return False
if not k:
k = len(a)
k = min(k, len(a))
a_sorted = sorted(a[:k])
b_sorted = sorted(b[:k])
return all([
np.allclose(entry_a, entry_b, rtol, atol)
for (entry_a, entry_b) in zip(a_sorted, b_sorted)
])
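# Example (a minimal sketch): only the first k entries are compared, and their
# order does not matter.
#
#   first_rows_close_as_set([1.0, 2.0, 5.0], [2.0, 1.0, 7.0], k=2)  # True
#   first_rows_close_as_set([1.0, 2.0, 5.0], [2.0, 1.0, 7.0], k=3)  # False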
class GraphContextOrNone(object):
"""A new Graph context for TF1.X and None for TF2.X.
This is useful to write model tests that work with both TF1.X and TF2.X.
Example test using this pattern:
class ModelTest(test_case.TestCase):
def test_model(self):
with test_utils.GraphContextOrNone() as g:
model = Model()
def compute_fn():
out = model.predict()
return out['detection_boxes']
boxes = self.execute(compute_fn, [], graph=g)
self.assertAllClose(boxes, expected_boxes)
"""
def __init__(self):
if tf_version.is_tf2():
self.graph = None
else:
self.graph = tf.Graph().as_default()
def __enter__(self):
if tf_version.is_tf2():
return None
else:
return self.graph.__enter__()
def __exit__(self, ttype, value, traceback):
if tf_version.is_tf2():
return False
else:
return self.graph.__exit__(ttype, value, traceback)
def image_with_dynamic_shape(height, width, channels):
"""Returns a single image with dynamic shape."""
h = tf.random.uniform([], minval=height, maxval=height+1, dtype=tf.int32)
w = tf.random.uniform([], minval=width, maxval=width+1, dtype=tf.int32)
image = tf.random.uniform([h, w, channels])
return image
def keypoints_with_dynamic_shape(num_instances, num_keypoints, num_coordinates):
"""Returns keypoints with dynamic shape."""
n = tf.random.uniform([], minval=num_instances, maxval=num_instances+1,
dtype=tf.int32)
keypoints = tf.random.uniform([n, num_keypoints, num_coordinates])
return keypoints
| 9,746 | 32.153061 | 80 | py |
models | models-master/research/object_detection/utils/config_util_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.config_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
from six.moves import range
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.protos import eval_pb2
from object_detection.protos import image_resizer_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
from object_detection.utils import config_util
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import training as contrib_training
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
def _write_config(config, config_path):
"""Writes a config object to disk."""
config_text = text_format.MessageToString(config)
with tf.gfile.Open(config_path, "wb") as f:
f.write(config_text)
def _update_optimizer_with_constant_learning_rate(optimizer, learning_rate):
"""Adds a new constant learning rate."""
constant_lr = optimizer.learning_rate.constant_learning_rate
constant_lr.learning_rate = learning_rate
def _update_optimizer_with_exponential_decay_learning_rate(
optimizer, learning_rate):
"""Adds a new exponential decay learning rate."""
exponential_lr = optimizer.learning_rate.exponential_decay_learning_rate
exponential_lr.initial_learning_rate = learning_rate
def _update_optimizer_with_manual_step_learning_rate(
optimizer, initial_learning_rate, learning_rate_scaling):
"""Adds a learning rate schedule."""
manual_lr = optimizer.learning_rate.manual_step_learning_rate
manual_lr.initial_learning_rate = initial_learning_rate
for i in range(3):
schedule = manual_lr.schedule.add()
schedule.learning_rate = initial_learning_rate * learning_rate_scaling**i
def _update_optimizer_with_cosine_decay_learning_rate(
optimizer, learning_rate, warmup_learning_rate):
"""Adds a new cosine decay learning rate."""
cosine_lr = optimizer.learning_rate.cosine_decay_learning_rate
cosine_lr.learning_rate_base = learning_rate
cosine_lr.warmup_learning_rate = warmup_learning_rate
class ConfigUtilTest(tf.test.TestCase):
def _create_and_load_test_configs(self, pipeline_config):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
_write_config(pipeline_config, pipeline_config_path)
return config_util.get_configs_from_pipeline_file(pipeline_config_path)
def test_get_configs_from_pipeline_file(self):
"""Test that proto configs can be read from pipeline config file."""
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
pipeline_config.train_config.batch_size = 32
pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
pipeline_config.eval_config.num_examples = 20
pipeline_config.eval_input_reader.add().queue_capacity = 100
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
self.assertProtoEquals(pipeline_config.model, configs["model"])
self.assertProtoEquals(pipeline_config.train_config,
configs["train_config"])
self.assertProtoEquals(pipeline_config.train_input_reader,
configs["train_input_config"])
self.assertProtoEquals(pipeline_config.eval_config,
configs["eval_config"])
self.assertProtoEquals(pipeline_config.eval_input_reader,
configs["eval_input_configs"])
def test_create_configs_from_pipeline_proto(self):
"""Tests creating configs dictionary from pipeline proto."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
pipeline_config.train_config.batch_size = 32
pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
pipeline_config.eval_config.num_examples = 20
pipeline_config.eval_input_reader.add().queue_capacity = 100
configs = config_util.create_configs_from_pipeline_proto(pipeline_config)
self.assertProtoEquals(pipeline_config.model, configs["model"])
self.assertProtoEquals(pipeline_config.train_config,
configs["train_config"])
self.assertProtoEquals(pipeline_config.train_input_reader,
configs["train_input_config"])
self.assertProtoEquals(pipeline_config.eval_config, configs["eval_config"])
self.assertProtoEquals(pipeline_config.eval_input_reader,
configs["eval_input_configs"])
def test_create_pipeline_proto_from_configs(self):
"""Tests that proto can be reconstructed from configs dictionary."""
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
pipeline_config.train_config.batch_size = 32
pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
pipeline_config.eval_config.num_examples = 20
pipeline_config.eval_input_reader.add().queue_capacity = 100
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
pipeline_config_reconstructed = (
config_util.create_pipeline_proto_from_configs(configs))
self.assertEqual(pipeline_config, pipeline_config_reconstructed)
def test_save_pipeline_config(self):
"""Tests that the pipeline config is properly saved to disk."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
pipeline_config.train_config.batch_size = 32
pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
pipeline_config.eval_config.num_examples = 20
pipeline_config.eval_input_reader.add().queue_capacity = 100
config_util.save_pipeline_config(pipeline_config, self.get_temp_dir())
configs = config_util.get_configs_from_pipeline_file(
os.path.join(self.get_temp_dir(), "pipeline.config"))
pipeline_config_reconstructed = (
config_util.create_pipeline_proto_from_configs(configs))
self.assertEqual(pipeline_config, pipeline_config_reconstructed)
def test_get_configs_from_multiple_files(self):
"""Tests that proto configs can be read from multiple files."""
temp_dir = self.get_temp_dir()
# Write model config file.
model_config_path = os.path.join(temp_dir, "model.config")
model = model_pb2.DetectionModel()
model.faster_rcnn.num_classes = 10
_write_config(model, model_config_path)
# Write train config file.
train_config_path = os.path.join(temp_dir, "train.config")
    train_config = train_pb2.TrainConfig()
train_config.batch_size = 32
_write_config(train_config, train_config_path)
# Write train input config file.
train_input_config_path = os.path.join(temp_dir, "train_input.config")
train_input_config = input_reader_pb2.InputReader()
train_input_config.label_map_path = "path/to/label_map"
_write_config(train_input_config, train_input_config_path)
# Write eval config file.
eval_config_path = os.path.join(temp_dir, "eval.config")
eval_config = eval_pb2.EvalConfig()
eval_config.num_examples = 20
_write_config(eval_config, eval_config_path)
# Write eval input config file.
eval_input_config_path = os.path.join(temp_dir, "eval_input.config")
eval_input_config = input_reader_pb2.InputReader()
eval_input_config.label_map_path = "path/to/another/label_map"
_write_config(eval_input_config, eval_input_config_path)
configs = config_util.get_configs_from_multiple_files(
model_config_path=model_config_path,
train_config_path=train_config_path,
train_input_config_path=train_input_config_path,
eval_config_path=eval_config_path,
eval_input_config_path=eval_input_config_path)
self.assertProtoEquals(model, configs["model"])
self.assertProtoEquals(train_config, configs["train_config"])
self.assertProtoEquals(train_input_config,
configs["train_input_config"])
self.assertProtoEquals(eval_config, configs["eval_config"])
self.assertProtoEquals(eval_input_config, configs["eval_input_configs"][0])
def _assertOptimizerWithNewLearningRate(self, optimizer_name):
"""Asserts successful updating of all learning rate schemes."""
original_learning_rate = 0.7
learning_rate_scaling = 0.1
warmup_learning_rate = 0.07
hparams = contrib_training.HParams(learning_rate=0.15)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
# Constant learning rate.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)
_update_optimizer_with_constant_learning_rate(optimizer,
original_learning_rate)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
optimizer = getattr(configs["train_config"].optimizer, optimizer_name)
constant_lr = optimizer.learning_rate.constant_learning_rate
self.assertAlmostEqual(hparams.learning_rate, constant_lr.learning_rate)
# Exponential decay learning rate.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)
_update_optimizer_with_exponential_decay_learning_rate(
optimizer, original_learning_rate)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
optimizer = getattr(configs["train_config"].optimizer, optimizer_name)
exponential_lr = optimizer.learning_rate.exponential_decay_learning_rate
self.assertAlmostEqual(hparams.learning_rate,
exponential_lr.initial_learning_rate)
# Manual step learning rate.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)
_update_optimizer_with_manual_step_learning_rate(
optimizer, original_learning_rate, learning_rate_scaling)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
optimizer = getattr(configs["train_config"].optimizer, optimizer_name)
manual_lr = optimizer.learning_rate.manual_step_learning_rate
self.assertAlmostEqual(hparams.learning_rate,
manual_lr.initial_learning_rate)
for i, schedule in enumerate(manual_lr.schedule):
self.assertAlmostEqual(hparams.learning_rate * learning_rate_scaling**i,
schedule.learning_rate)
# Cosine decay learning rate.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)
_update_optimizer_with_cosine_decay_learning_rate(optimizer,
original_learning_rate,
warmup_learning_rate)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
optimizer = getattr(configs["train_config"].optimizer, optimizer_name)
cosine_lr = optimizer.learning_rate.cosine_decay_learning_rate
self.assertAlmostEqual(hparams.learning_rate, cosine_lr.learning_rate_base)
warmup_scale_factor = warmup_learning_rate / original_learning_rate
self.assertAlmostEqual(hparams.learning_rate * warmup_scale_factor,
cosine_lr.warmup_learning_rate)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
  def testRMSPropWithNewLearningRate(self):
"""Tests new learning rates for RMSProp Optimizer."""
self._assertOptimizerWithNewLearningRate("rms_prop_optimizer")
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testMomentumOptimizerWithNewLearningRate(self):
"""Tests new learning rates for Momentum Optimizer."""
self._assertOptimizerWithNewLearningRate("momentum_optimizer")
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testAdamOptimizerWithNewLearningRate(self):
"""Tests new learning rates for Adam Optimizer."""
self._assertOptimizerWithNewLearningRate("adam_optimizer")
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testGenericConfigOverride(self):
"""Tests generic config overrides for all top-level configs."""
# Set one parameter for each of the top-level pipeline configs:
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.ssd.num_classes = 1
pipeline_config.train_config.batch_size = 1
pipeline_config.eval_config.num_visualizations = 1
pipeline_config.train_input_reader.label_map_path = "/some/path"
pipeline_config.eval_input_reader.add().label_map_path = "/some/path"
pipeline_config.graph_rewriter.quantization.weight_bits = 1
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
_write_config(pipeline_config, pipeline_config_path)
# Override each of the parameters:
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
hparams = contrib_training.HParams(
**{
"model.ssd.num_classes": 2,
"train_config.batch_size": 2,
"train_input_config.label_map_path": "/some/other/path",
"eval_config.num_visualizations": 2,
"graph_rewriter_config.quantization.weight_bits": 2
})
configs = config_util.merge_external_params_with_configs(configs, hparams)
# Ensure that the parameters have the overridden values:
self.assertEqual(2, configs["model"].ssd.num_classes)
self.assertEqual(2, configs["train_config"].batch_size)
self.assertEqual("/some/other/path",
configs["train_input_config"].label_map_path)
self.assertEqual(2, configs["eval_config"].num_visualizations)
self.assertEqual(2,
configs["graph_rewriter_config"].quantization.weight_bits)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testNewBatchSize(self):
"""Tests that batch size is updated appropriately."""
original_batch_size = 2
hparams = contrib_training.HParams(batch_size=16)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_config.batch_size = original_batch_size
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
new_batch_size = configs["train_config"].batch_size
self.assertEqual(16, new_batch_size)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testNewBatchSizeWithClipping(self):
"""Tests that batch size is clipped to 1 from below."""
original_batch_size = 2
hparams = contrib_training.HParams(batch_size=0.5)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_config.batch_size = original_batch_size
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
new_batch_size = configs["train_config"].batch_size
self.assertEqual(1, new_batch_size) # Clipped to 1.0.
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testOverwriteBatchSizeWithKeyValue(self):
"""Tests that batch size is overwritten based on key/value."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_config.batch_size = 2
configs = self._create_and_load_test_configs(pipeline_config)
hparams = contrib_training.HParams(**{"train_config.batch_size": 10})
configs = config_util.merge_external_params_with_configs(configs, hparams)
new_batch_size = configs["train_config"].batch_size
self.assertEqual(10, new_batch_size)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testOverwriteSampleFromDatasetWeights(self):
"""Tests config override for sample_from_datasets_weights."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_input_reader.sample_from_datasets_weights.extend(
[1, 2])
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
_write_config(pipeline_config, pipeline_config_path)
# Override parameters:
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
hparams = contrib_training.HParams(sample_from_datasets_weights=[0.5, 0.5])
configs = config_util.merge_external_params_with_configs(configs, hparams)
# Ensure that the parameters have the overridden values:
self.assertListEqual(
[0.5, 0.5],
list(configs["train_input_config"].sample_from_datasets_weights))
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testOverwriteSampleFromDatasetWeightsWrongLength(self):
"""Tests config override for sample_from_datasets_weights."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_input_reader.sample_from_datasets_weights.extend(
[1, 2])
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
_write_config(pipeline_config, pipeline_config_path)
# Try to override parameter with too many weights:
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
hparams = contrib_training.HParams(
sample_from_datasets_weights=[0.5, 0.5, 0.5])
with self.assertRaises(
ValueError,
msg="sample_from_datasets_weights override has a different number of"
" values (3) than the configured dataset weights (2)."
):
config_util.merge_external_params_with_configs(configs, hparams)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testKeyValueOverrideBadKey(self):
"""Tests that overwriting with a bad key causes an exception."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
configs = self._create_and_load_test_configs(pipeline_config)
hparams = contrib_training.HParams(**{"train_config.no_such_field": 10})
with self.assertRaises(ValueError):
config_util.merge_external_params_with_configs(configs, hparams)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testOverwriteBatchSizeWithBadValueType(self):
"""Tests that overwriting with a bad valuye type causes an exception."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_config.batch_size = 2
configs = self._create_and_load_test_configs(pipeline_config)
# Type should be an integer, but we're passing a string "10".
hparams = contrib_training.HParams(**{"train_config.batch_size": "10"})
with self.assertRaises(TypeError):
config_util.merge_external_params_with_configs(configs, hparams)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testNewMomentumOptimizerValue(self):
"""Tests that new momentum value is updated appropriately."""
original_momentum_value = 0.4
hparams = contrib_training.HParams(momentum_optimizer_value=1.1)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
optimizer_config = pipeline_config.train_config.optimizer.rms_prop_optimizer
optimizer_config.momentum_optimizer_value = original_momentum_value
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
new_momentum_value = optimizer_config.momentum_optimizer_value
self.assertAlmostEqual(1.0, new_momentum_value) # Clipped to 1.0.
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testNewClassificationLocalizationWeightRatio(self):
"""Tests that the loss weight ratio is updated appropriately."""
original_localization_weight = 0.1
original_classification_weight = 0.2
new_weight_ratio = 5.0
hparams = contrib_training.HParams(
classification_localization_weight_ratio=new_weight_ratio)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.ssd.loss.localization_weight = (
original_localization_weight)
pipeline_config.model.ssd.loss.classification_weight = (
original_classification_weight)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
loss = configs["model"].ssd.loss
self.assertAlmostEqual(1.0, loss.localization_weight)
self.assertAlmostEqual(new_weight_ratio, loss.classification_weight)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testNewFocalLossParameters(self):
"""Tests that the loss weight ratio is updated appropriately."""
original_alpha = 1.0
original_gamma = 1.0
new_alpha = 0.3
new_gamma = 2.0
hparams = contrib_training.HParams(
focal_loss_alpha=new_alpha, focal_loss_gamma=new_gamma)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
classification_loss = pipeline_config.model.ssd.loss.classification_loss
classification_loss.weighted_sigmoid_focal.alpha = original_alpha
classification_loss.weighted_sigmoid_focal.gamma = original_gamma
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
classification_loss = configs["model"].ssd.loss.classification_loss
self.assertAlmostEqual(new_alpha,
classification_loss.weighted_sigmoid_focal.alpha)
self.assertAlmostEqual(new_gamma,
classification_loss.weighted_sigmoid_focal.gamma)
def testMergingKeywordArguments(self):
"""Tests that keyword arguments get merged as do hyperparameters."""
original_num_train_steps = 100
desired_num_train_steps = 10
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_config.num_steps = original_num_train_steps
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"train_steps": desired_num_train_steps}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
train_steps = configs["train_config"].num_steps
self.assertEqual(desired_num_train_steps, train_steps)
def testGetNumberOfClasses(self):
"""Tests that number of classes can be retrieved."""
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 20
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
number_of_classes = config_util.get_number_of_classes(configs["model"])
self.assertEqual(20, number_of_classes)
def testNewTrainInputPath(self):
"""Tests that train input path can be overwritten with single file."""
original_train_path = ["path/to/data"]
new_train_path = "another/path/to/data"
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
reader_config = pipeline_config.train_input_reader.tf_record_input_reader
reader_config.input_path.extend(original_train_path)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"train_input_path": new_train_path}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
reader_config = configs["train_input_config"].tf_record_input_reader
final_path = reader_config.input_path
self.assertEqual([new_train_path], final_path)
def testNewTrainInputPathList(self):
"""Tests that train input path can be overwritten with multiple files."""
original_train_path = ["path/to/data"]
new_train_path = ["another/path/to/data", "yet/another/path/to/data"]
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
reader_config = pipeline_config.train_input_reader.tf_record_input_reader
reader_config.input_path.extend(original_train_path)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"train_input_path": new_train_path}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
reader_config = configs["train_input_config"].tf_record_input_reader
final_path = reader_config.input_path
self.assertEqual(new_train_path, final_path)
def testNewLabelMapPath(self):
"""Tests that label map path can be overwritten in input readers."""
original_label_map_path = "path/to/original/label_map"
new_label_map_path = "path//to/new/label_map"
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
train_input_reader = pipeline_config.train_input_reader
train_input_reader.label_map_path = original_label_map_path
eval_input_reader = pipeline_config.eval_input_reader.add()
eval_input_reader.label_map_path = original_label_map_path
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"label_map_path": new_label_map_path}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
self.assertEqual(new_label_map_path,
configs["train_input_config"].label_map_path)
for eval_input_config in configs["eval_input_configs"]:
self.assertEqual(new_label_map_path, eval_input_config.label_map_path)
def testDontOverwriteEmptyLabelMapPath(self):
"""Tests that label map path will not by overwritten with empty string."""
original_label_map_path = "path/to/original/label_map"
new_label_map_path = ""
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
train_input_reader = pipeline_config.train_input_reader
train_input_reader.label_map_path = original_label_map_path
eval_input_reader = pipeline_config.eval_input_reader.add()
eval_input_reader.label_map_path = original_label_map_path
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"label_map_path": new_label_map_path}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
self.assertEqual(original_label_map_path,
configs["train_input_config"].label_map_path)
self.assertEqual(original_label_map_path,
configs["eval_input_configs"][0].label_map_path)
def testNewMaskType(self):
"""Tests that mask type can be overwritten in input readers."""
original_mask_type = input_reader_pb2.NUMERICAL_MASKS
new_mask_type = input_reader_pb2.PNG_MASKS
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
train_input_reader = pipeline_config.train_input_reader
train_input_reader.mask_type = original_mask_type
eval_input_reader = pipeline_config.eval_input_reader.add()
eval_input_reader.mask_type = original_mask_type
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"mask_type": new_mask_type}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
self.assertEqual(new_mask_type, configs["train_input_config"].mask_type)
self.assertEqual(new_mask_type, configs["eval_input_configs"][0].mask_type)
def testUseMovingAverageForEval(self):
use_moving_averages_orig = False
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = use_moving_averages_orig
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"eval_with_moving_averages": True}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
self.assertEqual(True, configs["eval_config"].use_moving_averages)
def testGetImageResizerConfig(self):
"""Tests that number of classes can be retrieved."""
model_config = model_pb2.DetectionModel()
model_config.faster_rcnn.image_resizer.fixed_shape_resizer.height = 100
model_config.faster_rcnn.image_resizer.fixed_shape_resizer.width = 300
image_resizer_config = config_util.get_image_resizer_config(model_config)
self.assertEqual(image_resizer_config.fixed_shape_resizer.height, 100)
self.assertEqual(image_resizer_config.fixed_shape_resizer.width, 300)
def testGetSpatialImageSizeFromFixedShapeResizerConfig(self):
image_resizer_config = image_resizer_pb2.ImageResizer()
image_resizer_config.fixed_shape_resizer.height = 100
image_resizer_config.fixed_shape_resizer.width = 200
image_shape = config_util.get_spatial_image_size(image_resizer_config)
self.assertAllEqual(image_shape, [100, 200])
def testGetSpatialImageSizeFromAspectPreservingResizerConfig(self):
image_resizer_config = image_resizer_pb2.ImageResizer()
image_resizer_config.keep_aspect_ratio_resizer.min_dimension = 100
image_resizer_config.keep_aspect_ratio_resizer.max_dimension = 600
image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension = True
image_shape = config_util.get_spatial_image_size(image_resizer_config)
self.assertAllEqual(image_shape, [600, 600])
def testGetSpatialImageSizeFromAspectPreservingResizerDynamic(self):
image_resizer_config = image_resizer_pb2.ImageResizer()
image_resizer_config.keep_aspect_ratio_resizer.min_dimension = 100
image_resizer_config.keep_aspect_ratio_resizer.max_dimension = 600
image_shape = config_util.get_spatial_image_size(image_resizer_config)
self.assertAllEqual(image_shape, [-1, -1])
def testGetSpatialImageSizeFromConditionalShapeResizer(self):
image_resizer_config = image_resizer_pb2.ImageResizer()
image_resizer_config.conditional_shape_resizer.size_threshold = 100
image_shape = config_util.get_spatial_image_size(image_resizer_config)
self.assertAllEqual(image_shape, [-1, -1])
def testGetMaxNumContextFeaturesFromModelConfig(self):
model_config = model_pb2.DetectionModel()
model_config.faster_rcnn.context_config.max_num_context_features = 10
max_num_context_features = config_util.get_max_num_context_features(
model_config)
self.assertAllEqual(max_num_context_features, 10)
def testGetContextFeatureLengthFromModelConfig(self):
model_config = model_pb2.DetectionModel()
model_config.faster_rcnn.context_config.context_feature_length = 100
context_feature_length = config_util.get_context_feature_length(
model_config)
self.assertAllEqual(context_feature_length, 100)
def testEvalShuffle(self):
"""Tests that `eval_shuffle` keyword arguments are applied correctly."""
original_shuffle = True
desired_shuffle = False
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_input_reader.add().shuffle = original_shuffle
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"eval_shuffle": desired_shuffle}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
self.assertEqual(desired_shuffle, configs["eval_input_configs"][0].shuffle)
def testTrainShuffle(self):
"""Tests that `train_shuffle` keyword arguments are applied correctly."""
original_shuffle = True
desired_shuffle = False
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_input_reader.shuffle = original_shuffle
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"train_shuffle": desired_shuffle}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
train_shuffle = configs["train_input_config"].shuffle
self.assertEqual(desired_shuffle, train_shuffle)
def testOverWriteRetainOriginalImages(self):
"""Tests that `train_shuffle` keyword arguments are applied correctly."""
original_retain_original_images = True
desired_retain_original_images = False
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.retain_original_images = (
original_retain_original_images)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {
"retain_original_images_in_eval": desired_retain_original_images
}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
retain_original_images = configs["eval_config"].retain_original_images
self.assertEqual(desired_retain_original_images, retain_original_images)
def testOverwriteAllEvalSampling(self):
original_num_eval_examples = 1
new_num_eval_examples = 10
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_input_reader.add().sample_1_of_n_examples = (
original_num_eval_examples)
pipeline_config.eval_input_reader.add().sample_1_of_n_examples = (
original_num_eval_examples)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"sample_1_of_n_eval_examples": new_num_eval_examples}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
for eval_input_config in configs["eval_input_configs"]:
self.assertEqual(new_num_eval_examples,
eval_input_config.sample_1_of_n_examples)
def testOverwriteAllEvalNumEpochs(self):
original_num_epochs = 10
new_num_epochs = 1
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_input_reader.add().num_epochs = original_num_epochs
pipeline_config.eval_input_reader.add().num_epochs = original_num_epochs
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"eval_num_epochs": new_num_epochs}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
for eval_input_config in configs["eval_input_configs"]:
self.assertEqual(new_num_epochs, eval_input_config.num_epochs)
def testUpdateMaskTypeForAllInputConfigs(self):
original_mask_type = input_reader_pb2.NUMERICAL_MASKS
new_mask_type = input_reader_pb2.PNG_MASKS
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
train_config = pipeline_config.train_input_reader
train_config.mask_type = original_mask_type
eval_1 = pipeline_config.eval_input_reader.add()
eval_1.mask_type = original_mask_type
eval_1.name = "eval_1"
eval_2 = pipeline_config.eval_input_reader.add()
eval_2.mask_type = original_mask_type
eval_2.name = "eval_2"
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"mask_type": new_mask_type}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
self.assertEqual(configs["train_input_config"].mask_type, new_mask_type)
for eval_input_config in configs["eval_input_configs"]:
self.assertEqual(eval_input_config.mask_type, new_mask_type)
def testErrorOverwritingMultipleInputConfig(self):
original_shuffle = False
new_shuffle = True
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
eval_1 = pipeline_config.eval_input_reader.add()
eval_1.shuffle = original_shuffle
eval_1.name = "eval_1"
eval_2 = pipeline_config.eval_input_reader.add()
eval_2.shuffle = original_shuffle
eval_2.name = "eval_2"
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"eval_shuffle": new_shuffle}
with self.assertRaises(ValueError):
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
def testCheckAndParseInputConfigKey(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_input_reader.add().name = "eval_1"
pipeline_config.eval_input_reader.add().name = "eval_2"
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
specific_shuffle_update_key = "eval_input_configs:eval_2:shuffle"
is_valid_input_config_key, key_name, input_name, field_name = (
config_util.check_and_parse_input_config_key(
configs, specific_shuffle_update_key))
self.assertTrue(is_valid_input_config_key)
self.assertEqual(key_name, "eval_input_configs")
self.assertEqual(input_name, "eval_2")
self.assertEqual(field_name, "shuffle")
legacy_shuffle_update_key = "eval_shuffle"
is_valid_input_config_key, key_name, input_name, field_name = (
config_util.check_and_parse_input_config_key(configs,
legacy_shuffle_update_key))
self.assertTrue(is_valid_input_config_key)
self.assertEqual(key_name, "eval_input_configs")
self.assertEqual(input_name, None)
self.assertEqual(field_name, "shuffle")
non_input_config_update_key = "label_map_path"
is_valid_input_config_key, key_name, input_name, field_name = (
config_util.check_and_parse_input_config_key(
configs, non_input_config_update_key))
self.assertFalse(is_valid_input_config_key)
self.assertEqual(key_name, None)
self.assertEqual(input_name, None)
self.assertEqual(field_name, "label_map_path")
with self.assertRaisesRegexp(ValueError,
"Invalid key format when overriding configs."):
config_util.check_and_parse_input_config_key(
configs, "train_input_config:shuffle")
with self.assertRaisesRegexp(
ValueError, "Invalid key_name when overriding input config."):
config_util.check_and_parse_input_config_key(
configs, "invalid_key_name:train_name:shuffle")
with self.assertRaisesRegexp(
ValueError, "Invalid input_name when overriding input config."):
config_util.check_and_parse_input_config_key(
configs, "eval_input_configs:unknown_eval_name:shuffle")
with self.assertRaisesRegexp(
ValueError, "Invalid field_name when overriding input config."):
config_util.check_and_parse_input_config_key(
configs, "eval_input_configs:eval_2:unknown_field_name")
def testUpdateInputReaderConfigSuccess(self):
original_shuffle = False
new_shuffle = True
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_input_reader.shuffle = original_shuffle
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
config_util.update_input_reader_config(
configs,
key_name="train_input_config",
input_name=None,
field_name="shuffle",
value=new_shuffle)
self.assertEqual(configs["train_input_config"].shuffle, new_shuffle)
config_util.update_input_reader_config(
configs,
key_name="train_input_config",
input_name=None,
field_name="shuffle",
value=new_shuffle)
self.assertEqual(configs["train_input_config"].shuffle, new_shuffle)
def testUpdateInputReaderConfigErrors(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_input_reader.add().name = "same_eval_name"
pipeline_config.eval_input_reader.add().name = "same_eval_name"
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
with self.assertRaisesRegexp(ValueError,
"Duplicate input name found when overriding."):
config_util.update_input_reader_config(
configs,
key_name="eval_input_configs",
input_name="same_eval_name",
field_name="shuffle",
value=False)
with self.assertRaisesRegexp(
ValueError, "Input name name_not_exist not found when overriding."):
config_util.update_input_reader_config(
configs,
key_name="eval_input_configs",
input_name="name_not_exist",
field_name="shuffle",
value=False)
with self.assertRaisesRegexp(ValueError,
"Unknown input config overriding."):
config_util.update_input_reader_config(
configs,
key_name="eval_input_configs",
input_name=None,
field_name="shuffle",
value=False)
def testOverWriteRetainOriginalImageAdditionalChannels(self):
"""Tests that keyword arguments are applied correctly."""
original_retain_original_image_additional_channels = True
desired_retain_original_image_additional_channels = False
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.retain_original_image_additional_channels = (
original_retain_original_image_additional_channels)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {
"retain_original_image_additional_channels_in_eval":
desired_retain_original_image_additional_channels
}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
retain_original_image_additional_channels = configs[
"eval_config"].retain_original_image_additional_channels
self.assertEqual(desired_retain_original_image_additional_channels,
retain_original_image_additional_channels)
def testUpdateNumClasses(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
self.assertEqual(config_util.get_number_of_classes(configs["model"]), 10)
config_util.merge_external_params_with_configs(
configs, kwargs_dict={"num_classes": 2})
self.assertEqual(config_util.get_number_of_classes(configs["model"]), 2)
def testRemoveUnnecessaryEma(self):
input_dict = {
"expanded_conv_10/project/act_quant/min":
1,
"FeatureExtractor/MobilenetV2_2/expanded_conv_5/expand/act_quant/min":
2,
"expanded_conv_10/expand/BatchNorm/gamma/min/ExponentialMovingAverage":
3,
"expanded_conv_3/depthwise/BatchNorm/beta/max/ExponentialMovingAverage":
4,
"BoxPredictor_1/ClassPredictor_depthwise/act_quant":
5
}
no_ema_collection = ["/min", "/max"]
output_dict = {
"expanded_conv_10/project/act_quant/min":
1,
"FeatureExtractor/MobilenetV2_2/expanded_conv_5/expand/act_quant/min":
2,
"expanded_conv_10/expand/BatchNorm/gamma/min":
3,
"expanded_conv_3/depthwise/BatchNorm/beta/max":
4,
"BoxPredictor_1/ClassPredictor_depthwise/act_quant":
5
}
self.assertEqual(
output_dict,
config_util.remove_unnecessary_ema(input_dict, no_ema_collection))
def testUpdateRescoreInstances(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
kpt_task = pipeline_config.model.center_net.keypoint_estimation_task.add()
kpt_task.rescore_instances = True
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
cn_config = configs["model"].center_net
self.assertEqual(
True, cn_config.keypoint_estimation_task[0].rescore_instances)
config_util.merge_external_params_with_configs(
configs, kwargs_dict={"rescore_instances": False})
cn_config = configs["model"].center_net
self.assertEqual(
False, cn_config.keypoint_estimation_task[0].rescore_instances)
def testUpdateRescoreInstancesWithBooleanString(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
kpt_task = pipeline_config.model.center_net.keypoint_estimation_task.add()
kpt_task.rescore_instances = True
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
cn_config = configs["model"].center_net
self.assertEqual(
True, cn_config.keypoint_estimation_task[0].rescore_instances)
config_util.merge_external_params_with_configs(
configs, kwargs_dict={"rescore_instances": "False"})
cn_config = configs["model"].center_net
self.assertEqual(
False, cn_config.keypoint_estimation_task[0].rescore_instances)
def testUpdateRescoreInstancesWithMultipleTasks(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
kpt_task = pipeline_config.model.center_net.keypoint_estimation_task.add()
kpt_task.rescore_instances = True
kpt_task = pipeline_config.model.center_net.keypoint_estimation_task.add()
kpt_task.rescore_instances = True
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
cn_config = configs["model"].center_net
self.assertEqual(
True, cn_config.keypoint_estimation_task[0].rescore_instances)
config_util.merge_external_params_with_configs(
configs, kwargs_dict={"rescore_instances": False})
cn_config = configs["model"].center_net
self.assertEqual(
True, cn_config.keypoint_estimation_task[0].rescore_instances)
self.assertEqual(
True, cn_config.keypoint_estimation_task[1].rescore_instances)
if __name__ == "__main__":
tf.test.main()
| 51,172 | 46.164055 | 80 | py |
models | models-master/research/object_detection/dataset_tools/create_coco_tf_record.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw COCO dataset to TFRecord for object_detection.
This tool supports data generation for object detection (boxes, masks),
keypoint detection, and DensePose.
Please note that this tool creates sharded output files.
Example usage:
python create_coco_tf_record.py --logtostderr \
--train_image_dir="${TRAIN_IMAGE_DIR}" \
--val_image_dir="${VAL_IMAGE_DIR}" \
--test_image_dir="${TEST_IMAGE_DIR}" \
--train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \
--val_annotations_file="${VAL_ANNOTATIONS_FILE}" \
--testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \
--output_dir="${OUTPUT_DIR}"
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import json
import logging
import os
import contextlib2
import numpy as np
import PIL.Image
from pycocotools import mask
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
flags = tf.app.flags
tf.flags.DEFINE_boolean(
'include_masks', False, 'Whether to include instance segmentations masks '
'(PNG encoded) in the result. default: False.')
tf.flags.DEFINE_string('train_image_dir', '', 'Training image directory.')
tf.flags.DEFINE_string('val_image_dir', '', 'Validation image directory.')
tf.flags.DEFINE_string('test_image_dir', '', 'Test image directory.')
tf.flags.DEFINE_string('train_annotations_file', '',
'Training annotations JSON file.')
tf.flags.DEFINE_string('val_annotations_file', '',
'Validation annotations JSON file.')
tf.flags.DEFINE_string('testdev_annotations_file', '',
'Test-dev annotations JSON file.')
tf.flags.DEFINE_string('train_keypoint_annotations_file', '',
                       'Training keypoint annotations JSON file.')
tf.flags.DEFINE_string('val_keypoint_annotations_file', '',
                       'Validation keypoint annotations JSON file.')
# DensePose is only available for coco 2014.
tf.flags.DEFINE_string('train_densepose_annotations_file', '',
'Training annotations JSON file for DensePose.')
tf.flags.DEFINE_string('val_densepose_annotations_file', '',
'Validation annotations JSON file for DensePose.')
tf.flags.DEFINE_string('output_dir', '/tmp/', 'Output data directory.')
# Whether to only produce images/annotations on person class (for keypoint /
# densepose task).
tf.flags.DEFINE_boolean('remove_non_person_annotations', False, 'Whether to '
'remove all annotations for non-person objects.')
tf.flags.DEFINE_boolean('remove_non_person_images', False, 'Whether to '
'remove all examples that do not contain a person.')
FLAGS = flags.FLAGS
logger = tf.get_logger()
logger.setLevel(logging.INFO)
_COCO_KEYPOINT_NAMES = [
b'nose', b'left_eye', b'right_eye', b'left_ear', b'right_ear',
b'left_shoulder', b'right_shoulder', b'left_elbow', b'right_elbow',
b'left_wrist', b'right_wrist', b'left_hip', b'right_hip',
b'left_knee', b'right_knee', b'left_ankle', b'right_ankle'
]
_COCO_PART_NAMES = [
b'torso_back', b'torso_front', b'right_hand', b'left_hand', b'left_foot',
b'right_foot', b'right_upper_leg_back', b'left_upper_leg_back',
b'right_upper_leg_front', b'left_upper_leg_front', b'right_lower_leg_back',
b'left_lower_leg_back', b'right_lower_leg_front', b'left_lower_leg_front',
b'left_upper_arm_back', b'right_upper_arm_back', b'left_upper_arm_front',
b'right_upper_arm_front', b'left_lower_arm_back', b'right_lower_arm_back',
b'left_lower_arm_front', b'right_lower_arm_front', b'right_face',
b'left_face',
]
_DP_PART_ID_OFFSET = 1
def clip_to_unit(x):
return min(max(x, 0.0), 1.0)
def create_tf_example(image,
annotations_list,
image_dir,
category_index,
include_masks=False,
keypoint_annotations_dict=None,
densepose_annotations_dict=None,
remove_non_person_annotations=False,
remove_non_person_images=False):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
annotations_list:
list of dicts with keys: [u'segmentation', u'area', u'iscrowd',
u'image_id', u'bbox', u'category_id', u'id'] Notice that bounding box
coordinates in the official COCO dataset are given as [x, y, width,
height] tuples using absolute coordinates where x, y represent the
top-left (0-indexed) corner. This function converts to the format
        expected by the Tensorflow Object Detection API (which is
[ymin, xmin, ymax, xmax] with coordinates normalized relative to image
size).
image_dir: directory containing the image files.
category_index: a dict containing COCO category information keyed by the
'id' field of each category. See the label_map_util.create_category_index
function.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
keypoint_annotations_dict: A dictionary that maps from annotation_id to a
      dictionary with keys: [u'keypoints', u'num_keypoints'] representing the
keypoint information for this person object annotation. If None, then
no keypoint annotations will be populated.
densepose_annotations_dict: A dictionary that maps from annotation_id to a
dictionary with keys: [u'dp_I', u'dp_x', u'dp_y', 'dp_U', 'dp_V']
representing part surface coordinates. For more information see
http://densepose.org/.
remove_non_person_annotations: Whether to remove any annotations that are
not the "person" class.
remove_non_person_images: Whether to remove any images that do not contain
at least one "person" annotation.
Returns:
key: SHA256 hash of the image.
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
num_keypoint_annotation_skipped: Number of keypoint annotations that were
skipped.
num_densepose_annotation_skipped: Number of DensePose annotations that were
skipped.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
full_path = os.path.join(image_dir, filename)
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
key = hashlib.sha256(encoded_jpg).hexdigest()
xmin = []
xmax = []
ymin = []
ymax = []
is_crowd = []
category_names = []
category_ids = []
area = []
encoded_mask_png = []
keypoints_x = []
keypoints_y = []
keypoints_visibility = []
keypoints_name = []
num_keypoints = []
include_keypoint = keypoint_annotations_dict is not None
num_annotations_skipped = 0
num_keypoint_annotation_used = 0
num_keypoint_annotation_skipped = 0
dp_part_index = []
dp_x = []
dp_y = []
dp_u = []
dp_v = []
dp_num_points = []
densepose_keys = ['dp_I', 'dp_U', 'dp_V', 'dp_x', 'dp_y', 'bbox']
include_densepose = densepose_annotations_dict is not None
num_densepose_annotation_used = 0
num_densepose_annotation_skipped = 0
for object_annotations in annotations_list:
(x, y, width, height) = tuple(object_annotations['bbox'])
if width <= 0 or height <= 0:
num_annotations_skipped += 1
continue
if x + width > image_width or y + height > image_height:
num_annotations_skipped += 1
continue
category_id = int(object_annotations['category_id'])
category_name = category_index[category_id]['name'].encode('utf8')
if remove_non_person_annotations and category_name != b'person':
num_annotations_skipped += 1
continue
xmin.append(float(x) / image_width)
xmax.append(float(x + width) / image_width)
ymin.append(float(y) / image_height)
ymax.append(float(y + height) / image_height)
is_crowd.append(object_annotations['iscrowd'])
category_ids.append(category_id)
category_names.append(category_name)
area.append(object_annotations['area'])
if include_masks:
run_len_encoding = mask.frPyObjects(object_annotations['segmentation'],
image_height, image_width)
binary_mask = mask.decode(run_len_encoding)
if not object_annotations['iscrowd']:
binary_mask = np.amax(binary_mask, axis=2)
pil_image = PIL.Image.fromarray(binary_mask)
output_io = io.BytesIO()
pil_image.save(output_io, format='PNG')
encoded_mask_png.append(output_io.getvalue())
if include_keypoint:
annotation_id = object_annotations['id']
if annotation_id in keypoint_annotations_dict:
num_keypoint_annotation_used += 1
keypoint_annotations = keypoint_annotations_dict[annotation_id]
keypoints = keypoint_annotations['keypoints']
num_kpts = keypoint_annotations['num_keypoints']
keypoints_x_abs = keypoints[::3]
keypoints_x.extend(
[float(x_abs) / image_width for x_abs in keypoints_x_abs])
keypoints_y_abs = keypoints[1::3]
keypoints_y.extend(
[float(y_abs) / image_height for y_abs in keypoints_y_abs])
keypoints_visibility.extend(keypoints[2::3])
keypoints_name.extend(_COCO_KEYPOINT_NAMES)
num_keypoints.append(num_kpts)
else:
keypoints_x.extend([0.0] * len(_COCO_KEYPOINT_NAMES))
keypoints_y.extend([0.0] * len(_COCO_KEYPOINT_NAMES))
keypoints_visibility.extend([0] * len(_COCO_KEYPOINT_NAMES))
keypoints_name.extend(_COCO_KEYPOINT_NAMES)
num_keypoints.append(0)
if include_densepose:
annotation_id = object_annotations['id']
if (annotation_id in densepose_annotations_dict and
all(key in densepose_annotations_dict[annotation_id]
for key in densepose_keys)):
dp_annotations = densepose_annotations_dict[annotation_id]
num_densepose_annotation_used += 1
dp_num_points.append(len(dp_annotations['dp_I']))
dp_part_index.extend([int(i - _DP_PART_ID_OFFSET)
for i in dp_annotations['dp_I']])
# DensePose surface coordinates are defined on a [256, 256] grid
# relative to each instance box (i.e. absolute coordinates in range
# [0., 256.]). The following converts the coordinates
# so that they are expressed in normalized image coordinates.
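        # Illustrative conversion (assumed values): dp_x = 128. with an
        # instance box at x = 50., width = 100. on a 400-pixel-wide image
        # gives x_box_rel = 128. / 256. = 0.5 and a normalized coordinate of
        # (50. + 0.5 * 100.) / 400. = 0.25; dp_y is handled analogously.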
dp_x_box_rel = [
clip_to_unit(val / 256.) for val in dp_annotations['dp_x']]
dp_x_norm = [(float(x) + x_box_rel * width) / image_width
for x_box_rel in dp_x_box_rel]
dp_y_box_rel = [
clip_to_unit(val / 256.) for val in dp_annotations['dp_y']]
dp_y_norm = [(float(y) + y_box_rel * height) / image_height
for y_box_rel in dp_y_box_rel]
dp_x.extend(dp_x_norm)
dp_y.extend(dp_y_norm)
dp_u.extend(dp_annotations['dp_U'])
dp_v.extend(dp_annotations['dp_V'])
else:
dp_num_points.append(0)
if (remove_non_person_images and
not any(name == b'person' for name in category_names)):
return (key, None, num_annotations_skipped,
num_keypoint_annotation_skipped, num_densepose_annotation_skipped)
feature_dict = {
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/filename':
dataset_util.bytes_feature(filename.encode('utf8')),
'image/source_id':
dataset_util.bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded':
dataset_util.bytes_feature(encoded_jpg),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin':
dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax':
dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin':
dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax':
dataset_util.float_list_feature(ymax),
'image/object/class/text':
dataset_util.bytes_list_feature(category_names),
'image/object/is_crowd':
dataset_util.int64_list_feature(is_crowd),
'image/object/area':
dataset_util.float_list_feature(area),
}
if include_masks:
feature_dict['image/object/mask'] = (
dataset_util.bytes_list_feature(encoded_mask_png))
if include_keypoint:
feature_dict['image/object/keypoint/x'] = (
dataset_util.float_list_feature(keypoints_x))
feature_dict['image/object/keypoint/y'] = (
dataset_util.float_list_feature(keypoints_y))
feature_dict['image/object/keypoint/num'] = (
dataset_util.int64_list_feature(num_keypoints))
feature_dict['image/object/keypoint/visibility'] = (
dataset_util.int64_list_feature(keypoints_visibility))
feature_dict['image/object/keypoint/text'] = (
dataset_util.bytes_list_feature(keypoints_name))
num_keypoint_annotation_skipped = (
len(keypoint_annotations_dict) - num_keypoint_annotation_used)
if include_densepose:
feature_dict['image/object/densepose/num'] = (
dataset_util.int64_list_feature(dp_num_points))
feature_dict['image/object/densepose/part_index'] = (
dataset_util.int64_list_feature(dp_part_index))
feature_dict['image/object/densepose/x'] = (
dataset_util.float_list_feature(dp_x))
feature_dict['image/object/densepose/y'] = (
dataset_util.float_list_feature(dp_y))
feature_dict['image/object/densepose/u'] = (
dataset_util.float_list_feature(dp_u))
feature_dict['image/object/densepose/v'] = (
dataset_util.float_list_feature(dp_v))
num_densepose_annotation_skipped = (
len(densepose_annotations_dict) - num_densepose_annotation_used)
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return (key, example, num_annotations_skipped,
num_keypoint_annotation_skipped, num_densepose_annotation_skipped)
def _create_tf_record_from_coco_annotations(annotations_file, image_dir,
output_path, include_masks,
num_shards,
keypoint_annotations_file='',
densepose_annotations_file='',
remove_non_person_annotations=False,
remove_non_person_images=False):
"""Loads COCO annotation json files and converts to tf.Record format.
Args:
annotations_file: JSON file containing bounding box annotations.
image_dir: Directory containing the image files.
output_path: Path to output tf.Record file.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
num_shards: number of output file shards.
keypoint_annotations_file: JSON file containing the person keypoint
annotations. If empty, then no person keypoint annotations will be
generated.
densepose_annotations_file: JSON file containing the DensePose annotations.
If empty, then no DensePose annotations will be generated.
remove_non_person_annotations: Whether to remove any annotations that are
not the "person" class.
remove_non_person_images: Whether to remove any images that do not contain
at least one "person" annotation.
"""
with contextlib2.ExitStack() as tf_record_close_stack, \
tf.gfile.GFile(annotations_file, 'r') as fid:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, output_path, num_shards)
groundtruth_data = json.load(fid)
images = groundtruth_data['images']
category_index = label_map_util.create_category_index(
groundtruth_data['categories'])
annotations_index = {}
if 'annotations' in groundtruth_data:
logging.info('Found groundtruth annotations. Building annotations index.')
for annotation in groundtruth_data['annotations']:
image_id = annotation['image_id']
if image_id not in annotations_index:
annotations_index[image_id] = []
annotations_index[image_id].append(annotation)
missing_annotation_count = 0
for image in images:
image_id = image['id']
if image_id not in annotations_index:
missing_annotation_count += 1
annotations_index[image_id] = []
logging.info('%d images are missing annotations.',
missing_annotation_count)
keypoint_annotations_index = {}
if keypoint_annotations_file:
with tf.gfile.GFile(keypoint_annotations_file, 'r') as kid:
keypoint_groundtruth_data = json.load(kid)
if 'annotations' in keypoint_groundtruth_data:
for annotation in keypoint_groundtruth_data['annotations']:
image_id = annotation['image_id']
if image_id not in keypoint_annotations_index:
keypoint_annotations_index[image_id] = {}
keypoint_annotations_index[image_id][annotation['id']] = annotation
densepose_annotations_index = {}
if densepose_annotations_file:
with tf.gfile.GFile(densepose_annotations_file, 'r') as fid:
densepose_groundtruth_data = json.load(fid)
if 'annotations' in densepose_groundtruth_data:
for annotation in densepose_groundtruth_data['annotations']:
image_id = annotation['image_id']
if image_id not in densepose_annotations_index:
densepose_annotations_index[image_id] = {}
densepose_annotations_index[image_id][annotation['id']] = annotation
total_num_annotations_skipped = 0
total_num_keypoint_annotations_skipped = 0
total_num_densepose_annotations_skipped = 0
for idx, image in enumerate(images):
if idx % 100 == 0:
logging.info('On image %d of %d', idx, len(images))
annotations_list = annotations_index[image['id']]
keypoint_annotations_dict = None
if keypoint_annotations_file:
keypoint_annotations_dict = {}
if image['id'] in keypoint_annotations_index:
keypoint_annotations_dict = keypoint_annotations_index[image['id']]
densepose_annotations_dict = None
if densepose_annotations_file:
densepose_annotations_dict = {}
if image['id'] in densepose_annotations_index:
densepose_annotations_dict = densepose_annotations_index[image['id']]
(_, tf_example, num_annotations_skipped, num_keypoint_annotations_skipped,
num_densepose_annotations_skipped) = create_tf_example(
image, annotations_list, image_dir, category_index, include_masks,
keypoint_annotations_dict, densepose_annotations_dict,
remove_non_person_annotations, remove_non_person_images)
total_num_annotations_skipped += num_annotations_skipped
total_num_keypoint_annotations_skipped += num_keypoint_annotations_skipped
total_num_densepose_annotations_skipped += (
num_densepose_annotations_skipped)
shard_idx = idx % num_shards
if tf_example:
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
logging.info('Finished writing, skipped %d annotations.',
total_num_annotations_skipped)
if keypoint_annotations_file:
logging.info('Finished writing, skipped %d keypoint annotations.',
total_num_keypoint_annotations_skipped)
if densepose_annotations_file:
logging.info('Finished writing, skipped %d DensePose annotations.',
total_num_densepose_annotations_skipped)
def main(_):
assert FLAGS.train_image_dir, '`train_image_dir` missing.'
assert FLAGS.val_image_dir, '`val_image_dir` missing.'
assert FLAGS.test_image_dir, '`test_image_dir` missing.'
assert FLAGS.train_annotations_file, '`train_annotations_file` missing.'
assert FLAGS.val_annotations_file, '`val_annotations_file` missing.'
assert FLAGS.testdev_annotations_file, '`testdev_annotations_file` missing.'
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
train_output_path = os.path.join(FLAGS.output_dir, 'coco_train.record')
val_output_path = os.path.join(FLAGS.output_dir, 'coco_val.record')
testdev_output_path = os.path.join(FLAGS.output_dir, 'coco_testdev.record')
_create_tf_record_from_coco_annotations(
FLAGS.train_annotations_file,
FLAGS.train_image_dir,
train_output_path,
FLAGS.include_masks,
num_shards=100,
keypoint_annotations_file=FLAGS.train_keypoint_annotations_file,
densepose_annotations_file=FLAGS.train_densepose_annotations_file,
remove_non_person_annotations=FLAGS.remove_non_person_annotations,
remove_non_person_images=FLAGS.remove_non_person_images)
_create_tf_record_from_coco_annotations(
FLAGS.val_annotations_file,
FLAGS.val_image_dir,
val_output_path,
FLAGS.include_masks,
num_shards=50,
keypoint_annotations_file=FLAGS.val_keypoint_annotations_file,
densepose_annotations_file=FLAGS.val_densepose_annotations_file,
remove_non_person_annotations=FLAGS.remove_non_person_annotations,
remove_non_person_images=FLAGS.remove_non_person_images)
_create_tf_record_from_coco_annotations(
FLAGS.testdev_annotations_file,
FLAGS.test_image_dir,
testdev_output_path,
FLAGS.include_masks,
num_shards=50)
if __name__ == '__main__':
tf.app.run()
| 22,961 | 43.242775 | 80 | py |
models | models-master/research/object_detection/dataset_tools/tf_record_creation_util.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Utilities for creating TFRecords of TF examples for the Open Images dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
def open_sharded_output_tfrecords(exit_stack, base_path, num_shards):
"""Opens all TFRecord shards for writing and adds them to an exit stack.
Args:
    exit_stack: A contextlib2.ExitStack used to automatically close the
      TFRecords opened in this function.
base_path: The base path for all shards
num_shards: The number of shards
Returns:
The list of opened TFRecords. Position k in the list corresponds to shard k.
"""
tf_record_output_filenames = [
'{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards)
for idx in range(num_shards)
]
tfrecords = [
exit_stack.enter_context(tf.python_io.TFRecordWriter(file_name))
for file_name in tf_record_output_filenames
]
return tfrecords
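# A minimal usage sketch (illustrative; assumes `examples` is an iterable of
# tf.train.Example protos and that contextlib2 is available). With
# base_path='/tmp/out.record' and num_shards=3 the shards are named
# '/tmp/out.record-00000-of-00003' through '/tmp/out.record-00002-of-00003',
# and all writers are closed when the ExitStack exits:
#
#   with contextlib2.ExitStack() as stack:
#     writers = open_sharded_output_tfrecords(stack, '/tmp/out.record', 3)
#     for index, example in enumerate(examples):
#       writers[index % 3].write(example.SerializeToString())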
| 1,690 | 34.229167 | 80 | py |
models | models-master/research/object_detection/dataset_tools/create_ava_actions_tf_record.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Code to download and parse the AVA Actions dataset for TensorFlow models.
The [AVA Actions data set](
https://research.google.com/ava/index.html)
is a dataset for human action recognition.
This script downloads the annotations and prepares data from similar annotations
if local video files are available. The video files can be downloaded
from the following website:
https://github.com/cvdfoundation/ava-dataset
Prior to running this script, please run download_and_preprocess_ava.sh to
download input videos.
Running this code as a module generates the data set on disk. First, the
required files are downloaded (_download_data) which enables constructing the
label map. Then (in generate_examples), for each split in the data set, the
metadata and image frames are generated from the annotations for each sequence
example (_generate_examples). The data set is written to disk as a set of
numbered TFRecord files.
Generating the data on disk can take considerable time and disk space.
(Image compression quality is the primary determiner of disk usage.)
If using the Tensorflow Object Detection API, set the input_type field
in the input_reader to TF_SEQUENCE_EXAMPLE. If using this script to generate
data for Context R-CNN scripts, the --examples_for_context flag should be
set to true, so that properly-formatted tf.example objects are written to disk.
This data is structured for per-clip action classification where images is
the sequence of images and labels are one-hot encoded values. See
as_dataset() for more details.
Note that the number of videos changes in the data set over time, so it will
likely be necessary to change the expected number of examples.
The argument video_path_format_string expects a value as such:
'/path/to/videos/{0}'
"""
import collections
import contextlib
import csv
import glob
import hashlib
import os
import random
import sys
import zipfile
from absl import app
from absl import flags
from absl import logging
import cv2
from six.moves import range
from six.moves import urllib
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import seq_example_util
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
POSSIBLE_TIMESTAMPS = range(902, 1798)
ANNOTATION_URL = 'https://research.google.com/ava/download/ava_v2.2.zip'
SECONDS_TO_MILLI = 1000
FILEPATTERN = 'ava_actions_%s_1fps_rgb'
SPLITS = {
'train': {
'shards': 1000,
'examples': 862663,
'csv': '',
'excluded-csv': ''
},
'val': {
'shards': 100,
'examples': 243029,
'csv': '',
'excluded-csv': ''
},
# Test doesn't have ground truth, so TF Records can't be created
'test': {
'shards': 100,
'examples': 0,
'csv': '',
'excluded-csv': ''
}
}
NUM_CLASSES = 80
def feature_list_feature(value):
return tf.train.FeatureList(feature=value)
class Ava(object):
"""Generates and loads the AVA Actions 2.2 data set."""
def __init__(self, path_to_output_dir, path_to_data_download):
if not path_to_output_dir:
raise ValueError('You must supply the path to the data directory.')
self.path_to_data_download = path_to_data_download
self.path_to_output_dir = path_to_output_dir
def generate_and_write_records(self,
splits_to_process='train,val,test',
video_path_format_string=None,
seconds_per_sequence=10,
hop_between_sequences=10,
examples_for_context=False):
"""Downloads data and generates sharded TFRecords.
Downloads the data files, generates metadata, and processes the metadata
with MediaPipe to produce tf.SequenceExamples for training. The resulting
files can be read with as_dataset(). After running this function the
original data files can be deleted.
Args:
splits_to_process: csv string of which splits to process. Allows
providing a custom CSV with the CSV flag. The original data is still
downloaded to generate the label_map.
video_path_format_string: The format string for the path to local files.
seconds_per_sequence: The length of each sequence, in seconds.
hop_between_sequences: The gap between the centers of
successive sequences.
examples_for_context: Whether to generate sequence examples with context
for context R-CNN.
"""
example_function = self._generate_sequence_examples
if examples_for_context:
example_function = self._generate_examples
logging.info('Downloading data.')
download_output = self._download_data()
for key in splits_to_process.split(','):
logging.info('Generating examples for split: %s', key)
all_metadata = list(example_function(
download_output[0][key][0], download_output[0][key][1],
download_output[1], seconds_per_sequence, hop_between_sequences,
video_path_format_string))
logging.info('An example of the metadata: ')
logging.info(all_metadata[0])
random.seed(47)
random.shuffle(all_metadata)
shards = SPLITS[key]['shards']
shard_names = [os.path.join(
self.path_to_output_dir, FILEPATTERN % key + '-%05d-of-%05d' % (
i, shards)) for i in range(shards)]
writers = [tf.io.TFRecordWriter(shard) for shard in shard_names]
with _close_on_exit(writers) as writers:
for i, seq_ex in enumerate(all_metadata):
writers[i % len(writers)].write(seq_ex.SerializeToString())
logging.info('Data extraction complete.')
def _generate_sequence_examples(self, annotation_file, excluded_file,
label_map, seconds_per_sequence,
hop_between_sequences,
video_path_format_string):
"""For each row in the annotation CSV, generates corresponding examples.
When iterating through frames for a single sequence example, skips over
excluded frames. When moving to the next sequence example, also skips over
excluded frames as if they don't exist. Generates equal-length sequence
examples, each with length seconds_per_sequence (1 fps) and gaps of
    hop_between_sequences frames (and seconds) between them, possibly greater
due to excluded frames.
Args:
annotation_file: path to the file of AVA CSV annotations.
excluded_file: path to a CSV file of excluded timestamps for each video.
label_map: an {int: string} label map.
seconds_per_sequence: The number of seconds per example in each example.
hop_between_sequences: The hop between sequences. If less than
seconds_per_sequence, will overlap.
video_path_format_string: File path format to glob video files.
Yields:
Each prepared tf.SequenceExample of metadata also containing video frames
"""
fieldnames = ['id', 'timestamp_seconds', 'xmin', 'ymin', 'xmax', 'ymax',
'action_label']
frame_excluded = {}
# create a sparse, nested map of videos and frame indices.
with open(excluded_file, 'r') as excluded:
reader = csv.reader(excluded)
for row in reader:
frame_excluded[(row[0], int(float(row[1])))] = True
with open(annotation_file, 'r') as annotations:
reader = csv.DictReader(annotations, fieldnames)
frame_annotations = collections.defaultdict(list)
ids = set()
      # aggregate by video and timestamp:
for row in reader:
ids.add(row['id'])
key = (row['id'], int(float(row['timestamp_seconds'])))
frame_annotations[key].append(row)
      # for each video, find aggregates near each sampled frame:
logging.info('Generating metadata...')
media_num = 1
for media_id in ids:
logging.info('%d/%d, ignore warnings.\n', media_num, len(ids))
media_num += 1
filepath = glob.glob(
video_path_format_string.format(media_id) + '*')[0]
cur_vid = cv2.VideoCapture(filepath)
width = cur_vid.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cur_vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
middle_frame_time = POSSIBLE_TIMESTAMPS[0]
while middle_frame_time < POSSIBLE_TIMESTAMPS[-1]:
start_time = middle_frame_time - seconds_per_sequence // 2 - (
0 if seconds_per_sequence % 2 == 0 else 1)
end_time = middle_frame_time + (seconds_per_sequence // 2)
total_boxes = []
total_labels = []
total_label_strings = []
total_images = []
total_source_ids = []
total_confidences = []
total_is_annotated = []
windowed_timestamp = start_time
while windowed_timestamp < end_time:
if (media_id, windowed_timestamp) in frame_excluded:
end_time += 1
windowed_timestamp += 1
logging.info('Ignoring and skipping excluded frame.')
continue
cur_vid.set(cv2.CAP_PROP_POS_MSEC,
(windowed_timestamp) * SECONDS_TO_MILLI)
_, image = cur_vid.read()
_, buffer = cv2.imencode('.jpg', image)
bufstring = buffer.tostring()
total_images.append(bufstring)
source_id = str(windowed_timestamp) + '_' + media_id
total_source_ids.append(source_id)
total_is_annotated.append(1)
boxes = []
labels = []
label_strings = []
confidences = []
for row in frame_annotations[(media_id, windowed_timestamp)]:
if len(row) > 2 and int(row['action_label']) in label_map:
boxes.append([float(row['ymin']), float(row['xmin']),
float(row['ymax']), float(row['xmax'])])
labels.append(int(row['action_label']))
label_strings.append(label_map[int(row['action_label'])])
confidences.append(1)
else:
logging.warning('Unknown label: %s', row['action_label'])
total_boxes.append(boxes)
total_labels.append(labels)
total_label_strings.append(label_strings)
total_confidences.append(confidences)
windowed_timestamp += 1
if total_boxes:
yield seq_example_util.make_sequence_example(
'AVA', media_id, total_images, int(height), int(width), 'jpeg',
total_source_ids, None, total_is_annotated, total_boxes,
total_label_strings, use_strs_for_source_id=True)
# Move middle_time_frame, skipping excluded frames
frames_mv = 0
frames_excluded_count = 0
while (frames_mv < hop_between_sequences + frames_excluded_count
and middle_frame_time + frames_mv < POSSIBLE_TIMESTAMPS[-1]):
frames_mv += 1
if (media_id, windowed_timestamp + frames_mv) in frame_excluded:
frames_excluded_count += 1
middle_frame_time += frames_mv
cur_vid.release()
def _generate_examples(self, annotation_file, excluded_file, label_map,
seconds_per_sequence, hop_between_sequences,
video_path_format_string):
"""For each row in the annotation CSV, generates examples.
When iterating through frames for a single example, skips
over excluded frames. Generates equal-length sequence examples, each with
length seconds_per_sequence (1 fps) and gaps of hop_between_sequences
    frames (and seconds) between them, possibly greater due to excluded frames.
Args:
annotation_file: path to the file of AVA CSV annotations.
excluded_file: path to a CSV file of excluded timestamps for each video.
label_map: an {int: string} label map.
seconds_per_sequence: The number of seconds per example in each example.
hop_between_sequences: The hop between sequences. If less than
seconds_per_sequence, will overlap.
video_path_format_string: File path format to glob video files.
Yields:
Each prepared tf.Example of metadata also containing video frames
"""
del seconds_per_sequence
del hop_between_sequences
fieldnames = ['id', 'timestamp_seconds', 'xmin', 'ymin', 'xmax', 'ymax',
'action_label']
frame_excluded = {}
# create a sparse, nested map of videos and frame indices.
with open(excluded_file, 'r') as excluded:
reader = csv.reader(excluded)
for row in reader:
frame_excluded[(row[0], int(float(row[1])))] = True
with open(annotation_file, 'r') as annotations:
reader = csv.DictReader(annotations, fieldnames)
frame_annotations = collections.defaultdict(list)
ids = set()
      # aggregate by video and timestamp:
for row in reader:
ids.add(row['id'])
key = (row['id'], int(float(row['timestamp_seconds'])))
frame_annotations[key].append(row)
      # for each video, find aggregates near each sampled frame:
logging.info('Generating metadata...')
media_num = 1
for media_id in ids:
logging.info('%d/%d, ignore warnings.\n', media_num, len(ids))
media_num += 1
filepath = glob.glob(
video_path_format_string.format(media_id) + '*')[0]
cur_vid = cv2.VideoCapture(filepath)
width = cur_vid.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cur_vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
middle_frame_time = POSSIBLE_TIMESTAMPS[0]
total_non_excluded = 0
while middle_frame_time < POSSIBLE_TIMESTAMPS[-1]:
if (media_id, middle_frame_time) not in frame_excluded:
total_non_excluded += 1
middle_frame_time += 1
middle_frame_time = POSSIBLE_TIMESTAMPS[0]
cur_frame_num = 0
while middle_frame_time < POSSIBLE_TIMESTAMPS[-1]:
cur_vid.set(cv2.CAP_PROP_POS_MSEC,
middle_frame_time * SECONDS_TO_MILLI)
_, image = cur_vid.read()
_, buffer = cv2.imencode('.jpg', image)
bufstring = buffer.tostring()
if (media_id, middle_frame_time) in frame_excluded:
middle_frame_time += 1
logging.info('Ignoring and skipping excluded frame.')
continue
cur_frame_num += 1
source_id = str(middle_frame_time) + '_' + media_id
xmins = []
xmaxs = []
ymins = []
ymaxs = []
areas = []
labels = []
label_strings = []
confidences = []
for row in frame_annotations[(media_id, middle_frame_time)]:
if len(row) > 2 and int(row['action_label']) in label_map:
xmins.append(float(row['xmin']))
xmaxs.append(float(row['xmax']))
ymins.append(float(row['ymin']))
ymaxs.append(float(row['ymax']))
areas.append(float((xmaxs[-1] - xmins[-1]) *
(ymaxs[-1] - ymins[-1])) / 2)
labels.append(int(row['action_label']))
label_strings.append(label_map[int(row['action_label'])])
confidences.append(1)
else:
logging.warning('Unknown label: %s', row['action_label'])
middle_frame_time += 1/3
          if abs(middle_frame_time - round(middle_frame_time)) < 0.0001:
middle_frame_time = round(middle_frame_time)
key = hashlib.sha256(bufstring).hexdigest()
date_captured_feature = (
'2020-06-17 00:%02d:%02d' % ((middle_frame_time - 900)*3 // 60,
(middle_frame_time - 900)*3 % 60))
context_feature_dict = {
'image/height':
dataset_util.int64_feature(int(height)),
'image/width':
dataset_util.int64_feature(int(width)),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/source_id':
dataset_util.bytes_feature(source_id.encode('utf8')),
'image/filename':
dataset_util.bytes_feature(source_id.encode('utf8')),
'image/encoded':
dataset_util.bytes_feature(bufstring),
'image/key/sha256':
dataset_util.bytes_feature(key.encode('utf8')),
'image/object/bbox/xmin':
dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax':
dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin':
dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(ymaxs),
'image/object/area':
dataset_util.float_list_feature(areas),
'image/object/class/label':
dataset_util.int64_list_feature(labels),
'image/object/class/text':
dataset_util.bytes_list_feature(label_strings),
'image/location':
dataset_util.bytes_feature(media_id.encode('utf8')),
'image/date_captured':
dataset_util.bytes_feature(
date_captured_feature.encode('utf8')),
'image/seq_num_frames':
dataset_util.int64_feature(total_non_excluded),
'image/seq_frame_num':
dataset_util.int64_feature(cur_frame_num),
'image/seq_id':
dataset_util.bytes_feature(media_id.encode('utf8')),
}
yield tf.train.Example(
features=tf.train.Features(feature=context_feature_dict))
cur_vid.release()
def _download_data(self):
"""Downloads and extracts data if not already available."""
    # six.moves exposes urlretrieve at the same location on Python 2 and 3.
    urlretrieve = urllib.request.urlretrieve
logging.info('Creating data directory.')
tf.io.gfile.makedirs(self.path_to_data_download)
logging.info('Downloading annotations.')
paths = {}
zip_path = os.path.join(self.path_to_data_download,
ANNOTATION_URL.split('/')[-1])
urlretrieve(ANNOTATION_URL, zip_path)
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(self.path_to_data_download)
for split in ['train', 'test', 'val']:
csv_path = os.path.join(self.path_to_data_download,
'ava_%s_v2.2.csv' % split)
excl_name = 'ava_%s_excluded_timestamps_v2.2.csv' % split
excluded_csv_path = os.path.join(self.path_to_data_download, excl_name)
SPLITS[split]['csv'] = csv_path
SPLITS[split]['excluded-csv'] = excluded_csv_path
paths[split] = (csv_path, excluded_csv_path)
label_map = self.get_label_map(os.path.join(
self.path_to_data_download,
'ava_action_list_v2.2_for_activitynet_2019.pbtxt'))
return paths, label_map
def get_label_map(self, path):
"""Parses a label map into {integer:string} format."""
label_map_dict = label_map_util.get_label_map_dict(path)
label_map_dict = {v: bytes(k, 'utf8') for k, v in label_map_dict.items()}
logging.info(label_map_dict)
return label_map_dict
@contextlib.contextmanager
def _close_on_exit(writers):
"""Call close on all writers on exit."""
try:
yield writers
finally:
for writer in writers:
writer.close()
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
Ava(flags.FLAGS.path_to_output_dir,
flags.FLAGS.path_to_download_data).generate_and_write_records(
flags.FLAGS.splits_to_process,
flags.FLAGS.video_path_format_string,
flags.FLAGS.seconds_per_sequence,
flags.FLAGS.hop_between_sequences,
flags.FLAGS.examples_for_context)
if __name__ == '__main__':
flags.DEFINE_string('path_to_download_data',
'',
'Path to directory to download data to.')
flags.DEFINE_string('path_to_output_dir',
'',
'Path to directory to write data to.')
flags.DEFINE_string('splits_to_process',
'train,val',
'Process these splits. Useful for custom data splits.')
flags.DEFINE_string('video_path_format_string',
None,
'The format string for the path to local video files. '
'Uses the Python string.format() syntax with possible '
'arguments of {video}, {start}, {end}, {label_name}, and '
'{split}, corresponding to columns of the data csvs.')
flags.DEFINE_integer('seconds_per_sequence',
10,
'The number of seconds per example in each example.'
'Always 1 when examples_for_context is True.')
flags.DEFINE_integer('hop_between_sequences',
10,
'The hop between sequences. If less than '
'seconds_per_sequence, will overlap. Always 1 when '
'examples_for_context is True.')
flags.DEFINE_boolean('examples_for_context',
False,
'Whether to generate examples instead of sequence '
'examples. If true, will generate tf.Example objects '
'for use in Context R-CNN.')
app.run(main)
| 22,448 | 40.495379 | 80 | py |
models | models-master/research/object_detection/dataset_tools/seq_example_util_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.seq_example_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import seq_example_util
from object_detection.utils import tf_version
class SeqExampleUtilTest(tf.test.TestCase):
def materialize_tensors(self, list_of_tensors):
if tf_version.is_tf2():
return [tensor.numpy() for tensor in list_of_tensors]
else:
with self.cached_session() as sess:
return sess.run(list_of_tensors)
def test_make_unlabeled_example(self):
num_frames = 5
image_height = 100
image_width = 200
dataset_name = b'unlabeled_dataset'
video_id = b'video_000'
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
image_source_ids = [str(idx) for idx in range(num_frames)]
images_list = tf.unstack(images, axis=0)
encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list]
encoded_images = self.materialize_tensors(encoded_images_list)
seq_example = seq_example_util.make_sequence_example(
dataset_name=dataset_name,
video_id=video_id,
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_format='JPEG',
image_source_ids=image_source_ids)
context_feature_dict = seq_example.context.feature
self.assertEqual(
dataset_name,
context_feature_dict['example/dataset_name'].bytes_list.value[0])
self.assertEqual(
0,
context_feature_dict['clip/start/timestamp'].int64_list.value[0])
self.assertEqual(
num_frames - 1,
context_feature_dict['clip/end/timestamp'].int64_list.value[0])
self.assertEqual(
num_frames,
context_feature_dict['clip/frames'].int64_list.value[0])
self.assertEqual(
3,
context_feature_dict['image/channels'].int64_list.value[0])
self.assertEqual(
b'JPEG',
context_feature_dict['image/format'].bytes_list.value[0])
self.assertEqual(
image_height,
context_feature_dict['image/height'].int64_list.value[0])
self.assertEqual(
image_width,
context_feature_dict['image/width'].int64_list.value[0])
self.assertEqual(
video_id,
context_feature_dict['clip/media_id'].bytes_list.value[0])
seq_feature_dict = seq_example.feature_lists.feature_list
self.assertLen(
seq_feature_dict['image/encoded'].feature[:],
num_frames)
timestamps = [
feature.int64_list.value[0] for feature
in seq_feature_dict['image/timestamp'].feature]
self.assertAllEqual(list(range(num_frames)), timestamps)
source_ids = [
feature.bytes_list.value[0] for feature
in seq_feature_dict['image/source_id'].feature]
self.assertAllEqual(
[six.ensure_binary(str(idx)) for idx in range(num_frames)],
source_ids)
def test_make_labeled_example(self):
num_frames = 3
image_height = 100
image_width = 200
dataset_name = b'unlabeled_dataset'
video_id = b'video_000'
labels = [b'dog', b'cat', b'wolf']
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list]
encoded_images = self.materialize_tensors(encoded_images_list)
timestamps = [100000, 110000, 120000]
is_annotated = [1, 0, 1]
bboxes = [
np.array([[0., 0., 0., 0.],
[0., 0., 1., 1.]], dtype=np.float32),
np.zeros([0, 4], dtype=np.float32),
np.array([], dtype=np.float32)
]
label_strings = [
np.array(labels),
np.array([]),
np.array([])
]
seq_example = seq_example_util.make_sequence_example(
dataset_name=dataset_name,
video_id=video_id,
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
timestamps=timestamps,
is_annotated=is_annotated,
bboxes=bboxes,
label_strings=label_strings)
context_feature_dict = seq_example.context.feature
self.assertEqual(
dataset_name,
context_feature_dict['example/dataset_name'].bytes_list.value[0])
self.assertEqual(
timestamps[0],
context_feature_dict['clip/start/timestamp'].int64_list.value[0])
self.assertEqual(
timestamps[-1],
context_feature_dict['clip/end/timestamp'].int64_list.value[0])
self.assertEqual(
num_frames,
context_feature_dict['clip/frames'].int64_list.value[0])
seq_feature_dict = seq_example.feature_lists.feature_list
self.assertLen(
seq_feature_dict['image/encoded'].feature[:],
num_frames)
actual_timestamps = [
feature.int64_list.value[0] for feature
in seq_feature_dict['image/timestamp'].feature]
self.assertAllEqual(timestamps, actual_timestamps)
# Frame 0.
self.assertAllEqual(
is_annotated[0],
seq_feature_dict['region/is_annotated'].feature[0].int64_list.value[0])
self.assertAllClose(
[0., 0.],
seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 0.],
seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 1.],
seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 1.],
seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:])
self.assertAllEqual(
labels,
seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:])
# Frame 1.
self.assertAllEqual(
is_annotated[1],
seq_feature_dict['region/is_annotated'].feature[1].int64_list.value[0])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:])
self.assertAllEqual(
[],
seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:])
def test_make_labeled_example_with_context_features(self):
num_frames = 2
image_height = 100
image_width = 200
dataset_name = b'unlabeled_dataset'
video_id = b'video_000'
labels = [b'dog', b'cat']
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list]
encoded_images = self.materialize_tensors(encoded_images_list)
timestamps = [100000, 110000]
is_annotated = [1, 0]
bboxes = [
np.array([[0., 0., 0., 0.],
[0., 0., 1., 1.]], dtype=np.float32),
np.zeros([0, 4], dtype=np.float32)
]
label_strings = [
np.array(labels),
np.array([])
]
context_features = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
context_feature_length = [3]
context_features_image_id_list = [b'im_1', b'im_2']
seq_example = seq_example_util.make_sequence_example(
dataset_name=dataset_name,
video_id=video_id,
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
timestamps=timestamps,
is_annotated=is_annotated,
bboxes=bboxes,
label_strings=label_strings,
context_features=context_features,
context_feature_length=context_feature_length,
context_features_image_id_list=context_features_image_id_list)
context_feature_dict = seq_example.context.feature
self.assertEqual(
dataset_name,
context_feature_dict['example/dataset_name'].bytes_list.value[0])
self.assertEqual(
timestamps[0],
context_feature_dict['clip/start/timestamp'].int64_list.value[0])
self.assertEqual(
timestamps[-1],
context_feature_dict['clip/end/timestamp'].int64_list.value[0])
self.assertEqual(
num_frames,
context_feature_dict['clip/frames'].int64_list.value[0])
self.assertAllClose(
context_features,
context_feature_dict['image/context_features'].float_list.value[:])
self.assertEqual(
context_feature_length[0],
context_feature_dict[
'image/context_feature_length'].int64_list.value[0])
self.assertEqual(
context_features_image_id_list,
context_feature_dict[
'image/context_features_image_id_list'].bytes_list.value[:])
seq_feature_dict = seq_example.feature_lists.feature_list
self.assertLen(
seq_feature_dict['image/encoded'].feature[:],
num_frames)
actual_timestamps = [
feature.int64_list.value[0] for feature
in seq_feature_dict['image/timestamp'].feature]
self.assertAllEqual(timestamps, actual_timestamps)
# Frame 0.
self.assertAllEqual(
is_annotated[0],
seq_feature_dict['region/is_annotated'].feature[0].int64_list.value[0])
self.assertAllClose(
[0., 0.],
seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 0.],
seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 1.],
seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 1.],
seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:])
self.assertAllEqual(
labels,
seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:])
# Frame 1.
self.assertAllEqual(
is_annotated[1],
seq_feature_dict['region/is_annotated'].feature[1].int64_list.value[0])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:])
self.assertAllEqual(
[],
seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:])
def test_make_labeled_example_with_predictions(self):
num_frames = 2
image_height = 100
image_width = 200
dataset_name = b'unlabeled_dataset'
video_id = b'video_000'
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list]
encoded_images = self.materialize_tensors(encoded_images_list)
bboxes = [
np.array([[0., 0., 0.75, 0.75],
[0., 0., 1., 1.]], dtype=np.float32),
np.array([[0., 0.25, 0.5, 0.75]], dtype=np.float32)
]
label_strings = [
np.array(['cat', 'frog']),
np.array(['cat'])
]
detection_bboxes = [
np.array([[0., 0., 0.75, 0.75]], dtype=np.float32),
np.zeros([0, 4], dtype=np.float32)
]
detection_classes = [
np.array([5], dtype=np.int64),
np.array([], dtype=np.int64)
]
detection_scores = [
np.array([0.9], dtype=np.float32),
np.array([], dtype=np.float32)
]
seq_example = seq_example_util.make_sequence_example(
dataset_name=dataset_name,
video_id=video_id,
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
bboxes=bboxes,
label_strings=label_strings,
detection_bboxes=detection_bboxes,
detection_classes=detection_classes,
detection_scores=detection_scores)
context_feature_dict = seq_example.context.feature
self.assertEqual(
dataset_name,
context_feature_dict['example/dataset_name'].bytes_list.value[0])
self.assertEqual(
0,
context_feature_dict['clip/start/timestamp'].int64_list.value[0])
self.assertEqual(
1,
context_feature_dict['clip/end/timestamp'].int64_list.value[0])
self.assertEqual(
num_frames,
context_feature_dict['clip/frames'].int64_list.value[0])
seq_feature_dict = seq_example.feature_lists.feature_list
self.assertLen(
seq_feature_dict['image/encoded'].feature[:],
num_frames)
actual_timestamps = [
feature.int64_list.value[0] for feature
in seq_feature_dict['image/timestamp'].feature]
self.assertAllEqual([0, 1], actual_timestamps)
# Frame 0.
self.assertAllEqual(
1,
seq_feature_dict['region/is_annotated'].feature[0].int64_list.value[0])
self.assertAllClose(
[0., 0.],
seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 0.],
seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:])
self.assertAllClose(
[0.75, 1.],
seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:])
self.assertAllClose(
[0.75, 1.],
seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:])
self.assertAllEqual(
[b'cat', b'frog'],
seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:])
self.assertAllClose(
[0.],
seq_feature_dict[
'predicted/region/bbox/ymin'].feature[0].float_list.value[:])
self.assertAllClose(
[0.],
seq_feature_dict[
'predicted/region/bbox/xmin'].feature[0].float_list.value[:])
self.assertAllClose(
[0.75],
seq_feature_dict[
'predicted/region/bbox/ymax'].feature[0].float_list.value[:])
self.assertAllClose(
[0.75],
seq_feature_dict[
'predicted/region/bbox/xmax'].feature[0].float_list.value[:])
self.assertAllEqual(
[5],
seq_feature_dict[
'predicted/region/label/index'].feature[0].int64_list.value[:])
self.assertAllClose(
[0.9],
seq_feature_dict[
'predicted/region/label/confidence'].feature[0].float_list.value[:])
# Frame 1.
self.assertAllEqual(
1,
seq_feature_dict['region/is_annotated'].feature[1].int64_list.value[0])
self.assertAllClose(
[0.0],
seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:])
self.assertAllClose(
[0.25],
seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:])
self.assertAllClose(
[0.5],
seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:])
self.assertAllClose(
[0.75],
seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:])
self.assertAllEqual(
[b'cat'],
seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:])
self.assertAllClose(
[],
seq_feature_dict[
'predicted/region/bbox/ymin'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict[
'predicted/region/bbox/xmin'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict[
'predicted/region/bbox/ymax'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict[
'predicted/region/bbox/xmax'].feature[1].float_list.value[:])
self.assertAllEqual(
[],
seq_feature_dict[
'predicted/region/label/index'].feature[1].int64_list.value[:])
self.assertAllClose(
[],
seq_feature_dict[
'predicted/region/label/confidence'].feature[1].float_list.value[:])
if __name__ == '__main__':
tf.test.main()
| 17,289 | 34.797101 | 80 | py |
models | models-master/research/object_detection/dataset_tools/oid_hierarchical_labels_expansion_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the OpenImages label expansion (OIDHierarchicalLabelsExpansion)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import oid_hierarchical_labels_expansion
def create_test_data():
hierarchy = {
'LabelName':
'a',
'Subcategory': [{
'LabelName': 'b'
}, {
'LabelName':
'c',
'Subcategory': [{
'LabelName': 'd'
}, {
'LabelName': 'e'
}, {
'LabelName': 'f',
'Subcategory': [{
'LabelName': 'd'
},]
}]
}, {
'LabelName': 'f',
'Subcategory': [{
'LabelName': 'd'
},]
}]
}
bbox_rows = [
'123,xclick,b,1,0.1,0.2,0.1,0.2,1,1,0,0,0',
'123,xclick,d,1,0.2,0.3,0.1,0.2,1,1,0,0,0'
]
label_rows = [
'123,verification,b,0', '123,verification,c,0', '124,verification,d,1'
]
segm_rows = [
'123,cc,b,100,100,0.1,0.2,0.1,0.2,0,MASK',
'123,cc,d,100,100,0.2,0.3,0.1,0.2,0,MASK',
]
return hierarchy, bbox_rows, segm_rows, label_rows
class HierarchicalLabelsExpansionTest(tf.test.TestCase):
def test_bbox_expansion(self):
hierarchy, bbox_rows, _, _ = create_test_data()
expansion_generator = (
oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion(
hierarchy))
all_result_rows = []
for row in bbox_rows:
all_result_rows.extend(
expansion_generator.expand_boxes_or_segments_from_csv(row, 2))
self.assertItemsEqual([
'123,xclick,b,1,0.1,0.2,0.1,0.2,1,1,0,0,0',
'123,xclick,d,1,0.2,0.3,0.1,0.2,1,1,0,0,0',
'123,xclick,f,1,0.2,0.3,0.1,0.2,1,1,0,0,0',
'123,xclick,c,1,0.2,0.3,0.1,0.2,1,1,0,0,0'
], all_result_rows)
def test_segm_expansion(self):
hierarchy, _, segm_rows, _ = create_test_data()
expansion_generator = (
oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion(
hierarchy))
all_result_rows = []
for row in segm_rows:
all_result_rows.extend(
expansion_generator.expand_boxes_or_segments_from_csv(row, 2))
self.assertItemsEqual([
'123,cc,b,100,100,0.1,0.2,0.1,0.2,0,MASK',
'123,cc,d,100,100,0.2,0.3,0.1,0.2,0,MASK',
'123,cc,f,100,100,0.2,0.3,0.1,0.2,0,MASK',
'123,cc,c,100,100,0.2,0.3,0.1,0.2,0,MASK'
], all_result_rows)
def test_labels_expansion(self):
hierarchy, _, _, label_rows = create_test_data()
expansion_generator = (
oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion(
hierarchy))
all_result_rows = []
for row in label_rows:
all_result_rows.extend(
expansion_generator.expand_labels_from_csv(row, 2, 3))
self.assertItemsEqual([
'123,verification,b,0', '123,verification,c,0', '123,verification,d,0',
'123,verification,f,0', '123,verification,e,0', '124,verification,d,1',
'124,verification,f,1', '124,verification,c,1'
], all_result_rows)
if __name__ == '__main__':
tf.test.main()
| 3,915 | 32.470085 | 80 | py |
models | models-master/research/object_detection/dataset_tools/create_coco_tf_record_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for create_coco_tf_record.py."""
import io
import json
import os
import numpy as np
import PIL.Image
import six
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import create_coco_tf_record
class CreateCocoTFRecordTest(tf.test.TestCase):
def _assertProtoEqual(self, proto_field, expectation):
"""Helper function to assert if a proto field equals some value.
Args:
proto_field: The protobuf field to compare.
expectation: The expected value of the protobuf field.
"""
proto_list = [p for p in proto_field]
self.assertListEqual(proto_list, expectation)
def _assertProtoClose(self, proto_field, expectation):
"""Helper function to assert if a proto field nearly equals some value.
Args:
proto_field: The protobuf field to compare.
expectation: The expected value of the protobuf field.
"""
proto_list = [p for p in proto_field]
self.assertAllClose(proto_list, expectation)
def test_create_tf_example(self):
image_file_name = 'tmp_image.jpg'
image_data = np.random.rand(256, 256, 3)
tmp_dir = self.get_temp_dir()
save_path = os.path.join(tmp_dir, image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
image = {
'file_name': image_file_name,
'height': 256,
'width': 256,
'id': 11,
}
annotations_list = [{
'area': .5,
'iscrowd': False,
'image_id': 11,
'bbox': [64, 64, 128, 128],
'category_id': 2,
'id': 1000,
}]
image_dir = tmp_dir
category_index = {
1: {
'name': 'dog',
'id': 1
},
2: {
'name': 'cat',
'id': 2
},
3: {
'name': 'human',
'id': 3
}
}
(_, example,
num_annotations_skipped, _, _) = create_coco_tf_record.create_tf_example(
image, annotations_list, image_dir, category_index)
self.assertEqual(num_annotations_skipped, 0)
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[six.b(image_file_name)])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[six.b(str(image['id']))])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value,
[six.b('jpeg')])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[six.b('cat')])
def test_create_tf_example_with_instance_masks(self):
image_file_name = 'tmp_image.jpg'
image_data = np.random.rand(8, 8, 3)
tmp_dir = self.get_temp_dir()
save_path = os.path.join(tmp_dir, image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
image = {
'file_name': image_file_name,
'height': 8,
'width': 8,
'id': 11,
}
annotations_list = [{
'area': .5,
'iscrowd': False,
'image_id': 11,
'bbox': [0, 0, 8, 8],
'segmentation': [[4, 0, 0, 0, 0, 4], [8, 4, 4, 8, 8, 8]],
'category_id': 1,
'id': 1000,
}]
image_dir = tmp_dir
category_index = {
1: {
'name': 'dog',
'id': 1
},
}
(_, example,
num_annotations_skipped, _, _) = create_coco_tf_record.create_tf_example(
image, annotations_list, image_dir, category_index, include_masks=True)
self.assertEqual(num_annotations_skipped, 0)
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [8])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [8])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[six.b(image_file_name)])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[six.b(str(image['id']))])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value,
[six.b('jpeg')])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[1])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[1])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[six.b('dog')])
encoded_mask_pngs = [
io.BytesIO(encoded_masks) for encoded_masks in example.features.feature[
'image/object/mask'].bytes_list.value
]
pil_masks = [
np.array(PIL.Image.open(encoded_mask_png))
for encoded_mask_png in encoded_mask_pngs
]
self.assertEqual(len(pil_masks), 1)
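    # Both polygons belong to the same annotation, so they are rendered into a
    # single binary mask covering the upper-left and lower-right corner
    # triangles of the 8x8 image.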
self.assertAllEqual(pil_masks[0],
[[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1]])
def test_create_tf_example_with_keypoints(self):
image_dir = self.get_temp_dir()
image_file_name = 'tmp_image.jpg'
image_data = np.random.randint(low=0, high=256, size=(256, 256, 3)).astype(
np.uint8)
save_path = os.path.join(image_dir, image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
image = {
'file_name': image_file_name,
'height': 256,
'width': 256,
'id': 11,
}
min_x, min_y = 64, 64
max_x, max_y = 128, 128
keypoints = []
num_visible_keypoints = 0
xv = []
yv = []
vv = []
for _ in range(17):
xc = min_x + int(np.random.rand()*(max_x - min_x))
yc = min_y + int(np.random.rand()*(max_y - min_y))
vis = np.random.randint(0, 3)
xv.append(xc)
yv.append(yc)
vv.append(vis)
keypoints.extend([xc, yc, vis])
num_visible_keypoints += (vis > 0)
annotations_list = [{
'area': 0.5,
'iscrowd': False,
'image_id': 11,
'bbox': [64, 64, 128, 128],
'category_id': 1,
'id': 1000
}]
keypoint_annotations_dict = {
1000: {
'keypoints': keypoints,
'num_keypoints': num_visible_keypoints
}
}
category_index = {
1: {
'name': 'person',
'id': 1
}
}
_, example, _, num_keypoint_annotation_skipped, _ = (
create_coco_tf_record.create_tf_example(
image,
annotations_list,
image_dir,
category_index,
include_masks=False,
keypoint_annotations_dict=keypoint_annotations_dict))
self.assertEqual(num_keypoint_annotation_skipped, 0)
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[six.b(image_file_name)])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[six.b(str(image['id']))])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value,
[six.b('jpeg')])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[six.b('person')])
self._assertProtoClose(
example.features.feature['image/object/keypoint/x'].float_list.value,
np.array(xv, dtype=np.float32) / 256)
self._assertProtoClose(
example.features.feature['image/object/keypoint/y'].float_list.value,
np.array(yv, dtype=np.float32) / 256)
self._assertProtoEqual(
example.features.feature['image/object/keypoint/text'].bytes_list.value,
create_coco_tf_record._COCO_KEYPOINT_NAMES)
self._assertProtoEqual(
example.features.feature[
'image/object/keypoint/visibility'].int64_list.value, vv)
def test_create_tf_example_with_dense_pose(self):
image_dir = self.get_temp_dir()
image_file_name = 'tmp_image.jpg'
image_data = np.random.randint(low=0, high=256, size=(256, 256, 3)).astype(
np.uint8)
save_path = os.path.join(image_dir, image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
image = {
'file_name': image_file_name,
'height': 256,
'width': 256,
'id': 11,
}
min_x, min_y = 64, 64
max_x, max_y = 128, 128
keypoints = []
num_visible_keypoints = 0
xv = []
yv = []
vv = []
for _ in range(17):
xc = min_x + int(np.random.rand()*(max_x - min_x))
yc = min_y + int(np.random.rand()*(max_y - min_y))
vis = np.random.randint(0, 3)
xv.append(xc)
yv.append(yc)
vv.append(vis)
keypoints.extend([xc, yc, vis])
num_visible_keypoints += (vis > 0)
annotations_list = [{
'area': 0.5,
'iscrowd': False,
'image_id': 11,
'bbox': [64, 64, 128, 128],
'category_id': 1,
'id': 1000
}]
num_points = 45
dp_i = np.random.randint(1, 25, (num_points,)).astype(np.float32)
dp_u = np.random.randn(num_points)
dp_v = np.random.randn(num_points)
dp_x = np.random.rand(num_points)*256.
dp_y = np.random.rand(num_points)*256.
densepose_annotations_dict = {
1000: {
'dp_I': dp_i,
'dp_U': dp_u,
'dp_V': dp_v,
'dp_x': dp_x,
'dp_y': dp_y,
'bbox': [64, 64, 128, 128],
}
}
category_index = {
1: {
'name': 'person',
'id': 1
}
}
_, example, _, _, num_densepose_annotation_skipped = (
create_coco_tf_record.create_tf_example(
image,
annotations_list,
image_dir,
category_index,
include_masks=False,
densepose_annotations_dict=densepose_annotations_dict))
self.assertEqual(num_densepose_annotation_skipped, 0)
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[six.b(image_file_name)])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[six.b(str(image['id']))])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value,
[six.b('jpeg')])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[six.b('person')])
self._assertProtoEqual(
example.features.feature['image/object/densepose/num'].int64_list.value,
[num_points])
self.assertAllEqual(
example.features.feature[
'image/object/densepose/part_index'].int64_list.value,
dp_i.astype(np.int64) - create_coco_tf_record._DP_PART_ID_OFFSET)
self.assertAllClose(
example.features.feature['image/object/densepose/u'].float_list.value,
dp_u)
self.assertAllClose(
example.features.feature['image/object/densepose/v'].float_list.value,
dp_v)
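    # dp_x/dp_y are annotated relative to a 256x256 patch spanning the box
    # [64, 64, 128, 128]; the expected values map them back to absolute pixel
    # coordinates and then normalize by the image size.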
expected_dp_x = (64 + dp_x * 128. / 256.) / 256.
expected_dp_y = (64 + dp_y * 128. / 256.) / 256.
self.assertAllClose(
example.features.feature['image/object/densepose/x'].float_list.value,
expected_dp_x)
self.assertAllClose(
example.features.feature['image/object/densepose/y'].float_list.value,
expected_dp_y)
def test_create_sharded_tf_record(self):
tmp_dir = self.get_temp_dir()
image_paths = ['tmp1_image.jpg', 'tmp2_image.jpg']
for image_path in image_paths:
image_data = np.random.rand(256, 256, 3)
save_path = os.path.join(tmp_dir, image_path)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
images = [{
'file_name': image_paths[0],
'height': 256,
'width': 256,
'id': 11,
}, {
'file_name': image_paths[1],
'height': 256,
'width': 256,
'id': 12,
}]
annotations = [{
'area': .5,
'iscrowd': False,
'image_id': 11,
'bbox': [64, 64, 128, 128],
'category_id': 2,
'id': 1000,
}]
category_index = [{
'name': 'dog',
'id': 1
}, {
'name': 'cat',
'id': 2
}, {
'name': 'human',
'id': 3
}]
groundtruth_data = {'images': images, 'annotations': annotations,
'categories': category_index}
annotation_file = os.path.join(tmp_dir, 'annotation.json')
with open(annotation_file, 'w') as annotation_fid:
json.dump(groundtruth_data, annotation_fid)
output_path = os.path.join(tmp_dir, 'out.record')
create_coco_tf_record._create_tf_record_from_coco_annotations(
annotation_file,
tmp_dir,
output_path,
False,
2)
self.assertTrue(os.path.exists(output_path + '-00000-of-00002'))
self.assertTrue(os.path.exists(output_path + '-00001-of-00002'))
if __name__ == '__main__':
tf.test.main()
| 16,192 | 31.516064 | 80 | py |
models | models-master/research/object_detection/dataset_tools/create_pascal_tf_record_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for create_pascal_tf_record.py."""
import os
import numpy as np
import PIL.Image
import six
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import create_pascal_tf_record
class CreatePascalTFRecordTest(tf.test.TestCase):
def _assertProtoEqual(self, proto_field, expectation):
"""Helper function to assert if a proto field equals some value.
Args:
proto_field: The protobuf field to compare.
expectation: The expected value of the protobuf field.
"""
proto_list = [p for p in proto_field]
self.assertListEqual(proto_list, expectation)
def test_dict_to_tf_example(self):
image_file_name = 'tmp_image.jpg'
image_data = np.random.rand(256, 256, 3)
save_path = os.path.join(self.get_temp_dir(), image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
data = {
'folder': '',
'filename': image_file_name,
'size': {
'height': 256,
'width': 256,
},
'object': [
{
'difficult': 1,
'bndbox': {
'xmin': 64,
'ymin': 64,
'xmax': 192,
'ymax': 192,
},
'name': 'person',
'truncated': 0,
'pose': '',
},
],
}
label_map_dict = {
'background': 0,
'person': 1,
'notperson': 2,
}
example = create_pascal_tf_record.dict_to_tf_example(
data, self.get_temp_dir(), label_map_dict, image_subdirectory='')
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[six.b(image_file_name)])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[six.b(image_file_name)])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value,
[six.b('jpeg')])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[six.b('person')])
self._assertProtoEqual(
example.features.feature['image/object/class/label'].int64_list.value,
[1])
self._assertProtoEqual(
example.features.feature['image/object/difficult'].int64_list.value,
[1])
self._assertProtoEqual(
example.features.feature['image/object/truncated'].int64_list.value,
[0])
self._assertProtoEqual(
example.features.feature['image/object/view'].bytes_list.value,
[six.b('')])
if __name__ == '__main__':
tf.test.main()
| 4,043 | 32.147541 | 80 | py |
models | models-master/research/object_detection/dataset_tools/oid_hierarchical_labels_expansion.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""An executable to expand image-level labels, boxes and segments.
The expansion is performed using class hierarchy, provided in JSON file.
The expected file formats are the following:
- for box and segment files: CSV file is expected to have LabelName field
- for image-level labels: CSV file is expected to have LabelName and Confidence
fields
Note that LabelName is the only field used for expansion.
Example usage:
python models/research/object_detection/dataset_tools/\
oid_hierarchical_labels_expansion.py \
--json_hierarchy_file=<path to JSON hierarchy> \
--input_annotations=<input csv file> \
--output_annotations=<output csv file> \
--annotation_type=<1 (for boxes and segments) or 2 (for image-level labels)>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
from absl import app
from absl import flags
import six
flags.DEFINE_string(
'json_hierarchy_file', None,
'Path to the file containing label hierarchy in JSON format.')
flags.DEFINE_string(
    'input_annotations', None, 'Path to Open Images annotations file '
'(either bounding boxes, segments or image-level labels).')
flags.DEFINE_string('output_annotations', None, 'Path to the output file.')
flags.DEFINE_integer(
'annotation_type', None,
    'Type of the input annotations: 1 - boxes or segments, '
'2 - image-level labels.'
)
FLAGS = flags.FLAGS
def _update_dict(initial_dict, update):
  """Updates a dictionary of sets in place with the contents of update.
  Args:
    initial_dict: initial dictionary mapping keys to sets; updated in place.
    update: dictionary mapping keys to iterables that are merged into
      initial_dict.
"""
for key, value_list in update.items():
if key in initial_dict:
initial_dict[key].update(value_list)
else:
initial_dict[key] = set(value_list)
def _build_plain_hierarchy(hierarchy, skip_root=False):
"""Expands tree hierarchy representation to parent-child dictionary.
Args:
hierarchy: labels hierarchy as JSON file.
    skip_root: if True, skips the root node during processing (used when all
      classes in the hierarchy are collected under a single virtual node).
Returns:
    keyed_parent - dictionary mapping each parent node to the set of all its
      children (direct and indirect).
    keyed_child - dictionary mapping each child node to the set of all its
      parents (direct and indirect).
    children - all children of the current node.
"""
all_children = set([])
all_keyed_parent = {}
all_keyed_child = {}
if 'Subcategory' in hierarchy:
for node in hierarchy['Subcategory']:
keyed_parent, keyed_child, children = _build_plain_hierarchy(node)
      # Update is not done through dict.update() since some children have
      # multiple parents in the hierarchy.
_update_dict(all_keyed_parent, keyed_parent)
_update_dict(all_keyed_child, keyed_child)
all_children.update(children)
if not skip_root:
all_keyed_parent[hierarchy['LabelName']] = copy.deepcopy(all_children)
all_children.add(hierarchy['LabelName'])
for child, _ in all_keyed_child.items():
all_keyed_child[child].add(hierarchy['LabelName'])
all_keyed_child[hierarchy['LabelName']] = set([])
return all_keyed_parent, all_keyed_child, all_children
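# A minimal sketch of the flattened dictionaries produced above, using a
# made-up, hypothetical hierarchy rather than the real Open Images one.
def _plain_hierarchy_sketch():
  """Illustrative only; not used by the expansion pipeline."""
  example_hierarchy = {
      'LabelName': 'root',
      'Subcategory': [{
          'LabelName': 'animal',
          'Subcategory': [{'LabelName': 'cat'}]
      }]
  }
  keyed_parent, keyed_child, children = _build_plain_hierarchy(
      example_hierarchy, skip_root=True)
  # keyed_parent == {'animal': {'cat'}, 'cat': set()}
  # keyed_child == {'cat': {'animal'}, 'animal': set()}
  # children == {'animal', 'cat'}
  return keyed_parent, keyed_child, children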
class OIDHierarchicalLabelsExpansion(object):
  """Main class to perform hierarchical expansion of labels."""
def __init__(self, hierarchy):
"""Constructor.
Args:
hierarchy: labels hierarchy as JSON object.
"""
self._hierarchy_keyed_parent, self._hierarchy_keyed_child, _ = (
_build_plain_hierarchy(hierarchy, skip_root=True))
def expand_boxes_or_segments_from_csv(self, csv_row,
labelname_column_index=1):
"""Expands a row containing bounding boxes/segments from CSV file.
Args:
csv_row: a single row of Open Images released groundtruth file.
labelname_column_index: 0-based index of LabelName column in CSV file.
Returns:
a list of strings (including the initial row) corresponding to the ground
      truth expanded to multiple annotations for evaluation with Open Images
Challenge 2018/2019 metrics.
"""
# Row header is expected to be the following for boxes:
# ImageID,LabelName,Confidence,XMin,XMax,YMin,YMax,IsGroupOf
# Row header is expected to be the following for segments:
# ImageID,LabelName,ImageWidth,ImageHeight,XMin,XMax,YMin,YMax,
# IsGroupOf,Mask
split_csv_row = six.ensure_str(csv_row).split(',')
result = [csv_row]
assert split_csv_row[
labelname_column_index] in self._hierarchy_keyed_child
parent_nodes = self._hierarchy_keyed_child[
split_csv_row[labelname_column_index]]
for parent_node in parent_nodes:
split_csv_row[labelname_column_index] = parent_node
result.append(','.join(split_csv_row))
return result
def expand_labels_from_csv(self,
csv_row,
labelname_column_index=1,
confidence_column_index=2):
"""Expands a row containing labels from CSV file.
Args:
csv_row: a single row of Open Images released groundtruth file.
labelname_column_index: 0-based index of LabelName column in CSV file.
confidence_column_index: 0-based index of Confidence column in CSV file.
Returns:
a list of strings (including the initial row) corresponding to the ground
      truth expanded to multiple annotations for evaluation with Open Images
Challenge 2018/2019 metrics.
"""
# Row header is expected to be exactly:
# ImageID,Source,LabelName,Confidence
split_csv_row = six.ensure_str(csv_row).split(',')
result = [csv_row]
if int(split_csv_row[confidence_column_index]) == 1:
assert split_csv_row[
labelname_column_index] in self._hierarchy_keyed_child
parent_nodes = self._hierarchy_keyed_child[
split_csv_row[labelname_column_index]]
for parent_node in parent_nodes:
split_csv_row[labelname_column_index] = parent_node
result.append(','.join(split_csv_row))
else:
assert split_csv_row[
labelname_column_index] in self._hierarchy_keyed_parent
child_nodes = self._hierarchy_keyed_parent[
split_csv_row[labelname_column_index]]
for child_node in child_nodes:
split_csv_row[labelname_column_index] = child_node
result.append(','.join(split_csv_row))
return result
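# A short usage sketch with hypothetical CSV rows and the same made-up
# hierarchy as in _plain_hierarchy_sketch; real rows come from the released
# Open Images annotation files and real column indices from their headers.
def _expansion_sketch():
  """Illustrative only; not used by the expansion pipeline."""
  hierarchy = {
      'LabelName': 'root',
      'Subcategory': [{
          'LabelName': 'animal',
          'Subcategory': [{'LabelName': 'cat'}]
      }]
  }
  expander = OIDHierarchicalLabelsExpansion(hierarchy)
  # A box row for 'cat' is duplicated for each of its ancestors ('animal').
  box_rows = expander.expand_boxes_or_segments_from_csv(
      'img1,xclick,cat,1,0.1,0.2,0.1,0.2,0,0,0,0,0', labelname_column_index=2)
  # A positive image-level label propagates up to its parents; a negative
  # label would instead propagate down to its children.
  label_rows = expander.expand_labels_from_csv(
      'img1,verification,cat,1', labelname_column_index=2,
      confidence_column_index=3)
  return box_rows, label_rows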
def main(unused_args):
del unused_args
with open(FLAGS.json_hierarchy_file) as f:
hierarchy = json.load(f)
expansion_generator = OIDHierarchicalLabelsExpansion(hierarchy)
labels_file = False
if FLAGS.annotation_type == 2:
labels_file = True
elif FLAGS.annotation_type != 1:
print('--annotation_type expected value is 1 or 2.')
return -1
confidence_column_index = -1
labelname_column_index = -1
with open(FLAGS.input_annotations, 'r') as source:
with open(FLAGS.output_annotations, 'w') as target:
header = source.readline()
target.writelines([header])
column_names = header.strip().split(',')
labelname_column_index = column_names.index('LabelName')
if labels_file:
confidence_column_index = column_names.index('Confidence')
for line in source:
if labels_file:
expanded_lines = expansion_generator.expand_labels_from_csv(
line, labelname_column_index, confidence_column_index)
else:
expanded_lines = (
expansion_generator.expand_boxes_or_segments_from_csv(
line, labelname_column_index))
target.writelines(expanded_lines)
if __name__ == '__main__':
flags.mark_flag_as_required('json_hierarchy_file')
flags.mark_flag_as_required('input_annotations')
flags.mark_flag_as_required('output_annotations')
flags.mark_flag_as_required('annotation_type')
app.run(main)
| 8,501 | 35.48927 | 80 | py |
models | models-master/research/object_detection/dataset_tools/create_kitti_tf_record_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for create_kitti_tf_record.py."""
import os
import numpy as np
import PIL.Image
import six
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import create_kitti_tf_record
class CreateKittiTFRecordTest(tf.test.TestCase):
def _assertProtoEqual(self, proto_field, expectation):
"""Helper function to assert if a proto field equals some value.
Args:
proto_field: The protobuf field to compare.
expectation: The expected value of the protobuf field.
"""
proto_list = [p for p in proto_field]
self.assertListEqual(proto_list, expectation)
def test_dict_to_tf_example(self):
image_file_name = 'tmp_image.jpg'
image_data = np.random.rand(256, 256, 3)
save_path = os.path.join(self.get_temp_dir(), image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
annotations = {}
annotations['2d_bbox_left'] = np.array([64])
annotations['2d_bbox_top'] = np.array([64])
annotations['2d_bbox_right'] = np.array([192])
annotations['2d_bbox_bottom'] = np.array([192])
annotations['type'] = ['car']
annotations['truncated'] = np.array([1])
annotations['alpha'] = np.array([2])
annotations['3d_bbox_height'] = np.array([10])
annotations['3d_bbox_width'] = np.array([11])
annotations['3d_bbox_length'] = np.array([12])
annotations['3d_bbox_x'] = np.array([13])
annotations['3d_bbox_y'] = np.array([14])
annotations['3d_bbox_z'] = np.array([15])
annotations['3d_bbox_rot_y'] = np.array([4])
label_map_dict = {
'background': 0,
'car': 1,
}
example = create_kitti_tf_record.prepare_example(
save_path,
annotations,
label_map_dict)
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[six.b(save_path)])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[six.b(save_path)])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value,
[six.b('png')])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[six.b('car')])
self._assertProtoEqual(
example.features.feature['image/object/class/label'].int64_list.value,
[1])
self._assertProtoEqual(
example.features.feature['image/object/truncated'].float_list.value,
[1])
self._assertProtoEqual(
example.features.feature['image/object/alpha'].float_list.value,
[2])
self._assertProtoEqual(example.features.feature[
'image/object/3d_bbox/height'].float_list.value, [10])
self._assertProtoEqual(
example.features.feature['image/object/3d_bbox/width'].float_list.value,
[11])
self._assertProtoEqual(example.features.feature[
'image/object/3d_bbox/length'].float_list.value, [12])
self._assertProtoEqual(
example.features.feature['image/object/3d_bbox/x'].float_list.value,
[13])
self._assertProtoEqual(
example.features.feature['image/object/3d_bbox/y'].float_list.value,
[14])
self._assertProtoEqual(
example.features.feature['image/object/3d_bbox/z'].float_list.value,
[15])
self._assertProtoEqual(
example.features.feature['image/object/3d_bbox/rot_y'].float_list.value,
[4])
if __name__ == '__main__':
tf.test.main()
| 4,850 | 35.473684 | 80 | py |
models | models-master/research/object_detection/dataset_tools/tf_record_creation_util_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf_record_creation_util.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import contextlib2
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import tf_record_creation_util
class OpenOutputTfrecordsTests(tf.test.TestCase):
def test_sharded_tfrecord_writes(self):
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack,
os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), 10)
for idx in range(10):
output_tfrecords[idx].write(six.ensure_binary('test_{}'.format(idx)))
for idx in range(10):
tf_record_path = '{}-{:05d}-of-00010'.format(
os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), idx)
records = list(tf.python_io.tf_record_iterator(tf_record_path))
self.assertAllEqual(records, ['test_{}'.format(idx).encode('utf-8')])
if __name__ == '__main__':
tf.test.main()
| 1,791 | 35.571429 | 80 | py |
models | models-master/research/object_detection/dataset_tools/oid_tfrecord_creation_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for oid_tfrecord_creation.py."""
import pandas as pd
import six
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import oid_tfrecord_creation
def create_test_data():
data = {
'ImageID': ['i1', 'i1', 'i1', 'i1', 'i1', 'i2', 'i2'],
'LabelName': ['a', 'a', 'b', 'b', 'c', 'b', 'c'],
'YMin': [0.3, 0.6, 0.8, 0.1, None, 0.0, 0.0],
'XMin': [0.1, 0.3, 0.7, 0.0, None, 0.1, 0.1],
'XMax': [0.2, 0.3, 0.8, 0.5, None, 0.9, 0.9],
'YMax': [0.3, 0.6, 1, 0.8, None, 0.8, 0.8],
'IsOccluded': [0, 1, 1, 0, None, 0, 0],
'IsTruncated': [0, 0, 0, 1, None, 0, 0],
'IsGroupOf': [0, 0, 0, 0, None, 0, 1],
'IsDepiction': [1, 0, 0, 0, None, 0, 0],
'ConfidenceImageLabel': [None, None, None, None, 0, None, None],
}
df = pd.DataFrame(data=data)
label_map = {'a': 0, 'b': 1, 'c': 2}
return label_map, df
class TfExampleFromAnnotationsDataFrameTests(tf.test.TestCase):
def test_simple(self):
label_map, df = create_test_data()
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
df[df.ImageID == 'i1'], label_map, 'encoded_image_test')
self.assertProtoEquals(six.ensure_str("""
features {
feature {
key: "image/encoded"
value { bytes_list { value: "encoded_image_test" } } }
feature {
key: "image/filename"
value { bytes_list { value: "i1.jpg" } } }
feature {
key: "image/object/bbox/ymin"
value { float_list { value: [0.3, 0.6, 0.8, 0.1] } } }
feature {
key: "image/object/bbox/xmin"
value { float_list { value: [0.1, 0.3, 0.7, 0.0] } } }
feature {
key: "image/object/bbox/ymax"
value { float_list { value: [0.3, 0.6, 1.0, 0.8] } } }
feature {
key: "image/object/bbox/xmax"
value { float_list { value: [0.2, 0.3, 0.8, 0.5] } } }
feature {
key: "image/object/class/label"
value { int64_list { value: [0, 0, 1, 1] } } }
feature {
key: "image/object/class/text"
value { bytes_list { value: ["a", "a", "b", "b"] } } }
feature {
key: "image/source_id"
value { bytes_list { value: "i1" } } }
feature {
key: "image/object/depiction"
value { int64_list { value: [1, 0, 0, 0] } } }
feature {
key: "image/object/group_of"
value { int64_list { value: [0, 0, 0, 0] } } }
feature {
key: "image/object/occluded"
value { int64_list { value: [0, 1, 1, 0] } } }
feature {
key: "image/object/truncated"
value { int64_list { value: [0, 0, 0, 1] } } }
feature {
key: "image/class/label"
value { int64_list { value: [2] } } }
feature {
key: "image/class/text"
value { bytes_list { value: ["c"] } } } }
"""), tf_example)
def test_no_attributes(self):
label_map, df = create_test_data()
del df['IsDepiction']
del df['IsGroupOf']
del df['IsOccluded']
del df['IsTruncated']
del df['ConfidenceImageLabel']
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
df[df.ImageID == 'i2'], label_map, 'encoded_image_test')
self.assertProtoEquals(six.ensure_str("""
features {
feature {
key: "image/encoded"
value { bytes_list { value: "encoded_image_test" } } }
feature {
key: "image/filename"
value { bytes_list { value: "i2.jpg" } } }
feature {
key: "image/object/bbox/ymin"
value { float_list { value: [0.0, 0.0] } } }
feature {
key: "image/object/bbox/xmin"
value { float_list { value: [0.1, 0.1] } } }
feature {
key: "image/object/bbox/ymax"
value { float_list { value: [0.8, 0.8] } } }
feature {
key: "image/object/bbox/xmax"
value { float_list { value: [0.9, 0.9] } } }
feature {
key: "image/object/class/label"
value { int64_list { value: [1, 2] } } }
feature {
key: "image/object/class/text"
value { bytes_list { value: ["b", "c"] } } }
feature {
key: "image/source_id"
value { bytes_list { value: "i2" } } } }
"""), tf_example)
def test_label_filtering(self):
label_map, df = create_test_data()
label_map = {'a': 0}
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
df[df.ImageID == 'i1'], label_map, 'encoded_image_test')
self.assertProtoEquals(
six.ensure_str("""
features {
feature {
key: "image/encoded"
value { bytes_list { value: "encoded_image_test" } } }
feature {
key: "image/filename"
value { bytes_list { value: "i1.jpg" } } }
feature {
key: "image/object/bbox/ymin"
value { float_list { value: [0.3, 0.6] } } }
feature {
key: "image/object/bbox/xmin"
value { float_list { value: [0.1, 0.3] } } }
feature {
key: "image/object/bbox/ymax"
value { float_list { value: [0.3, 0.6] } } }
feature {
key: "image/object/bbox/xmax"
value { float_list { value: [0.2, 0.3] } } }
feature {
key: "image/object/class/label"
value { int64_list { value: [0, 0] } } }
feature {
key: "image/object/class/text"
value { bytes_list { value: ["a", "a"] } } }
feature {
key: "image/source_id"
value { bytes_list { value: "i1" } } }
feature {
key: "image/object/depiction"
value { int64_list { value: [1, 0] } } }
feature {
key: "image/object/group_of"
value { int64_list { value: [0, 0] } } }
feature {
key: "image/object/occluded"
value { int64_list { value: [0, 1] } } }
feature {
key: "image/object/truncated"
value { int64_list { value: [0, 0] } } }
feature {
key: "image/class/label"
value { int64_list { } } }
feature {
key: "image/class/text"
value { bytes_list { } } } }
"""), tf_example)
if __name__ == '__main__':
tf.test.main()
| 7,318 | 35.412935 | 80 | py |
models | models-master/research/object_detection/dataset_tools/create_oid_tf_record.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates TFRecords of Open Images dataset for object detection.
Example usage:
python object_detection/dataset_tools/create_oid_tf_record.py \
--input_box_annotations_csv=/path/to/input/annotations-human-bbox.csv \
--input_image_label_annotations_csv=/path/to/input/annotations-label.csv \
--input_images_directory=/path/to/input/image_pixels_directory \
--input_label_map=/path/to/input/labels_bbox_545.labelmap \
--output_tf_record_path_prefix=/path/to/output/prefix.tfrecord
CSVs with bounding box annotations and image metadata (including the image URLs)
can be downloaded from the Open Images GitHub repository:
https://github.com/openimages/dataset
This script will include every image found in the input_images_directory in the
output TFRecord, even if the image has no corresponding bounding box annotations
in the input_box_annotations_csv. If input_image_label_annotations_csv is
specified, it will add image-level labels as well. Note that the information
on whether a label is positively or negatively verified is NOT added to the
tfrecord.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import contextlib2
import pandas as pd
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import oid_tfrecord_creation
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import label_map_util
tf.flags.DEFINE_string('input_box_annotations_csv', None,
'Path to CSV containing image bounding box annotations')
tf.flags.DEFINE_string('input_images_directory', None,
'Directory containing the image pixels '
'downloaded from the OpenImages GitHub repository.')
tf.flags.DEFINE_string('input_image_label_annotations_csv', None,
'Path to CSV containing image-level labels annotations')
tf.flags.DEFINE_string('input_label_map', None, 'Path to the label map proto')
tf.flags.DEFINE_string(
'output_tf_record_path_prefix', None,
'Path to the output TFRecord. The shard index and the number of shards '
'will be appended for each output shard.')
tf.flags.DEFINE_integer('num_shards', 100, 'Number of TFRecord shards')
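# For example (hypothetical values), --output_tf_record_path_prefix=/tmp/oid
# with --num_shards=100 produces the files /tmp/oid-00000-of-00100 through
# /tmp/oid-00099-of-00100.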
FLAGS = tf.flags.FLAGS
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
required_flags = [
'input_box_annotations_csv', 'input_images_directory', 'input_label_map',
'output_tf_record_path_prefix'
]
for flag_name in required_flags:
if not getattr(FLAGS, flag_name):
raise ValueError('Flag --{} is required'.format(flag_name))
label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map)
all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv)
if FLAGS.input_image_label_annotations_csv:
all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv)
all_label_annotations.rename(
columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
else:
all_label_annotations = None
all_images = tf.gfile.Glob(
os.path.join(FLAGS.input_images_directory, '*.jpg'))
all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images]
all_image_ids = pd.DataFrame({'ImageID': all_image_ids})
all_annotations = pd.concat(
[all_box_annotations, all_image_ids, all_label_annotations])
tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids))
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, FLAGS.output_tf_record_path_prefix,
FLAGS.num_shards)
for counter, image_data in enumerate(all_annotations.groupby('ImageID')):
tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
counter)
image_id, image_annotations = image_data
# In OID image file names are formed by appending ".jpg" to the image ID.
image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg')
with tf.gfile.Open(image_path) as image_file:
encoded_image = image_file.read()
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
image_annotations, label_map, encoded_image)
if tf_example:
        # Open Images image IDs are hexadecimal strings, so interpreting them
        # as integers gives a deterministic assignment of images to shards.
        shard_idx = int(image_id, 16) % FLAGS.num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
if __name__ == '__main__':
tf.app.run()
| 5,208 | 43.144068 | 80 | py |
models | models-master/research/object_detection/dataset_tools/oid_tfrecord_creation.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Utilities for creating TFRecords of TF examples for the Open Images dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.utils import dataset_util
def tf_example_from_annotations_data_frame(annotations_data_frame, label_map,
encoded_image):
"""Populates a TF Example message with image annotations from a data frame.
Args:
annotations_data_frame: Data frame containing the annotations for a single
image.
label_map: String to integer label map.
encoded_image: The encoded image string
Returns:
The populated TF Example, if the label of at least one object is present in
label_map. Otherwise, returns None.
"""
filtered_data_frame = annotations_data_frame[
annotations_data_frame.LabelName.isin(label_map)]
filtered_data_frame_boxes = filtered_data_frame[
~filtered_data_frame.YMin.isnull()]
filtered_data_frame_labels = filtered_data_frame[
filtered_data_frame.YMin.isnull()]
image_id = annotations_data_frame.ImageID.iloc[0]
feature_map = {
standard_fields.TfExampleFields.object_bbox_ymin:
dataset_util.float_list_feature(
filtered_data_frame_boxes.YMin.to_numpy()),
standard_fields.TfExampleFields.object_bbox_xmin:
dataset_util.float_list_feature(
filtered_data_frame_boxes.XMin.to_numpy()),
standard_fields.TfExampleFields.object_bbox_ymax:
dataset_util.float_list_feature(
filtered_data_frame_boxes.YMax.to_numpy()),
standard_fields.TfExampleFields.object_bbox_xmax:
dataset_util.float_list_feature(
filtered_data_frame_boxes.XMax.to_numpy()),
standard_fields.TfExampleFields.object_class_text:
dataset_util.bytes_list_feature([
six.ensure_binary(label_text)
for label_text in filtered_data_frame_boxes.LabelName.to_numpy()
]),
standard_fields.TfExampleFields.object_class_label:
dataset_util.int64_list_feature(
filtered_data_frame_boxes.LabelName.map(
lambda x: label_map[x]).to_numpy()),
standard_fields.TfExampleFields.filename:
dataset_util.bytes_feature(
six.ensure_binary('{}.jpg'.format(image_id))),
standard_fields.TfExampleFields.source_id:
dataset_util.bytes_feature(six.ensure_binary(image_id)),
standard_fields.TfExampleFields.image_encoded:
dataset_util.bytes_feature(six.ensure_binary(encoded_image)),
}
if 'IsGroupOf' in filtered_data_frame.columns:
feature_map[standard_fields.TfExampleFields.
object_group_of] = dataset_util.int64_list_feature(
filtered_data_frame_boxes.IsGroupOf.to_numpy().astype(int))
if 'IsOccluded' in filtered_data_frame.columns:
feature_map[standard_fields.TfExampleFields.
object_occluded] = dataset_util.int64_list_feature(
filtered_data_frame_boxes.IsOccluded.to_numpy().astype(
int))
if 'IsTruncated' in filtered_data_frame.columns:
feature_map[standard_fields.TfExampleFields.
object_truncated] = dataset_util.int64_list_feature(
filtered_data_frame_boxes.IsTruncated.to_numpy().astype(
int))
if 'IsDepiction' in filtered_data_frame.columns:
feature_map[standard_fields.TfExampleFields.
object_depiction] = dataset_util.int64_list_feature(
filtered_data_frame_boxes.IsDepiction.to_numpy().astype(
int))
if 'ConfidenceImageLabel' in filtered_data_frame_labels.columns:
feature_map[standard_fields.TfExampleFields.
image_class_label] = dataset_util.int64_list_feature(
filtered_data_frame_labels.LabelName.map(
lambda x: label_map[x]).to_numpy())
feature_map[standard_fields.TfExampleFields
.image_class_text] = dataset_util.bytes_list_feature([
six.ensure_binary(label_text) for label_text in
filtered_data_frame_labels.LabelName.to_numpy()
                ])
return tf.train.Example(features=tf.train.Features(feature=feature_map))
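# A minimal usage sketch with hypothetical values; the real data frames are
# built by create_oid_tf_record.py from the released Open Images CSVs, and the
# image id, label name and label id below are made up for illustration.
def _tf_example_sketch():
  """Illustrative only; not called by the conversion pipeline."""
  import pandas as pd  # Local import; only this sketch needs pandas here.
  annotations = pd.DataFrame({
      'ImageID': ['0000001'],
      'LabelName': ['/m/example'],
      'YMin': [0.1],
      'XMin': [0.2],
      'YMax': [0.8],
      'XMax': [0.9],
  })
  label_map = {'/m/example': 1}
  return tf_example_from_annotations_data_frame(
      annotations, label_map, encoded_image=b'fake_jpeg_bytes')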
| 5,134 | 44.442478 | 80 | py |
models | models-master/research/object_detection/dataset_tools/create_pet_tf_record.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert the Oxford pet dataset to TFRecord for object_detection.
See: O. M. Parkhi, A. Vedaldi, A. Zisserman, C. V. Jawahar
Cats and Dogs
IEEE Conference on Computer Vision and Pattern Recognition, 2012
http://www.robots.ox.ac.uk/~vgg/data/pets/
Example usage:
python object_detection/dataset_tools/create_pet_tf_record.py \
--data_dir=/home/user/pet \
--output_dir=/home/user/pet/output
"""
import hashlib
import io
import logging
import os
import random
import re
import contextlib2
from lxml import etree
import numpy as np
import PIL.Image
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
flags = tf.app.flags
flags.DEFINE_string('data_dir', '', 'Root directory to raw pet dataset.')
flags.DEFINE_string('output_dir', '', 'Path to directory to output TFRecords.')
flags.DEFINE_string('label_map_path', 'data/pet_label_map.pbtxt',
'Path to label map proto')
flags.DEFINE_boolean('faces_only', True, 'If True, generates bounding boxes '
'for pet faces. Otherwise generates bounding boxes (as '
'well as segmentations for full pet bodies). Note that '
'in the latter case, the resulting files are much larger.')
flags.DEFINE_string('mask_type', 'png', 'How to represent instance '
'segmentation masks. Options are "png" or "numerical".')
flags.DEFINE_integer('num_shards', 10, 'Number of TFRecord shards')
FLAGS = flags.FLAGS
def get_class_name_from_filename(file_name):
"""Gets the class name from a file.
Args:
file_name: The file name to get the class name from.
ie. "american_pit_bull_terrier_105.jpg"
Returns:
A string of the class name.
"""
match = re.match(r'([A-Za-z_]+)(_[0-9]+\.jpg)', file_name, re.I)
return match.groups()[0]
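# The class-name group is matched greedily, so multi-word names keep their
# underscores: a hypothetical 'german_shorthaired_107.jpg' yields
# 'german_shorthaired'.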
def dict_to_tf_example(data,
mask_path,
label_map_dict,
image_subdirectory,
ignore_difficult_instances=False,
faces_only=True,
mask_type='png'):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding PASCAL XML fields for a single image (obtained by
running dataset_util.recursive_parse_xml_to_dict)
mask_path: String path to PNG encoded mask.
label_map_dict: A map from string label names to integers ids.
image_subdirectory: String specifying subdirectory within the
Pascal dataset directory holding the actual image data.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
faces_only: If True, generates bounding boxes for pet faces. Otherwise
generates bounding boxes (as well as segmentations for full pet bodies).
mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to
smaller file sizes.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
img_path = os.path.join(image_subdirectory, data['filename'])
with tf.gfile.GFile(img_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
with tf.gfile.GFile(mask_path, 'rb') as fid:
encoded_mask_png = fid.read()
encoded_png_io = io.BytesIO(encoded_mask_png)
mask = PIL.Image.open(encoded_png_io)
if mask.format != 'PNG':
raise ValueError('Mask format not PNG')
mask_np = np.asarray(mask)
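  # In the Oxford-IIIT trimaps the pixel value 2 denotes background, so every
  # other value is treated as part of the pet.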
nonbackground_indices_x = np.any(mask_np != 2, axis=0)
nonbackground_indices_y = np.any(mask_np != 2, axis=1)
nonzero_x_indices = np.where(nonbackground_indices_x)
nonzero_y_indices = np.where(nonbackground_indices_y)
width = int(data['size']['width'])
height = int(data['size']['height'])
xmins = []
ymins = []
xmaxs = []
ymaxs = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
masks = []
if 'object' in data:
for obj in data['object']:
difficult = bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
difficult_obj.append(int(difficult))
if faces_only:
xmin = float(obj['bndbox']['xmin'])
xmax = float(obj['bndbox']['xmax'])
ymin = float(obj['bndbox']['ymin'])
ymax = float(obj['bndbox']['ymax'])
else:
xmin = float(np.min(nonzero_x_indices))
xmax = float(np.max(nonzero_x_indices))
ymin = float(np.min(nonzero_y_indices))
ymax = float(np.max(nonzero_y_indices))
xmins.append(xmin / width)
ymins.append(ymin / height)
xmaxs.append(xmax / width)
ymaxs.append(ymax / height)
class_name = get_class_name_from_filename(data['filename'])
classes_text.append(class_name.encode('utf8'))
classes.append(label_map_dict[class_name])
truncated.append(int(obj['truncated']))
poses.append(obj['pose'].encode('utf8'))
if not faces_only:
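        # Binary instance mask over the full image: 1 for pet pixels, 0 for
        # background.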
mask_remapped = (mask_np != 2).astype(np.uint8)
masks.append(mask_remapped)
feature_dict = {
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}
if not faces_only:
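    # Instance masks are stored either as a flattened float list ('numerical')
    # or as per-instance PNG-encoded bytes ('png', which yields smaller files).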
if mask_type == 'numerical':
mask_stack = np.stack(masks).astype(np.float32)
masks_flattened = np.reshape(mask_stack, [-1])
feature_dict['image/object/mask'] = (
dataset_util.float_list_feature(masks_flattened.tolist()))
elif mask_type == 'png':
encoded_mask_png_list = []
for mask in masks:
img = PIL.Image.fromarray(mask)
output = io.BytesIO()
img.save(output, format='PNG')
encoded_mask_png_list.append(output.getvalue())
feature_dict['image/object/mask'] = (
dataset_util.bytes_list_feature(encoded_mask_png_list))
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example
def create_tf_record(output_filename,
num_shards,
label_map_dict,
annotations_dir,
image_dir,
examples,
faces_only=True,
mask_type='png'):
"""Creates a TFRecord file from examples.
Args:
output_filename: Path to where output file is saved.
num_shards: Number of shards for output file.
label_map_dict: The label map dictionary.
annotations_dir: Directory where annotation files are stored.
image_dir: Directory where image files are stored.
examples: Examples to parse and save to tf record.
faces_only: If True, generates bounding boxes for pet faces. Otherwise
generates bounding boxes (as well as segmentations for full pet bodies).
mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to
smaller file sizes.
"""
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, output_filename, num_shards)
for idx, example in enumerate(examples):
if idx % 100 == 0:
logging.info('On image %d of %d', idx, len(examples))
xml_path = os.path.join(annotations_dir, 'xmls', example + '.xml')
mask_path = os.path.join(annotations_dir, 'trimaps', example + '.png')
if not os.path.exists(xml_path):
logging.warning('Could not find %s, ignoring example.', xml_path)
continue
with tf.gfile.GFile(xml_path, 'r') as fid:
xml_str = fid.read()
xml = etree.fromstring(xml_str)
data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
try:
tf_example = dict_to_tf_example(
data,
mask_path,
label_map_dict,
image_dir,
faces_only=faces_only,
mask_type=mask_type)
if tf_example:
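          # Shards are filled round-robin: example i goes to shard
          # i % num_shards.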
shard_idx = idx % num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
except ValueError:
logging.warning('Invalid example: %s, ignoring.', xml_path)
# TODO(derekjchow): Add test for pet/PASCAL main files.
def main(_):
data_dir = FLAGS.data_dir
label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)
logging.info('Reading from Pet dataset.')
image_dir = os.path.join(data_dir, 'images')
annotations_dir = os.path.join(data_dir, 'annotations')
examples_path = os.path.join(annotations_dir, 'trainval.txt')
examples_list = dataset_util.read_examples_list(examples_path)
# Test images are not included in the downloaded data set, so we shall perform
# our own split.
random.seed(42)
random.shuffle(examples_list)
num_examples = len(examples_list)
num_train = int(0.7 * num_examples)
train_examples = examples_list[:num_train]
val_examples = examples_list[num_train:]
logging.info('%d training and %d validation examples.',
len(train_examples), len(val_examples))
train_output_path = os.path.join(FLAGS.output_dir, 'pet_faces_train.record')
val_output_path = os.path.join(FLAGS.output_dir, 'pet_faces_val.record')
if not FLAGS.faces_only:
train_output_path = os.path.join(FLAGS.output_dir,
'pets_fullbody_with_masks_train.record')
val_output_path = os.path.join(FLAGS.output_dir,
'pets_fullbody_with_masks_val.record')
create_tf_record(
train_output_path,
FLAGS.num_shards,
label_map_dict,
annotations_dir,
image_dir,
train_examples,
faces_only=FLAGS.faces_only,
mask_type=FLAGS.mask_type)
create_tf_record(
val_output_path,
FLAGS.num_shards,
label_map_dict,
annotations_dir,
image_dir,
val_examples,
faces_only=FLAGS.faces_only,
mask_type=FLAGS.mask_type)
if __name__ == '__main__':
tf.app.run()
| 12,116 | 36.984326 | 80 | py |
models | models-master/research/object_detection/dataset_tools/seq_example_util.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utility for object detection tf.train.SequenceExamples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
def context_float_feature(ndarray):
"""Converts a numpy float array to a context float feature.
Args:
ndarray: A numpy float array.
Returns:
A context float feature.
"""
feature = tf.train.Feature()
for val in ndarray:
if isinstance(val, np.ndarray):
val = val.item()
feature.float_list.value.append(val)
return feature
def context_int64_feature(ndarray):
"""Converts a numpy array to a context int64 feature.
Args:
ndarray: A numpy int64 array.
Returns:
A context int64 feature.
"""
feature = tf.train.Feature()
for val in ndarray:
if isinstance(val, np.ndarray):
val = val.item()
feature.int64_list.value.append(val)
return feature
def context_bytes_feature(ndarray):
"""Converts a numpy bytes array to a context bytes feature.
Args:
ndarray: A numpy bytes array.
Returns:
A context bytes feature.
"""
feature = tf.train.Feature()
for val in ndarray:
if isinstance(val, np.ndarray):
val = val.tolist()
feature.bytes_list.value.append(tf.compat.as_bytes(val))
return feature
def sequence_float_feature(ndarray):
"""Converts a numpy float array to a sequence float feature.
Args:
ndarray: A numpy float array.
Returns:
A sequence float feature.
"""
feature_list = tf.train.FeatureList()
for row in ndarray:
feature = feature_list.feature.add()
if row.size:
feature.float_list.value[:] = np.ravel(row)
return feature_list
def sequence_int64_feature(ndarray):
"""Converts a numpy int64 array to a sequence int64 feature.
Args:
ndarray: A numpy int64 array.
Returns:
A sequence int64 feature.
"""
feature_list = tf.train.FeatureList()
for row in ndarray:
feature = feature_list.feature.add()
if row.size:
feature.int64_list.value[:] = np.ravel(row)
return feature_list
def sequence_bytes_feature(ndarray):
"""Converts a bytes float array to a sequence bytes feature.
Args:
ndarray: A numpy bytes array.
Returns:
A sequence bytes feature.
"""
feature_list = tf.train.FeatureList()
for row in ndarray:
if isinstance(row, np.ndarray):
row = row.tolist()
feature = feature_list.feature.add()
if row:
row = [tf.compat.as_bytes(val) for val in row]
feature.bytes_list.value[:] = np.ravel(row)
return feature_list
def sequence_strings_feature(strings):
new_str_arr = []
for single_str in strings:
new_str_arr.append(tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[single_str.encode('utf8')])))
return tf.train.FeatureList(feature=new_str_arr)
def boxes_to_box_components(bboxes):
"""Converts a list of numpy arrays (boxes) to box components.
Args:
    bboxes: A list of numpy arrays holding the bounding boxes for each frame.
Returns:
Bounding box component lists.
"""
ymin_list = []
xmin_list = []
ymax_list = []
xmax_list = []
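  # Each per-frame array holds boxes as [ymin, xmin, ymax, xmax] rows; frames
  # without boxes produce empty component lists.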
for bbox in bboxes:
if bbox != []: # pylint: disable=g-explicit-bool-comparison
bbox = np.array(bbox).astype(np.float32)
ymin, xmin, ymax, xmax = np.split(bbox, 4, axis=1)
else:
ymin, xmin, ymax, xmax = [], [], [], []
ymin_list.append(np.reshape(ymin, [-1]))
xmin_list.append(np.reshape(xmin, [-1]))
ymax_list.append(np.reshape(ymax, [-1]))
xmax_list.append(np.reshape(xmax, [-1]))
return ymin_list, xmin_list, ymax_list, xmax_list
def make_sequence_example(dataset_name,
video_id,
encoded_images,
image_height,
image_width,
image_format=None,
image_source_ids=None,
timestamps=None,
is_annotated=None,
bboxes=None,
label_strings=None,
detection_bboxes=None,
detection_classes=None,
detection_scores=None,
use_strs_for_source_id=False,
context_features=None,
context_feature_length=None,
context_features_image_id_list=None):
"""Constructs tf.SequenceExamples.
Args:
dataset_name: String with dataset name.
video_id: String with video id.
encoded_images: A [num_frames] list (or numpy array) of encoded image
frames.
image_height: Height of the images.
image_width: Width of the images.
image_format: Format of encoded images.
image_source_ids: (Optional) A [num_frames] list of unique string ids for
each image.
    timestamps: (Optional) A [num_frames] list (or numpy array) with image
      timestamps.
    is_annotated: (Optional) A [num_frames] list (or numpy array) in which
      each element indicates whether the frame has been annotated (1) or
      not (0).
bboxes: (Optional) A list (with num_frames elements) of [num_boxes_i, 4]
numpy float32 arrays holding boxes for each frame.
    label_strings: (Optional) A list (with num_frames elements) of [num_boxes_i]
numpy string arrays holding object string labels for each frame.
detection_bboxes: (Optional) A list (with num_frames elements) of
[num_boxes_i, 4] numpy float32 arrays holding prediction boxes for each
frame.
    detection_classes: (Optional) A list (with num_frames elements) of
[num_boxes_i] numpy int64 arrays holding predicted classes for each frame.
    detection_scores: (Optional) A list (with num_frames elements) of
[num_boxes_i] numpy float32 arrays holding predicted object scores for
each frame.
use_strs_for_source_id: (Optional) Whether to write the source IDs as
strings rather than byte lists of characters.
context_features: (Optional) A list or numpy array of features to use in
Context R-CNN, of length num_context_features * context_feature_length.
context_feature_length: (Optional) The length of each context feature, used
for reshaping.
context_features_image_id_list: (Optional) A list of image ids of length
num_context_features corresponding to the context features.
Returns:
A tf.train.SequenceExample.
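  Example usage (a minimal sketch; variable names are hypothetical):
    example = make_sequence_example(
        dataset_name='my_dataset', video_id='video_0',
        encoded_images=[encoded_frame_0, encoded_frame_1],
        image_height=360, image_width=480)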
"""
num_frames = len(encoded_images)
image_encoded = np.expand_dims(encoded_images, axis=-1)
if timestamps is None:
timestamps = np.arange(num_frames)
image_timestamps = np.expand_dims(timestamps, axis=-1)
# Context fields.
context_dict = {
'example/dataset_name': context_bytes_feature([dataset_name]),
'clip/start/timestamp': context_int64_feature([image_timestamps[0][0]]),
'clip/end/timestamp': context_int64_feature([image_timestamps[-1][0]]),
'clip/frames': context_int64_feature([num_frames]),
'image/channels': context_int64_feature([3]),
'image/height': context_int64_feature([image_height]),
'image/width': context_int64_feature([image_width]),
'clip/media_id': context_bytes_feature([video_id])
}
# Sequence fields.
feature_list = {
'image/encoded': sequence_bytes_feature(image_encoded),
'image/timestamp': sequence_int64_feature(image_timestamps),
}
# Add optional fields.
if image_format is not None:
context_dict['image/format'] = context_bytes_feature([image_format])
if image_source_ids is not None:
if use_strs_for_source_id:
feature_list['image/source_id'] = sequence_strings_feature(
image_source_ids)
else:
feature_list['image/source_id'] = sequence_bytes_feature(image_source_ids)
if bboxes is not None:
bbox_ymin, bbox_xmin, bbox_ymax, bbox_xmax = boxes_to_box_components(bboxes)
feature_list['region/bbox/xmin'] = sequence_float_feature(bbox_xmin)
feature_list['region/bbox/xmax'] = sequence_float_feature(bbox_xmax)
feature_list['region/bbox/ymin'] = sequence_float_feature(bbox_ymin)
feature_list['region/bbox/ymax'] = sequence_float_feature(bbox_ymax)
if is_annotated is None:
is_annotated = np.ones(num_frames, dtype=np.int64)
is_annotated = np.expand_dims(is_annotated, axis=-1)
feature_list['region/is_annotated'] = sequence_int64_feature(is_annotated)
if label_strings is not None:
feature_list['region/label/string'] = sequence_bytes_feature(
label_strings)
if detection_bboxes is not None:
det_bbox_ymin, det_bbox_xmin, det_bbox_ymax, det_bbox_xmax = (
boxes_to_box_components(detection_bboxes))
feature_list['predicted/region/bbox/xmin'] = sequence_float_feature(
det_bbox_xmin)
feature_list['predicted/region/bbox/xmax'] = sequence_float_feature(
det_bbox_xmax)
feature_list['predicted/region/bbox/ymin'] = sequence_float_feature(
det_bbox_ymin)
feature_list['predicted/region/bbox/ymax'] = sequence_float_feature(
det_bbox_ymax)
if detection_classes is not None:
feature_list['predicted/region/label/index'] = sequence_int64_feature(
detection_classes)
if detection_scores is not None:
feature_list['predicted/region/label/confidence'] = sequence_float_feature(
detection_scores)
if context_features is not None:
context_dict['image/context_features'] = context_float_feature(
context_features)
if context_feature_length is not None:
context_dict['image/context_feature_length'] = context_int64_feature(
context_feature_length)
if context_features_image_id_list is not None:
context_dict['image/context_features_image_id_list'] = (
context_bytes_feature(context_features_image_id_list))
context = tf.train.Features(feature=context_dict)
feature_lists = tf.train.FeatureLists(feature_list=feature_list)
sequence_example = tf.train.SequenceExample(
context=context,
feature_lists=feature_lists)
return sequence_example
| 10,770 | 34.199346 | 80 | py |
models | models-master/research/object_detection/dataset_tools/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/object_detection/dataset_tools/create_pascal_tf_record.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw PASCAL dataset to TFRecord for object_detection.
Example usage:
python object_detection/dataset_tools/create_pascal_tf_record.py \
--data_dir=/home/user/VOCdevkit \
--year=VOC2012 \
--output_path=/home/user/pascal.record
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import logging
import os
from lxml import etree
import PIL.Image
import tensorflow.compat.v1 as tf
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
flags = tf.app.flags
flags.DEFINE_string('data_dir', '', 'Root directory to raw PASCAL VOC dataset.')
flags.DEFINE_string('set', 'train', 'Convert training set, validation set or '
'merged set.')
flags.DEFINE_string('annotations_dir', 'Annotations',
'(Relative) path to annotations directory.')
flags.DEFINE_string('year', 'VOC2007', 'Desired challenge year.')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
flags.DEFINE_string('label_map_path', 'data/pascal_label_map.pbtxt',
'Path to label map proto')
flags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore '
'difficult instances')
FLAGS = flags.FLAGS
SETS = ['train', 'val', 'trainval', 'test']
YEARS = ['VOC2007', 'VOC2012', 'merged']
def dict_to_tf_example(data,
dataset_directory,
label_map_dict,
ignore_difficult_instances=False,
image_subdirectory='JPEGImages'):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding PASCAL XML fields for a single image (obtained by
running dataset_util.recursive_parse_xml_to_dict)
dataset_directory: Path to root directory holding PASCAL dataset
label_map_dict: A map from string label names to integers ids.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
image_subdirectory: String specifying subdirectory within the
PASCAL dataset directory holding the actual image data.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
img_path = os.path.join(data['folder'], image_subdirectory, data['filename'])
full_path = os.path.join(dataset_directory, img_path)
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
width = int(data['size']['width'])
height = int(data['size']['height'])
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
if 'object' in data:
for obj in data['object']:
difficult = bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
difficult_obj.append(int(difficult))
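      # VOC boxes are absolute pixel coordinates; normalize them to [0, 1].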
xmin.append(float(obj['bndbox']['xmin']) / width)
ymin.append(float(obj['bndbox']['ymin']) / height)
xmax.append(float(obj['bndbox']['xmax']) / width)
ymax.append(float(obj['bndbox']['ymax']) / height)
classes_text.append(obj['name'].encode('utf8'))
classes.append(label_map_dict[obj['name']])
truncated.append(int(obj['truncated']))
poses.append(obj['pose'].encode('utf8'))
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}))
return example
def main(_):
if FLAGS.set not in SETS:
raise ValueError('set must be in : {}'.format(SETS))
if FLAGS.year not in YEARS:
raise ValueError('year must be in : {}'.format(YEARS))
data_dir = FLAGS.data_dir
years = ['VOC2007', 'VOC2012']
if FLAGS.year != 'merged':
years = [FLAGS.year]
writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)
for year in years:
logging.info('Reading from PASCAL %s dataset.', year)
examples_path = os.path.join(data_dir, year, 'ImageSets', 'Main',
'aeroplane_' + FLAGS.set + '.txt')
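    # Every per-class file under ImageSets/Main lists all image ids in the
    # split (with a per-class presence flag), so the aeroplane file is used
    # here only to enumerate example ids.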
annotations_dir = os.path.join(data_dir, year, FLAGS.annotations_dir)
examples_list = dataset_util.read_examples_list(examples_path)
for idx, example in enumerate(examples_list):
if idx % 100 == 0:
logging.info('On image %d of %d', idx, len(examples_list))
path = os.path.join(annotations_dir, example + '.xml')
with tf.gfile.GFile(path, 'r') as fid:
xml_str = fid.read()
xml = etree.fromstring(xml_str)
data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
tf_example = dict_to_tf_example(data, FLAGS.data_dir, label_map_dict,
FLAGS.ignore_difficult_instances)
writer.write(tf_example.SerializeToString())
writer.close()
if __name__ == '__main__':
tf.app.run()
| 7,127 | 37.322581 | 80 | py |
models | models-master/research/object_detection/dataset_tools/create_kitti_tf_record.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw KITTI detection dataset to TFRecord for object_detection.
Converts the KITTI detection dataset to TFRecords in a standard format that
allows using this dataset to train object detectors. The raw dataset can be
downloaded from:
http://kitti.is.tue.mpg.de/kitti/data_object_image_2.zip.
http://kitti.is.tue.mpg.de/kitti/data_object_label_2.zip
Permission can be requested at the main website.
KITTI detection dataset contains 7481 training images. Using this code with
the default settings will set aside the first 500 images as a validation set.
This can be altered using the flags, see details below.
Example usage:
python object_detection/dataset_tools/create_kitti_tf_record.py \
--data_dir=/home/user/kitti \
--output_path=/home/user/kitti.record
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import os
import numpy as np
import PIL.Image as pil
import tensorflow.compat.v1 as tf
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
from object_detection.utils.np_box_ops import iou
tf.app.flags.DEFINE_string('data_dir', '', 'Location of root directory for the '
                           'data. Folder structure is assumed to be: '
                           '<data_dir>/training/label_2 (annotations) and '
                           '<data_dir>/data_object_image_2/training/image_2 '
'(images).')
tf.app.flags.DEFINE_string('output_path', '', 'Path to which TFRecord files '
                           'will be written. The TFRecord with the training set '
                           'will be located at: <output_path>_train.tfrecord. '
                           'And the TFRecord with the validation set will be '
'located at: <output_path>_val.tfrecord')
tf.app.flags.DEFINE_string('classes_to_use', 'car,pedestrian,dontcare',
                           'Comma separated list of class names that will be '
                           'used. Adding the dontcare class will remove all '
                           'bboxes in the dontcare regions.')
tf.app.flags.DEFINE_string('label_map_path', 'data/kitti_label_map.pbtxt',
'Path to label map proto.')
tf.app.flags.DEFINE_integer('validation_set_size', 500, 'Number of images to '
'be used as a validation set.')
FLAGS = tf.app.flags.FLAGS
def convert_kitti_to_tfrecords(data_dir, output_path, classes_to_use,
label_map_path, validation_set_size):
"""Convert the KITTI detection dataset to TFRecords.
Args:
data_dir: The full path to the unzipped folder containing the unzipped data
from data_object_image_2 and data_object_label_2.zip.
Folder structure is assumed to be: data_dir/training/label_2 (annotations)
and data_dir/data_object_image_2/training/image_2 (images).
output_path: The path to which TFRecord files will be written. The TFRecord
with the training set will be located at: <output_path>_train.tfrecord
And the TFRecord with the validation set will be located at:
<output_path>_val.tfrecord
classes_to_use: List of strings naming the classes for which data should be
      converted. Use the same names as presented in the KITTI README file.
Adding dontcare class will remove all other bounding boxes that overlap
with areas marked as dontcare regions.
label_map_path: Path to label map proto
validation_set_size: How many images should be left as the validation set.
      (First `validation_set_size` examples are selected to be in the
validation set).
"""
label_map_dict = label_map_util.get_label_map_dict(label_map_path)
train_count = 0
val_count = 0
annotation_dir = os.path.join(data_dir,
'training',
'label_2')
image_dir = os.path.join(data_dir,
'data_object_image_2',
'training',
'image_2')
train_writer = tf.python_io.TFRecordWriter('%s_train.tfrecord'%
output_path)
val_writer = tf.python_io.TFRecordWriter('%s_val.tfrecord'%
output_path)
images = sorted(tf.gfile.ListDirectory(image_dir))
for img_name in images:
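    # KITTI images are named with zero-padded numeric ids (e.g. 000123.png);
    # the first `validation_set_size` ids form the validation split.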
img_num = int(img_name.split('.')[0])
is_validation_img = img_num < validation_set_size
img_anno = read_annotation_file(os.path.join(annotation_dir,
str(img_num).zfill(6)+'.txt'))
image_path = os.path.join(image_dir, img_name)
# Filter all bounding boxes of this frame that are of a legal class, and
# don't overlap with a dontcare region.
# TODO(talremez) filter out targets that are truncated or heavily occluded.
annotation_for_image = filter_annotations(img_anno, classes_to_use)
example = prepare_example(image_path, annotation_for_image, label_map_dict)
if is_validation_img:
val_writer.write(example.SerializeToString())
val_count += 1
else:
train_writer.write(example.SerializeToString())
train_count += 1
train_writer.close()
val_writer.close()
def prepare_example(image_path, annotations, label_map_dict):
"""Converts a dictionary with annotations for an image to tf.Example proto.
Args:
image_path: The complete path to image.
annotations: A dictionary representing the annotation of a single object
that appears in the image.
label_map_dict: A map from string label names to integer ids.
Returns:
example: The converted tf.Example.
"""
with tf.gfile.GFile(image_path, 'rb') as fid:
encoded_png = fid.read()
encoded_png_io = io.BytesIO(encoded_png)
image = pil.open(encoded_png_io)
image = np.asarray(image)
key = hashlib.sha256(encoded_png).hexdigest()
width = int(image.shape[1])
height = int(image.shape[0])
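  # KITTI 2D boxes are absolute pixel coordinates; normalize them to [0, 1].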
xmin_norm = annotations['2d_bbox_left'] / float(width)
ymin_norm = annotations['2d_bbox_top'] / float(height)
xmax_norm = annotations['2d_bbox_right'] / float(width)
ymax_norm = annotations['2d_bbox_bottom'] / float(height)
difficult_obj = [0]*len(xmin_norm)
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(image_path.encode('utf8')),
'image/source_id': dataset_util.bytes_feature(image_path.encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_png),
'image/format': dataset_util.bytes_feature('png'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin_norm),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax_norm),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin_norm),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax_norm),
'image/object/class/text': dataset_util.bytes_list_feature(
[x.encode('utf8') for x in annotations['type']]),
'image/object/class/label': dataset_util.int64_list_feature(
[label_map_dict[x] for x in annotations['type']]),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.float_list_feature(
annotations['truncated']),
'image/object/alpha': dataset_util.float_list_feature(
annotations['alpha']),
'image/object/3d_bbox/height': dataset_util.float_list_feature(
annotations['3d_bbox_height']),
'image/object/3d_bbox/width': dataset_util.float_list_feature(
annotations['3d_bbox_width']),
'image/object/3d_bbox/length': dataset_util.float_list_feature(
annotations['3d_bbox_length']),
'image/object/3d_bbox/x': dataset_util.float_list_feature(
annotations['3d_bbox_x']),
'image/object/3d_bbox/y': dataset_util.float_list_feature(
annotations['3d_bbox_y']),
'image/object/3d_bbox/z': dataset_util.float_list_feature(
annotations['3d_bbox_z']),
'image/object/3d_bbox/rot_y': dataset_util.float_list_feature(
annotations['3d_bbox_rot_y']),
}))
return example
def filter_annotations(img_all_annotations, used_classes):
"""Filters out annotations from the unused classes and dontcare regions.
  Filters out the annotations that belong to classes we do not wish to use and
(optionally) also removes all boxes that overlap with dontcare regions.
Args:
img_all_annotations: A list of annotation dictionaries. See documentation of
read_annotation_file for more details about the format of the annotations.
    used_classes: A list of strings listing the classes we want to keep; if the
      list contains "dontcare", all bounding boxes overlapping with dontcare
      regions will also be filtered out.
Returns:
img_filtered_annotations: A list of annotation dictionaries that have passed
the filtering.
"""
img_filtered_annotations = {}
# Filter the type of the objects.
relevant_annotation_indices = [
i for i, x in enumerate(img_all_annotations['type']) if x in used_classes
]
for key in img_all_annotations.keys():
img_filtered_annotations[key] = (
img_all_annotations[key][relevant_annotation_indices])
if 'dontcare' in used_classes:
dont_care_indices = [i for i,
x in enumerate(img_filtered_annotations['type'])
if x == 'dontcare']
# bounding box format [y_min, x_min, y_max, x_max]
all_boxes = np.stack([img_filtered_annotations['2d_bbox_top'],
img_filtered_annotations['2d_bbox_left'],
img_filtered_annotations['2d_bbox_bottom'],
img_filtered_annotations['2d_bbox_right']],
axis=1)
ious = iou(boxes1=all_boxes,
boxes2=all_boxes[dont_care_indices])
# Remove all bounding boxes that overlap with a dontcare region.
if ious.size > 0:
boxes_to_remove = np.amax(ious, axis=1) > 0.0
for key in img_all_annotations.keys():
img_filtered_annotations[key] = (
img_filtered_annotations[key][np.logical_not(boxes_to_remove)])
return img_filtered_annotations
def read_annotation_file(filename):
"""Reads a KITTI annotation file.
Converts a KITTI annotation file into a dictionary containing all the
relevant information.
Args:
    filename: The path to the annotation text file.
Returns:
anno: A dictionary with the converted annotation information. See annotation
README file for details on the different fields.
"""
with open(filename) as f:
content = f.readlines()
content = [x.strip().split(' ') for x in content]
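  # Each KITTI label line has 15 space-separated fields: type, truncated,
  # occluded, alpha, 2D bbox (left, top, right, bottom), 3D dimensions
  # (height, width, length), 3D location (x, y, z) and rotation_y.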
anno = {}
anno['type'] = np.array([x[0].lower() for x in content])
anno['truncated'] = np.array([float(x[1]) for x in content])
anno['occluded'] = np.array([int(x[2]) for x in content])
anno['alpha'] = np.array([float(x[3]) for x in content])
anno['2d_bbox_left'] = np.array([float(x[4]) for x in content])
anno['2d_bbox_top'] = np.array([float(x[5]) for x in content])
anno['2d_bbox_right'] = np.array([float(x[6]) for x in content])
anno['2d_bbox_bottom'] = np.array([float(x[7]) for x in content])
anno['3d_bbox_height'] = np.array([float(x[8]) for x in content])
anno['3d_bbox_width'] = np.array([float(x[9]) for x in content])
anno['3d_bbox_length'] = np.array([float(x[10]) for x in content])
anno['3d_bbox_x'] = np.array([float(x[11]) for x in content])
anno['3d_bbox_y'] = np.array([float(x[12]) for x in content])
anno['3d_bbox_z'] = np.array([float(x[13]) for x in content])
anno['3d_bbox_rot_y'] = np.array([float(x[14]) for x in content])
return anno
def main(_):
convert_kitti_to_tfrecords(
data_dir=FLAGS.data_dir,
output_path=FLAGS.output_path,
classes_to_use=FLAGS.classes_to_use.split(','),
label_map_path=FLAGS.label_map_path,
validation_set_size=FLAGS.validation_set_size)
if __name__ == '__main__':
tf.app.run()
| 13,073 | 41.038585 | 80 | py |
models | models-master/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for create_cococameratraps_tfexample_main."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import os
import tempfile
import unittest
import numpy as np
from PIL import Image
import tensorflow as tf
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import create_cococameratraps_tfexample_main # pylint:disable=g-import-not-at-top
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase):
IMAGE_HEIGHT = 360
IMAGE_WIDTH = 480
def _write_random_images_to_directory(self, directory, num_frames):
for frame_num in range(num_frames):
img = np.random.randint(0, high=256,
size=(self.IMAGE_HEIGHT, self.IMAGE_WIDTH, 3),
dtype=np.uint8)
pil_image = Image.fromarray(img)
fname = 'im_' + str(frame_num) + '.jpg'
pil_image.save(os.path.join(directory, fname), 'JPEG')
def _create_json_file(self, directory, num_frames, keep_bboxes=False):
json_dict = {'images': [], 'annotations': []}
json_dict['categories'] = [{'id': 0, 'name': 'empty'},
{'id': 1, 'name': 'animal'}]
for idx in range(num_frames):
im = {'id': 'im_' + str(idx),
'file_name': 'im_' + str(idx) + '.jpg',
'height': self.IMAGE_HEIGHT,
'width': self.IMAGE_WIDTH,
'seq_id': 'seq_1',
'seq_num_frames': num_frames,
'frame_num': idx,
'location': 'loc_' + str(idx),
'date_captured': str(datetime.datetime.now())
}
json_dict['images'].append(im)
ann = {'id': 'ann' + str(idx),
'image_id': 'im_' + str(idx),
'category_id': 1,
}
if keep_bboxes:
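        # COCO-style bbox: [x, y, width, height] in absolute pixels.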
ann['bbox'] = [0.0 * self.IMAGE_WIDTH,
0.1 * self.IMAGE_HEIGHT,
0.5 * self.IMAGE_WIDTH,
0.5 * self.IMAGE_HEIGHT]
json_dict['annotations'].append(ann)
json_path = os.path.join(directory, 'test_file.json')
with tf.io.gfile.GFile(json_path, 'w') as f:
json.dump(json_dict, f)
return json_path
def assert_expected_example_bbox(self, example):
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.1])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.0])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.6])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.5])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [1])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'animal'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [1])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'animal'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value,
[self.IMAGE_HEIGHT])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value,
[self.IMAGE_WIDTH])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'im_0'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def assert_expected_example(self, example):
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [1])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'animal'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [1])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'animal'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value,
[self.IMAGE_HEIGHT])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value,
[self.IMAGE_WIDTH])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'im_0'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def test_beam_pipeline(self):
num_frames = 1
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
json_path = self._create_json_file(temp_dir, num_frames)
output_tfrecord = temp_dir+'/output'
self._write_random_images_to_directory(temp_dir, num_frames)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
create_cococameratraps_tfexample_main.create_pipeline(
p, temp_dir, json_path,
output_tfrecord_prefix=output_tfrecord)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), num_frames)
self.assert_expected_example(tf.train.Example.FromString(
actual_output[0]))
def test_beam_pipeline_bbox(self):
num_frames = 1
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
json_path = self._create_json_file(temp_dir, num_frames, keep_bboxes=True)
output_tfrecord = temp_dir+'/output'
self._write_random_images_to_directory(temp_dir, num_frames)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
create_cococameratraps_tfexample_main.create_pipeline(
p, temp_dir, json_path,
output_tfrecord_prefix=output_tfrecord,
keep_bboxes=True)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord+'-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), num_frames)
self.assert_expected_example_bbox(tf.train.Example.FromString(
actual_output[0]))
if __name__ == '__main__':
tf.test.main()
| 8,166 | 36.986047 | 133 | py |
models | models-master/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Beam pipeline to create COCO Camera Traps Object Detection TFRecords.
Please note that this tool creates sharded output files.
This tool assumes the input annotations are in the COCO Camera Traps json
format, specified here:
https://github.com/Microsoft/CameraTraps/blob/master/data_management/README.md
Example usage:
python create_cococameratraps_tfexample_main.py \
--alsologtostderr \
--output_tfrecord_prefix="/path/to/output/tfrecord/location/prefix" \
--image_directory="/path/to/image/folder/" \
--input_annotations_file="path/to/annotations.json"
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import hashlib
import io
import json
import os
import numpy as np
import PIL.Image
import tensorflow as tf
from object_detection.utils import dataset_util
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class ParseImage(beam.DoFn):
"""A DoFn that parses a COCO-CameraTraps json and emits TFRecords."""
def __init__(self, image_directory, images, annotations, categories,
keep_bboxes):
"""Initialization function.
Args:
image_directory: Path to image directory
images: list of COCO Camera Traps style image dictionaries
annotations: list of COCO Camera Traps style annotation dictionaries
categories: list of COCO Camera Traps style category dictionaries
keep_bboxes: Whether to keep any bounding boxes that exist in the
annotations
"""
self._image_directory = image_directory
self._image_dict = {im['id']: im for im in images}
self._annotation_dict = {im['id']: [] for im in images}
self._category_dict = {int(cat['id']): cat for cat in categories}
for ann in annotations:
self._annotation_dict[ann['image_id']].append(ann)
self._images = images
self._keep_bboxes = keep_bboxes
self._num_examples_processed = beam.metrics.Metrics.counter(
'cococameratraps_data_generation', 'num_tf_examples_processed')
def process(self, image_id):
"""Builds a tf.Example given an image id.
Args:
image_id: the image id of the associated image
Returns:
List of tf.Examples.
"""
image = self._image_dict[image_id]
annotations = self._annotation_dict[image_id]
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
image_location_id = image['location']
image_datetime = str(image['date_captured'])
image_sequence_id = str(image['seq_id'])
image_sequence_num_frames = int(image['seq_num_frames'])
image_sequence_frame_num = int(image['frame_num'])
full_path = os.path.join(self._image_directory, filename)
try:
# Ensure the image exists and is not corrupted
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
image = tf.io.decode_jpeg(encoded_jpg, channels=3)
except Exception: # pylint: disable=broad-except
# The image file is missing or corrupt
return []
key = hashlib.sha256(encoded_jpg).hexdigest()
feature_dict = {
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/filename':
dataset_util.bytes_feature(filename.encode('utf8')),
'image/source_id':
dataset_util.bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded':
dataset_util.bytes_feature(encoded_jpg),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/location':
dataset_util.bytes_feature(str(image_location_id).encode('utf8')),
'image/seq_num_frames':
dataset_util.int64_feature(image_sequence_num_frames),
'image/seq_frame_num':
dataset_util.int64_feature(image_sequence_frame_num),
'image/seq_id':
dataset_util.bytes_feature(image_sequence_id.encode('utf8')),
'image/date_captured':
dataset_util.bytes_feature(image_datetime.encode('utf8'))
}
num_annotations_skipped = 0
if annotations:
xmin = []
xmax = []
ymin = []
ymax = []
category_names = []
category_ids = []
area = []
for object_annotations in annotations:
if 'bbox' in object_annotations and self._keep_bboxes:
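          # COCO boxes are [x, y, width, height] in absolute pixels; they are
          # converted to normalized corner coordinates below.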
(x, y, width, height) = tuple(object_annotations['bbox'])
if width <= 0 or height <= 0:
num_annotations_skipped += 1
continue
if x + width > image_width or y + height > image_height:
num_annotations_skipped += 1
continue
xmin.append(float(x) / image_width)
xmax.append(float(x + width) / image_width)
ymin.append(float(y) / image_height)
ymax.append(float(y + height) / image_height)
if 'area' in object_annotations:
area.append(object_annotations['area'])
else:
# approximate area using l*w/2
area.append(width*height/2.0)
category_id = int(object_annotations['category_id'])
category_ids.append(category_id)
category_names.append(
self._category_dict[category_id]['name'].encode('utf8'))
feature_dict.update({
'image/object/bbox/xmin':
dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax':
dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin':
dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax':
dataset_util.float_list_feature(ymax),
'image/object/class/text':
dataset_util.bytes_list_feature(category_names),
'image/object/class/label':
dataset_util.int64_list_feature(category_ids),
'image/object/area':
dataset_util.float_list_feature(area),
})
# For classification, add the first category to image/class/label and
# image/class/text
if not category_ids:
feature_dict.update({
'image/class/label':
dataset_util.int64_list_feature([0]),
'image/class/text':
dataset_util.bytes_list_feature(['empty'.encode('utf8')]),
})
else:
feature_dict.update({
'image/class/label':
dataset_util.int64_list_feature([category_ids[0]]),
'image/class/text':
dataset_util.bytes_list_feature([category_names[0]]),
})
else:
# Add empty class if there are no annotations
feature_dict.update({
'image/class/label':
dataset_util.int64_list_feature([0]),
'image/class/text':
dataset_util.bytes_list_feature(['empty'.encode('utf8')]),
})
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
self._num_examples_processed.inc(1)
return [(example)]
def load_json_data(data_file):
with tf.io.gfile.GFile(data_file, 'r') as fid:
data_dict = json.load(fid)
return data_dict
def create_pipeline(pipeline,
image_directory,
input_annotations_file,
output_tfrecord_prefix=None,
num_images_per_shard=200,
keep_bboxes=True):
"""Creates a beam pipeline for producing a COCO-CameraTraps Image dataset.
Args:
pipeline: Initialized beam pipeline.
image_directory: Path to image directory
input_annotations_file: Path to a coco-cameratraps annotation file
output_tfrecord_prefix: Absolute path for tfrecord outputs. Final files will
be named {output_tfrecord_prefix}@N.
num_images_per_shard: The number of images to store in each shard
keep_bboxes: Whether to keep any bounding boxes that exist in the json file
"""
data = load_json_data(input_annotations_file)
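  # One output shard per num_images_per_shard images (rounded up).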
num_shards = int(np.ceil(float(len(data['images']))/num_images_per_shard))
image_examples = (
pipeline | ('CreateCollections') >> beam.Create(
[im['id'] for im in data['images']])
| ('ParseImage') >> beam.ParDo(ParseImage(
image_directory, data['images'], data['annotations'],
data['categories'], keep_bboxes=keep_bboxes)))
_ = (image_examples
| ('Reshuffle') >> beam.Reshuffle()
| ('WriteTfImageExample') >> beam.io.tfrecordio.WriteToTFRecord(
output_tfrecord_prefix,
num_shards=num_shards,
coder=beam.coders.ProtoCoder(tf.train.Example)))
def parse_args(argv):
"""Command-line argument parser.
Args:
argv: command line arguments
Returns:
beam_args: Arguments for the beam pipeline.
pipeline_args: Arguments for the pipeline options, such as runner type.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_directory',
dest='image_directory',
required=True,
help='Path to the directory where the images are stored.')
parser.add_argument(
'--output_tfrecord_prefix',
dest='output_tfrecord_prefix',
required=True,
      help='Path and prefix to store TFRecords containing images in tf.Example '
'format.')
parser.add_argument(
'--input_annotations_file',
dest='input_annotations_file',
required=True,
help='Path to Coco-CameraTraps style annotations file.')
parser.add_argument(
'--num_images_per_shard',
dest='num_images_per_shard',
      default=200,
      type=int,
      help='The number of images to be stored in each output shard.')
beam_args, pipeline_args = parser.parse_known_args(argv)
return beam_args, pipeline_args
def main(argv=None, save_main_session=True):
"""Runs the Beam pipeline that performs inference.
Args:
argv: Command line arguments.
save_main_session: Whether to save the main session.
"""
args, pipeline_args = parse_args(argv)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
pipeline_args)
pipeline_options.view_as(
beam.options.pipeline_options.SetupOptions).save_main_session = (
save_main_session)
dirname = os.path.dirname(args.output_tfrecord_prefix)
tf.io.gfile.makedirs(dirname)
p = beam.Pipeline(options=pipeline_options)
create_pipeline(
pipeline=p,
image_directory=args.image_directory,
input_annotations_file=args.input_annotations_file,
output_tfrecord_prefix=args.output_tfrecord_prefix,
num_images_per_shard=args.num_images_per_shard)
p.run()
if __name__ == '__main__':
main()
| 11,636 | 33.737313 | 80 | py |
models | models-master/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for add_context_to_examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import datetime
import os
import tempfile
import unittest
import numpy as np
import six
import tensorflow as tf
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import add_context_to_examples # pylint:disable=g-import-not-at-top
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
@contextlib.contextmanager
def InMemoryTFRecord(entries):
temp = tempfile.NamedTemporaryFile(delete=False)
filename = temp.name
try:
with tf.io.TFRecordWriter(filename) as writer:
for value in entries:
writer.write(value)
yield filename
finally:
os.unlink(temp.name)
def BytesFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def BytesListFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def Int64Feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def Int64ListFeature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def FloatListFeature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateContextDataTest(tf.test.TestCase):
def _create_first_tf_example(self):
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(six.ensure_binary('image_id_1')),
'image/height': Int64Feature(4),
'image/width': Int64Feature(4),
'image/object/class/label': Int64ListFeature([5, 5]),
'image/object/class/text': BytesListFeature([six.ensure_binary('hyena'),
six.ensure_binary('hyena')
]),
'image/object/bbox/xmin': FloatListFeature([0.0, 0.1]),
'image/object/bbox/xmax': FloatListFeature([0.2, 0.3]),
'image/object/bbox/ymin': FloatListFeature([0.4, 0.5]),
'image/object/bbox/ymax': FloatListFeature([0.6, 0.7]),
'image/seq_id': BytesFeature(six.ensure_binary('01')),
'image/seq_num_frames': Int64Feature(2),
'image/seq_frame_num': Int64Feature(0),
'image/date_captured': BytesFeature(
six.ensure_binary(str(datetime.datetime(2020, 1, 1, 1, 0, 0)))),
'image/embedding': FloatListFeature([0.1, 0.2, 0.3]),
'image/embedding_score': FloatListFeature([0.9]),
'image/embedding_length': Int64Feature(3),
'image/embedding_count': Int64Feature(1)
}))
return example.SerializeToString()
def _create_second_tf_example(self):
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(six.ensure_binary('image_id_2')),
'image/height': Int64Feature(4),
'image/width': Int64Feature(4),
'image/object/class/label': Int64ListFeature([5]),
'image/object/class/text': BytesListFeature([six.ensure_binary('hyena')
]),
'image/object/bbox/xmin': FloatListFeature([0.0]),
'image/object/bbox/xmax': FloatListFeature([0.1]),
'image/object/bbox/ymin': FloatListFeature([0.2]),
'image/object/bbox/ymax': FloatListFeature([0.3]),
'image/seq_id': BytesFeature(six.ensure_binary('01')),
'image/seq_num_frames': Int64Feature(2),
'image/seq_frame_num': Int64Feature(1),
'image/date_captured': BytesFeature(
six.ensure_binary(str(datetime.datetime(2020, 1, 1, 1, 1, 0)))),
'image/embedding': FloatListFeature([0.4, 0.5, 0.6]),
'image/embedding_score': FloatListFeature([0.9]),
'image/embedding_length': Int64Feature(3),
'image/embedding_count': Int64Feature(1)
}))
return example.SerializeToString()
def assert_expected_examples(self, tf_example_list):
self.assertAllEqual(
{tf_example.features.feature['image/source_id'].bytes_list.value[0]
for tf_example in tf_example_list},
{six.ensure_binary('image_id_1'), six.ensure_binary('image_id_2')})
self.assertAllClose(
tf_example_list[0].features.feature[
'image/context_features'].float_list.value,
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
self.assertAllClose(
tf_example_list[1].features.feature[
'image/context_features'].float_list.value,
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
def assert_expected_sequence_example(self, tf_sequence_example_list):
tf_sequence_example = tf_sequence_example_list[0]
num_frames = 2
self.assertAllEqual(
tf_sequence_example.context.feature[
'clip/media_id'].bytes_list.value[0], six.ensure_binary(
'01_0'))
self.assertAllClose(
tf_sequence_example.context.feature[
'image/context_features'].float_list.value,
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
seq_feature_dict = tf_sequence_example.feature_lists.feature_list
self.assertLen(
seq_feature_dict['image/encoded'].feature[:],
num_frames)
actual_timestamps = [
feature.int64_list.value[0] for feature
in seq_feature_dict['image/timestamp'].feature]
timestamps = [0, 1]
self.assertAllEqual(timestamps, actual_timestamps)
# First image.
self.assertAllClose(
[0.4, 0.5],
seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:])
self.assertAllClose(
[0.0, 0.1],
seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:])
self.assertAllClose(
[0.6, 0.7],
seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:])
self.assertAllClose(
[0.2, 0.3],
seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:])
self.assertAllEqual(
[six.ensure_binary('hyena'), six.ensure_binary('hyena')],
seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:])
    # Second image.
self.assertAllClose(
[0.2],
seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:])
self.assertAllClose(
[0.0],
seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:])
self.assertAllClose(
[0.3],
seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:])
self.assertAllClose(
[0.1],
seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:])
self.assertAllEqual(
[six.ensure_binary('hyena')],
seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:])
def assert_expected_key(self, key):
self.assertAllEqual(key, b'01')
def assert_sorted(self, example_collection):
example_list = list(example_collection)
counter = 0
for example in example_list:
frame_num = example.features.feature[
'image/seq_frame_num'].int64_list.value[0]
self.assertGreaterEqual(frame_num, counter)
counter = frame_num
def assert_context(self, example_collection):
example_list = list(example_collection)
for example in example_list:
context = example.features.feature[
'image/context_features'].float_list.value
self.assertAllClose([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], context)
def assert_resized(self, example):
width = example.features.feature['image/width'].int64_list.value[0]
self.assertAllEqual(width, 2)
height = example.features.feature['image/height'].int64_list.value[0]
self.assertAllEqual(height, 2)
def assert_size(self, example):
width = example.features.feature['image/width'].int64_list.value[0]
self.assertAllEqual(width, 4)
height = example.features.feature['image/height'].int64_list.value[0]
self.assertAllEqual(height, 4)
def test_sliding_window(self):
example_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
max_clip_length = 3
stride_length = 3
out_list = [list(i) for i in add_context_to_examples.get_sliding_window(
example_list, max_clip_length, stride_length)]
self.assertAllEqual(out_list, [['a', 'b', 'c'],
['d', 'e', 'f'],
['g']])
def test_rekey_data_fn(self):
sequence_key = 'image/seq_id'
time_horizon = None
reduce_image_size = False
max_dim = None
rekey_fn = add_context_to_examples.ReKeyDataFn(
sequence_key, time_horizon,
reduce_image_size, max_dim)
output = rekey_fn.process(self._create_first_tf_example())
self.assert_expected_key(output[0][0])
self.assert_size(output[0][1])
def test_rekey_data_fn_w_resize(self):
sequence_key = 'image/seq_id'
time_horizon = None
reduce_image_size = True
max_dim = 2
rekey_fn = add_context_to_examples.ReKeyDataFn(
sequence_key, time_horizon,
reduce_image_size, max_dim)
output = rekey_fn.process(self._create_first_tf_example())
self.assert_expected_key(output[0][0])
self.assert_resized(output[0][1])
def test_sort_fn(self):
sequence_key = 'image/seq_id'
sorted_image_ids = False
max_num_elements_in_context_features = 10
sort_fn = add_context_to_examples.SortGroupedDataFn(
sequence_key, sorted_image_ids, max_num_elements_in_context_features)
output = sort_fn.process(
('dummy_key', [tf.train.Example.FromString(
self._create_second_tf_example()),
tf.train.Example.FromString(
self._create_first_tf_example())]))
self.assert_sorted(output[0][1])
def test_add_context_fn(self):
sequence_key = 'image/seq_id'
add_context_features = True
image_ids_to_keep = 'All'
context_fn = add_context_to_examples.GenerateContextFn(
sequence_key, add_context_features, image_ids_to_keep)
output = context_fn.process(
('dummy_key', [tf.train.Example.FromString(
self._create_first_tf_example()),
tf.train.Example.FromString(
self._create_second_tf_example())]))
self.assertEqual(len(output), 2)
self.assert_context(output)
def test_add_context_fn_output_sequence_example(self):
sequence_key = 'image/seq_id'
add_context_features = True
image_ids_to_keep = 'All'
context_fn = add_context_to_examples.GenerateContextFn(
sequence_key, add_context_features, image_ids_to_keep,
output_type='tf_sequence_example')
output = context_fn.process(
('01',
[tf.train.Example.FromString(self._create_first_tf_example()),
tf.train.Example.FromString(self._create_second_tf_example())]))
self.assertEqual(len(output), 1)
self.assert_expected_sequence_example(output)
def test_add_context_fn_output_sequence_example_cliplen(self):
sequence_key = 'image/seq_id'
add_context_features = True
image_ids_to_keep = 'All'
context_fn = add_context_to_examples.GenerateContextFn(
sequence_key, add_context_features, image_ids_to_keep,
output_type='tf_sequence_example', max_clip_length=1)
output = context_fn.process(
('01',
[tf.train.Example.FromString(self._create_first_tf_example()),
tf.train.Example.FromString(self._create_second_tf_example())]))
self.assertEqual(len(output), 2)
def test_beam_pipeline(self):
with InMemoryTFRecord(
[self._create_first_tf_example(),
self._create_second_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
sequence_key = six.ensure_binary('image/seq_id')
max_num_elements = 10
num_shards = 1
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
add_context_to_examples.construct_pipeline(
p,
input_tfrecord,
output_tfrecord,
sequence_key,
max_num_elements_in_context_features=max_num_elements,
num_shards=num_shards)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 2)
self.assert_expected_examples([tf.train.Example.FromString(
tf_example) for tf_example in actual_output])
def test_beam_pipeline_sequence_example(self):
with InMemoryTFRecord(
[self._create_first_tf_example(),
self._create_second_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
sequence_key = six.ensure_binary('image/seq_id')
max_num_elements = 10
num_shards = 1
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
add_context_to_examples.construct_pipeline(
p,
input_tfrecord,
output_tfrecord,
sequence_key,
max_num_elements_in_context_features=max_num_elements,
num_shards=num_shards,
output_type='tf_sequence_example')
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
self.assert_expected_sequence_example(
[tf.train.SequenceExample.FromString(
tf_example) for tf_example in actual_output])
if __name__ == '__main__':
tf.test.main()
| 15,263 | 37.255639 | 119 | py |
models | models-master/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for generate_detection_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import tempfile
import unittest
import numpy as np
import six
import tensorflow as tf
from object_detection import exporter_lib_v2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import generate_detection_data # pylint:disable=g-import-not-at-top
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
mock = unittest.mock
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class FakeModel(model.DetectionModel):
def __init__(self, conv_weight_scalar=1.0):
super(FakeModel, self).__init__(num_classes=5)
self._conv = tf.keras.layers.Conv2D(
filters=1, kernel_size=1, strides=(1, 1), padding='valid',
kernel_initializer=tf.keras.initializers.Constant(
value=conv_weight_scalar))
def preprocess(self, inputs):
return tf.identity(inputs), exporter_lib_v2.get_true_shapes(inputs)
def predict(self, preprocessed_inputs, true_image_shapes):
return {'image': self._conv(preprocessed_inputs)}
def postprocess(self, prediction_dict, true_image_shapes):
with tf.control_dependencies(list(prediction_dict.values())):
postprocessed_tensors = {
'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6],
[0.5, 0.5, 0.8, 0.8]]], tf.float32),
'detection_scores': tf.constant([[0.95, 0.6]], tf.float32),
'detection_multiclass_scores': tf.constant([[[0.1, 0.7, 0.2],
[0.3, 0.1, 0.6]]],
tf.float32),
'detection_classes': tf.constant([[0, 1]], tf.float32),
'num_detections': tf.constant([2], tf.float32)
}
return postprocessed_tensors
def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@contextlib.contextmanager
def InMemoryTFRecord(entries):
temp = tempfile.NamedTemporaryFile(delete=False)
filename = temp.name
try:
with tf.io.TFRecordWriter(filename) as writer:
for value in entries:
writer.write(value)
yield filename
finally:
os.unlink(filename)
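# Illustrative use of the helper above (mirrors the tests below, names are
# placeholders): serialize tf.train.Example protos, write them to a throwaway
# TFRecord, and hand the temporary filename to the code under test:
#   with InMemoryTFRecord([example.SerializeToString()]) as input_path:
#     ...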
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateDetectionDataTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_path):
"""A function to save checkpoint from a fake Detection Model.
Args:
checkpoint_path: Path to save checkpoint from Fake model.
"""
mock_model = FakeModel()
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_path, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
def _export_saved_model(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
output_directory = os.path.join(tmp_dir, 'output')
saved_model_path = os.path.join(output_directory, 'saved_model')
tf.io.gfile.makedirs(output_directory)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
return saved_model_path
def _create_tf_example(self):
with self.test_session():
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 6, 3)).astype(np.uint8))).numpy()
def BytesFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def Int64Feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(b'image_id'),
'image/height': Int64Feature(4),
'image/width': Int64Feature(6),
'image/object/class/label': Int64Feature(5),
'image/object/class/text': BytesFeature(b'hyena'),
'image/class/label': Int64Feature(5),
'image/class/text': BytesFeature(b'hyena'),
}))
return example.SerializeToString()
def assert_expected_example(self, example):
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.0])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.1])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.5])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.6])
self.assertAllClose(
example.features.feature['image/object/class/score']
.float_list.value, [0.95])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'hyena'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value, [4])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value, [6])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'image_id'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def test_generate_detection_data_fn(self):
saved_model_path = self._export_saved_model()
confidence_threshold = 0.8
inference_fn = generate_detection_data.GenerateDetectionDataFn(
saved_model_path, confidence_threshold)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
output = inference_fn.process(generated_example)
output_example = output[0]
self.assertAllEqual(
output_example.features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(output_example.features.feature['image/width']
.int64_list.value, [6])
self.assert_expected_example(output_example)
def test_beam_pipeline(self):
with InMemoryTFRecord([self._create_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
saved_model_path = self._export_saved_model()
confidence_threshold = 0.8
num_shards = 1
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
generate_detection_data.construct_pipeline(
p, input_tfrecord, output_tfrecord, saved_model_path,
confidence_threshold, num_shards)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
self.assert_expected_example(tf.train.Example.FromString(
actual_output[0]))
if __name__ == '__main__':
tf.test.main()
| 9,871 | 36.823755 | 119 | py |
models | models-master/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""A Beam job to generate embedding data for camera trap images.
This tool runs inference with an exported Object Detection model in
`saved_model` format and produce raw embeddings for camera trap data. These
embeddings contain an object-centric feature embedding from Faster R-CNN, the
datetime that the image was taken (normalized in a specific way), and the
position of the object of interest. By default, only the highest-scoring object
embedding is included.
Steps to generate an embedding dataset:
1. Use object_detection/export_inference_graph.py to get a Faster R-CNN
`saved_model` for inference. The input node must accept a tf.Example proto.
2. Run this tool with `saved_model` from step 1 and a TFRecord of tf.Example
protos containing images for inference.
Example Usage:
--------------
python tensorflow_models/object_detection/export_inference_graph.py \
--alsologtostderr \
--input_type tf_example \
--pipeline_config_path path/to/faster_rcnn_model.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory \
--additional_output_tensor_names detection_features
python generate_embedding_data.py \
--alsologtostderr \
--embedding_input_tfrecord path/to/input_tfrecords* \
--embedding_output_tfrecord path/to/output_tfrecords \
--embedding_model_dir path/to/exported_model_directory/saved_model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import os
import threading
import numpy as np
import six
import tensorflow as tf
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
def add_keys(serialized_example):
key = hash(serialized_example)
return key, serialized_example
def drop_keys(key_value_tuple):
return key_value_tuple[1]
def get_date_captured(example):
date_captured = datetime.datetime.strptime(
six.ensure_str(
example.features.feature['image/date_captured'].bytes_list.value[0]),
'%Y-%m-%d %H:%M:%S')
return date_captured
def embed_date_captured(date_captured):
"""Encodes the datetime of the image."""
embedded_date_captured = []
month_max = 12.0
day_max = 31.0
hour_max = 24.0
minute_max = 60.0
min_year = 1990.0
max_year = 2030.0
year = (date_captured.year - min_year) / float(max_year - min_year)
embedded_date_captured.append(year)
month = (date_captured.month - 1) / month_max
embedded_date_captured.append(month)
day = (date_captured.day - 1) / day_max
embedded_date_captured.append(day)
hour = date_captured.hour / hour_max
embedded_date_captured.append(hour)
minute = date_captured.minute / minute_max
embedded_date_captured.append(minute)
return np.asarray(embedded_date_captured)
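# Worked example (illustrative): with the normalization above,
# datetime.datetime(2020, 1, 1, 1, 0, 0) encodes to approximately
# [0.75, 0.0, 0.0, 0.0417, 0.0] (year, month, day, hour, minute).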
def embed_position_and_size(box):
"""Encodes the bounding box of the object of interest."""
ymin = box[0]
xmin = box[1]
ymax = box[2]
xmax = box[3]
w = xmax - xmin
h = ymax - ymin
x = xmin + w / 2.0
y = ymin + h / 2.0
return np.asarray([x, y, w, h])
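# Worked example (illustrative): the normalized box
# [ymin, xmin, ymax, xmax] = [0.0, 0.1, 0.5, 0.6] encodes to
# [x_center, y_center, width, height] = [0.35, 0.25, 0.5, 0.5].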
def get_bb_embedding(detection_features, detection_boxes, detection_scores,
index):
embedding = detection_features[0][index]
pooled_embedding = np.mean(np.mean(embedding, axis=1), axis=0)
box = detection_boxes[0][index]
position_embedding = embed_position_and_size(box)
score = detection_scores[0][index]
return np.concatenate((pooled_embedding, position_embedding)), score
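# The vector returned above is 1-D: the box's detection feature map mean-pooled
# over its spatial dimensions, concatenated with the 4-value position embedding
# (for an HxWxC feature map this gives a (C + 4)-vector); the box's detection
# score is returned alongside it.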
class GenerateEmbeddingDataFn(beam.DoFn):
"""Generates embedding data for camera trap images.
This Beam DoFn performs inference with an object detection `saved_model` and
produces contextual embedding vectors.
"""
session_lock = threading.Lock()
def __init__(self, model_dir, top_k_embedding_count,
bottom_k_embedding_count, embedding_type='final_box_features'):
"""Initialization function.
Args:
      model_dir: A directory containing the saved model.
      top_k_embedding_count: The number of high-confidence embeddings to store.
      bottom_k_embedding_count: The number of low-confidence embeddings to
        store.
      embedding_type: One of 'final_box_features', 'rpn_box_features'.
"""
self._model_dir = model_dir
self._session = None
self._num_examples_processed = beam.metrics.Metrics.counter(
'embedding_data_generation', 'num_tf_examples_processed')
self._top_k_embedding_count = top_k_embedding_count
self._bottom_k_embedding_count = bottom_k_embedding_count
self._embedding_type = embedding_type
def setup(self):
self._load_inference_model()
def _load_inference_model(self):
    # Loading the saved model is expensive, so it is guarded by a lock and the
    # single loaded model is shared across all threads in the worker.
with self.session_lock:
self._detect_fn = tf.saved_model.load(self._model_dir)
def process(self, tfexample_key_value):
return self._run_inference_and_generate_embedding(tfexample_key_value)
def _run_inference_and_generate_embedding(self, tfexample_key_value):
key, tfexample = tfexample_key_value
input_example = tf.train.Example.FromString(tfexample)
example = tf.train.Example()
example.CopyFrom(input_example)
try:
date_captured = get_date_captured(input_example)
unix_time = ((date_captured -
datetime.datetime.fromtimestamp(0)).total_seconds())
example.features.feature['image/unix_time'].float_list.value.extend(
[unix_time])
temporal_embedding = embed_date_captured(date_captured)
except Exception: # pylint: disable=broad-except
temporal_embedding = None
detections = self._detect_fn.signatures['serving_default'](
(tf.expand_dims(tf.convert_to_tensor(tfexample), 0)))
if self._embedding_type == 'final_box_features':
detection_features = detections['detection_features']
elif self._embedding_type == 'rpn_box_features':
detection_features = detections['cropped_rpn_box_features']
else:
raise ValueError('embedding type not supported')
detection_boxes = detections['detection_boxes']
num_detections = detections['num_detections']
detection_scores = detections['detection_scores']
num_detections = int(num_detections)
embed_all = []
score_all = []
detection_features = np.asarray(detection_features)
embedding_count = 0
for index in range(min(num_detections, self._top_k_embedding_count)):
bb_embedding, score = get_bb_embedding(
detection_features, detection_boxes, detection_scores, index)
embed_all.extend(bb_embedding)
if temporal_embedding is not None: embed_all.extend(temporal_embedding)
score_all.append(score)
embedding_count += 1
for index in range(
max(0, num_detections - 1),
max(-1, num_detections - 1 - self._bottom_k_embedding_count), -1):
bb_embedding, score = get_bb_embedding(
detection_features, detection_boxes, detection_scores, index)
embed_all.extend(bb_embedding)
if temporal_embedding is not None: embed_all.extend(temporal_embedding)
score_all.append(score)
embedding_count += 1
if embedding_count == 0:
bb_embedding, score = get_bb_embedding(
detection_features, detection_boxes, detection_scores, 0)
embed_all.extend(bb_embedding)
if temporal_embedding is not None: embed_all.extend(temporal_embedding)
score_all.append(score)
# Takes max in case embedding_count is 0.
embedding_length = len(embed_all) // max(1, embedding_count)
embed_all = np.asarray(embed_all)
example.features.feature['image/embedding'].float_list.value.extend(
embed_all)
example.features.feature['image/embedding_score'].float_list.value.extend(
score_all)
example.features.feature['image/embedding_length'].int64_list.value.append(
embedding_length)
example.features.feature['image/embedding_count'].int64_list.value.append(
embedding_count)
self._num_examples_processed.inc(1)
return [(key, example)]
def construct_pipeline(pipeline, input_tfrecord, output_tfrecord, model_dir,
top_k_embedding_count, bottom_k_embedding_count,
num_shards, embedding_type):
"""Returns a beam pipeline to run object detection inference.
Args:
pipeline: Initialized beam pipeline.
    input_tfrecord: A TFRecord of tf.train.Example protos containing images.
    output_tfrecord: A TFRecord of tf.train.Example protos that contain the
        images from the input TFRecord along with the generated embeddings.
model_dir: Path to `saved_model` to use for inference.
top_k_embedding_count: The number of high-confidence embeddings to store.
bottom_k_embedding_count: The number of low-confidence embeddings to store.
num_shards: The number of output shards.
embedding_type: Which features to embed.
"""
input_collection = (
pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord(
input_tfrecord, coder=beam.coders.BytesCoder())
| 'AddKeys' >> beam.Map(add_keys))
output_collection = input_collection | 'ExtractEmbedding' >> beam.ParDo(
GenerateEmbeddingDataFn(model_dir, top_k_embedding_count,
bottom_k_embedding_count, embedding_type))
output_collection = output_collection | 'Reshuffle' >> beam.Reshuffle()
_ = output_collection | 'DropKeys' >> beam.Map(
drop_keys) | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord(
output_tfrecord,
num_shards=num_shards,
coder=beam.coders.ProtoCoder(tf.train.Example))
def parse_args(argv):
"""Command-line argument parser.
Args:
argv: command line arguments
Returns:
beam_args: Arguments for the beam pipeline.
pipeline_args: Arguments for the pipeline options, such as runner type.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--embedding_input_tfrecord',
dest='embedding_input_tfrecord',
required=True,
help='TFRecord containing images in tf.Example format for object '
'detection.')
parser.add_argument(
'--embedding_output_tfrecord',
dest='embedding_output_tfrecord',
required=True,
help='TFRecord containing embeddings in tf.Example format.')
parser.add_argument(
'--embedding_model_dir',
dest='embedding_model_dir',
required=True,
      help='Path to directory containing an object detection SavedModel with '
'detection_box_classifier_features in the output.')
parser.add_argument(
'--top_k_embedding_count',
dest='top_k_embedding_count',
      type=int,
      default=1,
help='The number of top k embeddings to add to the memory bank.')
parser.add_argument(
'--bottom_k_embedding_count',
dest='bottom_k_embedding_count',
      type=int,
      default=0,
help='The number of bottom k embeddings to add to the memory bank.')
parser.add_argument(
'--num_shards',
dest='num_shards',
      type=int,
      default=0,
help='Number of output shards.')
parser.add_argument(
'--embedding_type',
dest='embedding_type',
default='final_box_features',
help='What features to embed, supports `final_box_features`, '
'`rpn_box_features`.')
beam_args, pipeline_args = parser.parse_known_args(argv)
return beam_args, pipeline_args
def main(argv=None, save_main_session=True):
"""Runs the Beam pipeline that performs inference.
Args:
argv: Command line arguments.
save_main_session: Whether to save the main session.
"""
args, pipeline_args = parse_args(argv)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
pipeline_args)
pipeline_options.view_as(
beam.options.pipeline_options.SetupOptions).save_main_session = (
save_main_session)
dirname = os.path.dirname(args.embedding_output_tfrecord)
tf.io.gfile.makedirs(dirname)
p = beam.Pipeline(options=pipeline_options)
construct_pipeline(
p,
args.embedding_input_tfrecord,
args.embedding_output_tfrecord,
args.embedding_model_dir,
args.top_k_embedding_count,
args.bottom_k_embedding_count,
args.num_shards,
args.embedding_type)
p.run()
if __name__ == '__main__':
main()
| 13,112 | 34.345013 | 80 | py |