repo | file | code | file_length | avg_line_length | max_line_length | extension_type
string (lengths 2-99) | string (lengths 13-225) | string (lengths 0-18.3M) | int64 (0-18.3M) | float64 (0-1.36M) | int64 (0-4.26M) | string (1 class: py)
---|---|---|---|---|---|---
models | models-master/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""A Beam job to add contextual memory banks to tf.Examples.
This tool groups images containing bounding boxes and embedded context features
by a key, either `image/location` or `image/seq_id`, and time horizon,
then uses these groups to build up a contextual memory bank from the embedded
context features from each image in the group and adds that context to the
output tf.Examples for each image in the group.
Steps to generate a dataset with context from one with bounding boxes and
embedded context features:
1. Use object_detection/export_inference_graph.py to get a `saved_model` for
inference. The input node must accept a tf.Example proto.
2. Run this tool with `saved_model` from step 1 and a TFRecord of tf.Example
protos containing images, bounding boxes, and embedded context features.
The context features can be added to tf.Examples using
generate_embedding_data.py.
Example Usage:
--------------
python add_context_to_examples.py \
--input_tfrecord path/to/input_tfrecords* \
--output_tfrecord path/to/output_tfrecords \
--sequence_key image/location \
--time_horizon month
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import copy
import datetime
import io
import itertools
import json
import os
import numpy as np
import PIL.Image
import six
import tensorflow as tf
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class ReKeyDataFn(beam.DoFn):
"""Re-keys tfrecords by sequence_key.
This Beam DoFn re-keys the tfrecords by a user-defined sequence_key
"""
def __init__(self, sequence_key, time_horizon,
reduce_image_size, max_image_dimension):
"""Initialization function.
Args:
sequence_key: A feature name to use as a key for grouping sequences.
Must point to a key of type bytes_list
time_horizon: What length of time to use to partition the data when
building the memory banks. Options: `year`, `month`, `week`, `day`,
`hour`, `minute`, None
reduce_image_size: Whether to reduce the sizes of the stored images.
max_image_dimension: maximum dimension of reduced images
"""
self._sequence_key = sequence_key
if time_horizon is None or time_horizon in {'year', 'month', 'week', 'day',
'hour', 'minute'}:
self._time_horizon = time_horizon
else:
raise ValueError('Time horizon not supported.')
self._reduce_image_size = reduce_image_size
self._max_image_dimension = max_image_dimension
self._session = None
self._num_examples_processed = beam.metrics.Metrics.counter(
'data_rekey', 'num_tf_examples_processed')
self._num_images_resized = beam.metrics.Metrics.counter(
'data_rekey', 'num_images_resized')
self._num_images_read = beam.metrics.Metrics.counter(
'data_rekey', 'num_images_read')
self._num_images_found = beam.metrics.Metrics.counter(
'data_rekey', 'num_images_found')
self._num_got_shape = beam.metrics.Metrics.counter(
'data_rekey', 'num_images_got_shape')
self._num_images_found_size = beam.metrics.Metrics.counter(
'data_rekey', 'num_images_found_size')
self._num_examples_cleared = beam.metrics.Metrics.counter(
'data_rekey', 'num_examples_cleared')
self._num_examples_updated = beam.metrics.Metrics.counter(
'data_rekey', 'num_examples_updated')
def process(self, tfrecord_entry):
return self._rekey_examples(tfrecord_entry)
def _largest_size_at_most(self, height, width, largest_side):
"""Computes new shape with the largest side equal to `largest_side`.
Args:
height: an int indicating the current height.
width: an int indicating the current width.
largest_side: A python integer indicating the size of
the largest side after resize.
Returns:
new_height: an int indicating the new height.
new_width: an int indicating the new width.
"""
x_scale = float(largest_side) / float(width)
y_scale = float(largest_side) / float(height)
scale = min(x_scale, y_scale)
new_width = int(width * scale)
new_height = int(height * scale)
return new_height, new_width
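# Illustrative worked example (not part of the original source): for an image
# with height=1000, width=2000 and largest_side=500, x_scale = 500/2000 = 0.25
# and y_scale = 500/1000 = 0.5, so scale = 0.25 and the method returns
# (new_height, new_width) = (250, 500). The aspect ratio is preserved and the
# longest side is resized down to exactly `largest_side`.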
def _resize_image(self, input_example):
"""Resizes the image within input_example and updates the height and width.
Args:
input_example: A tf.Example that we want to update to contain a resized
image.
Returns:
input_example: Updated tf.Example.
"""
original_image = copy.deepcopy(
input_example.features.feature['image/encoded'].bytes_list.value[0])
self._num_images_read.inc(1)
height = copy.deepcopy(
input_example.features.feature['image/height'].int64_list.value[0])
width = copy.deepcopy(
input_example.features.feature['image/width'].int64_list.value[0])
self._num_got_shape.inc(1)
new_height, new_width = self._largest_size_at_most(
height, width, self._max_image_dimension)
self._num_images_found_size.inc(1)
encoded_jpg_io = io.BytesIO(original_image)
image = PIL.Image.open(encoded_jpg_io)
resized_image = image.resize((new_width, new_height))
with io.BytesIO() as output:
resized_image.save(output, format='JPEG')
encoded_resized_image = output.getvalue()
self._num_images_resized.inc(1)
del input_example.features.feature['image/encoded'].bytes_list.value[:]
del input_example.features.feature['image/height'].int64_list.value[:]
del input_example.features.feature['image/width'].int64_list.value[:]
self._num_examples_cleared.inc(1)
input_example.features.feature['image/encoded'].bytes_list.value.extend(
[encoded_resized_image])
input_example.features.feature['image/height'].int64_list.value.extend(
[new_height])
input_example.features.feature['image/width'].int64_list.value.extend(
[new_width])
self._num_examples_updated.inc(1)
return input_example
def _rekey_examples(self, tfrecord_entry):
serialized_example = copy.deepcopy(tfrecord_entry)
input_example = tf.train.Example.FromString(serialized_example)
self._num_images_found.inc(1)
if self._reduce_image_size:
input_example = self._resize_image(input_example)
self._num_images_resized.inc(1)
new_key = input_example.features.feature[
self._sequence_key].bytes_list.value[0]
if self._time_horizon:
date_captured = datetime.datetime.strptime(
six.ensure_str(input_example.features.feature[
'image/date_captured'].bytes_list.value[0]), '%Y-%m-%d %H:%M:%S')
year = date_captured.year
month = date_captured.month
day = date_captured.day
week = np.floor(float(day) / float(7))
hour = date_captured.hour
minute = date_captured.minute
if self._time_horizon == 'year':
new_key = new_key + six.ensure_binary('/' + str(year))
elif self._time_horizon == 'month':
new_key = new_key + six.ensure_binary(
'/' + str(year) + '/' + str(month))
elif self._time_horizon == 'week':
new_key = new_key + six.ensure_binary(
'/' + str(year) + '/' + str(month) + '/' + str(week))
elif self._time_horizon == 'day':
new_key = new_key + six.ensure_binary(
'/' + str(year) + '/' + str(month) + '/' + str(day))
elif self._time_horizon == 'hour':
new_key = new_key + six.ensure_binary(
'/' + str(year) + '/' + str(month) + '/' + str(day) + '/' + (
str(hour)))
elif self._time_horizon == 'minute':
new_key = new_key + six.ensure_binary(
'/' + str(year) + '/' + str(month) + '/' + str(day) + '/' + (
str(hour) + '/' + str(minute)))
self._num_examples_processed.inc(1)
return [(new_key, input_example)]
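# Illustrative example of the re-keying above (assumed feature values, not
# taken from the original source): with sequence_key='image/location', a
# location value of b'site_042', image/date_captured of '2019-10-20 12:12:12'
# and time_horizon='month', the emitted key is b'site_042/2019/10'; with
# time_horizon='day' it would be b'site_042/2019/10/20'. Examples that share
# a key are later grouped into a single contextual memory bank.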
class SortGroupedDataFn(beam.DoFn):
"""Sorts data within a keyed group.
This Beam DoFn sorts the grouped list of image examples by frame_num
"""
def __init__(self, sequence_key, sorted_image_ids,
max_num_elements_in_context_features):
"""Initialization function.
Args:
sequence_key: A feature name to use as a key for grouping sequences.
Must point to a key of type bytes_list
sorted_image_ids: Whether the image ids are sortable to use as sorting
tie-breakers
max_num_elements_in_context_features: The maximum number of elements
allowed in the memory bank
"""
self._session = None
self._num_examples_processed = beam.metrics.Metrics.counter(
'sort_group', 'num_groups_sorted')
self._too_many_elements = beam.metrics.Metrics.counter(
'sort_group', 'too_many_elements')
self._split_elements = beam.metrics.Metrics.counter(
'sort_group', 'split_elements')
self._sequence_key = six.ensure_binary(sequence_key)
self._sorted_image_ids = sorted_image_ids
self._max_num_elements_in_context_features = (
max_num_elements_in_context_features)
def process(self, grouped_entry):
return self._sort_image_examples(grouped_entry)
def _sort_image_examples(self, grouped_entry):
key, example_collection = grouped_entry
example_list = list(example_collection)
def get_frame_num(example):
return example.features.feature['image/seq_frame_num'].int64_list.value[0]
def get_date_captured(example):
return datetime.datetime.strptime(
six.ensure_str(
example.features.feature[
'image/date_captured'].bytes_list.value[0]),
'%Y-%m-%d %H:%M:%S')
def get_image_id(example):
return example.features.feature['image/source_id'].bytes_list.value[0]
if self._sequence_key == six.ensure_binary('image/seq_id'):
sorting_fn = get_frame_num
elif self._sequence_key == six.ensure_binary('image/location'):
if self._sorted_image_ids:
sorting_fn = get_image_id
else:
sorting_fn = get_date_captured
sorted_example_list = sorted(example_list, key=sorting_fn)
num_embeddings = 0
for example in sorted_example_list:
num_embeddings += example.features.feature[
'image/embedding_count'].int64_list.value[0]
self._num_examples_processed.inc(1)
# To handle cases where there are more context embeddings within
# the time horizon than the specified maximum, we split the context group
# into subsets sequentially in time, with each subset having the maximum
# number of context embeddings except the final one, which holds the
# remainder.
if num_embeddings > self._max_num_elements_in_context_features:
leftovers = sorted_example_list
output_list = []
count = 0
self._too_many_elements.inc(1)
num_embeddings = 0
max_idx = 0
for idx, example in enumerate(leftovers):
num_embeddings += example.features.feature[
'image/embedding_count'].int64_list.value[0]
if num_embeddings <= self._max_num_elements_in_context_features:
max_idx = idx
while num_embeddings > self._max_num_elements_in_context_features:
self._split_elements.inc(1)
new_key = key + six.ensure_binary('_' + str(count))
new_list = leftovers[:max_idx]
output_list.append((new_key, new_list))
leftovers = leftovers[max_idx:]
count += 1
num_embeddings = 0
max_idx = 0
for idx, example in enumerate(leftovers):
num_embeddings += example.features.feature[
'image/embedding_count'].int64_list.value[0]
if num_embeddings <= self._max_num_elements_in_context_features:
max_idx = idx
new_key = key + six.ensure_binary('_' + str(count))
output_list.append((new_key, leftovers))
else:
output_list = [(key, sorted_example_list)]
return output_list
def get_sliding_window(example_list, max_clip_length, stride_length):
"""Yields a sliding window over data from example_list.
Sliding window has width max_clip_length (n) and stride stride_length (m).
s -> (s0, s1, ..., s[n-1]), (s[m], s[m+1], ..., s[m+n-1]), ...
Args:
example_list: A list of examples.
max_clip_length: The maximum length of each clip.
stride_length: The stride between each clip.
Yields:
A list of lists of examples, each with length <= max_clip_length
"""
# check if the list is too short to slide over
if len(example_list) < max_clip_length:
yield example_list
else:
starting_values = [i*stride_length for i in
range(len(example_list)) if
len(example_list) > i*stride_length]
for start in starting_values:
result = tuple(itertools.islice(example_list, start,
min(start + max_clip_length,
len(example_list))))
yield result
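# Illustrative example (assumed values, not part of the original source): for
# example_list = [e0, e1, e2, e3, e4], max_clip_length=3 and stride_length=2,
# the starting indices are [0, 2, 4], so the generator yields
# (e0, e1, e2), (e2, e3, e4), (e4,): overlapping clips of at most
# max_clip_length examples each. When len(example_list) < max_clip_length,
# the whole list is yielded unchanged.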
class GenerateContextFn(beam.DoFn):
"""Generates context data for camera trap images.
This Beam DoFn builds up contextual memory banks from groups of images and
stores them in the output tf.Example or tf.train.SequenceExample for each image.
"""
def __init__(self, sequence_key, add_context_features, image_ids_to_keep,
keep_context_features_image_id_list=False,
subsample_context_features_rate=0,
keep_only_positives=False,
context_features_score_threshold=0.7,
keep_only_positives_gt=False,
max_num_elements_in_context_features=5000,
pad_context_features=False,
output_type='tf_example', max_clip_length=None,
context_feature_length=2057):
"""Initialization function.
Args:
sequence_key: A feature name to use as a key for grouping sequences.
add_context_features: Whether to keep and store the contextual memory
bank.
image_ids_to_keep: A list of image ids to save, to use to build data
subsets for evaluation.
keep_context_features_image_id_list: Whether to save an ordered list of
the ids of the images in the contextual memory bank.
subsample_context_features_rate: What rate to subsample images for the
contextual memory bank.
keep_only_positives: Whether to only keep high scoring
(>context_features_score_threshold) features in the contextual memory
bank.
context_features_score_threshold: What threshold to use for keeping
features.
keep_only_positives_gt: Whether to only keep features from images that
contain objects based on the ground truth (for training).
max_num_elements_in_context_features: the maximum number of elements in
the memory bank
pad_context_features: Whether to pad the context features to a fixed size.
output_type: What type of output, tf_example or tf_sequence_example.
max_clip_length: The maximum length of a sequence example, before it is
split into multiple sequence examples.
context_feature_length: The length of the context feature embeddings
stored in the input data.
"""
self._session = None
self._num_examples_processed = beam.metrics.Metrics.counter(
'sequence_data_generation', 'num_seq_examples_processed')
self._num_keys_processed = beam.metrics.Metrics.counter(
'sequence_data_generation', 'num_keys_processed')
self._sequence_key = sequence_key
self._add_context_features = add_context_features
self._pad_context_features = pad_context_features
self._output_type = output_type
self._max_clip_length = max_clip_length
if six.ensure_str(image_ids_to_keep) == 'All':
self._image_ids_to_keep = None
else:
with tf.io.gfile.GFile(image_ids_to_keep) as f:
self._image_ids_to_keep = json.load(f)
self._keep_context_features_image_id_list = (
keep_context_features_image_id_list)
self._subsample_context_features_rate = subsample_context_features_rate
self._keep_only_positives = keep_only_positives
self._keep_only_positives_gt = keep_only_positives_gt
self._context_features_score_threshold = context_features_score_threshold
self._max_num_elements_in_context_features = (
max_num_elements_in_context_features)
self._context_feature_length = context_feature_length
self._images_kept = beam.metrics.Metrics.counter(
'sequence_data_generation', 'images_kept')
self._images_loaded = beam.metrics.Metrics.counter(
'sequence_data_generation', 'images_loaded')
def process(self, grouped_entry):
return self._add_context_to_example(copy.deepcopy(grouped_entry))
def _build_context_features(self, example_list):
context_features = []
context_features_image_id_list = []
count = 0
example_embedding = []
for idx, example in enumerate(example_list):
if self._subsample_context_features_rate > 0:
if (idx % self._subsample_context_features_rate) != 0:
example.features.feature[
'context_features_idx'].int64_list.value.append(
self._max_num_elements_in_context_features + 1)
continue
if self._keep_only_positives:
if example.features.feature[
'image/embedding_score'
].float_list.value[0] < self._context_features_score_threshold:
example.features.feature[
'context_features_idx'].int64_list.value.append(
self._max_num_elements_in_context_features + 1)
continue
if self._keep_only_positives_gt:
if len(example.features.feature[
'image/object/bbox/xmin'
].float_list.value) < 1:
example.features.feature[
'context_features_idx'].int64_list.value.append(
self._max_num_elements_in_context_features + 1)
continue
example_embedding = list(example.features.feature[
'image/embedding'].float_list.value)
context_features.extend(example_embedding)
num_embeddings = example.features.feature[
'image/embedding_count'].int64_list.value[0]
example_image_id = example.features.feature[
'image/source_id'].bytes_list.value[0]
for _ in range(num_embeddings):
example.features.feature[
'context_features_idx'].int64_list.value.append(count)
count += 1
context_features_image_id_list.append(example_image_id)
if not example_embedding:
example_embedding.append(np.zeros(self._context_feature_length))
feature_length = self._context_feature_length
# If the example_list is not empty and image/embedding_length is in the
# feature dict, feature_length is taken from that feature. Otherwise, it is
# kept at the default passed in at construction time.
if example_list and (
'image/embedding_length' in example_list[0].features.feature):
feature_length = example_list[0].features.feature[
'image/embedding_length'].int64_list.value[0]
if self._pad_context_features:
while len(context_features_image_id_list) < (
self._max_num_elements_in_context_features):
context_features_image_id_list.append('')
return context_features, feature_length, context_features_image_id_list
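# Illustrative sketch of the values returned above (assumed shapes, not taken
# from the original source): if two images are kept and each stores a single
# 2057-float 'image/embedding', then context_features is a flat list of
# 2 * 2057 floats, feature_length is 2057 (or the stored
# 'image/embedding_length' when present), and each kept image's
# 'context_features_idx' records its row index (0 and 1, respectively) in the
# flattened memory bank.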
def _add_context_to_example(self, grouped_entry):
key, example_collection = grouped_entry
list_of_examples = []
example_list = list(example_collection)
if self._add_context_features:
context_features, feature_length, context_features_image_id_list = (
self._build_context_features(example_list))
if self._image_ids_to_keep is not None:
new_example_list = []
for example in example_list:
im_id = example.features.feature['image/source_id'].bytes_list.value[0]
self._images_loaded.inc(1)
if six.ensure_str(im_id) in self._image_ids_to_keep:
self._images_kept.inc(1)
new_example_list.append(example)
if new_example_list:
example_list = new_example_list
else:
return []
if self._output_type == 'tf_sequence_example':
if self._max_clip_length is not None:
# For now, no overlap
clips = get_sliding_window(
example_list, self._max_clip_length, self._max_clip_length)
else:
clips = [example_list]
for clip_num, clip_list in enumerate(clips):
# initialize sequence example
seq_example = tf.train.SequenceExample()
video_id = six.ensure_str(key)+'_'+ str(clip_num)
seq_example.context.feature['clip/media_id'].bytes_list.value.append(
video_id.encode('utf8'))
seq_example.context.feature['clip/frames'].int64_list.value.append(
len(clip_list))
seq_example.context.feature[
'clip/start/timestamp'].int64_list.value.append(0)
seq_example.context.feature[
'clip/end/timestamp'].int64_list.value.append(len(clip_list))
seq_example.context.feature['image/format'].bytes_list.value.append(
six.ensure_binary('JPG'))
seq_example.context.feature['image/channels'].int64_list.value.append(3)
context_example = clip_list[0]
seq_example.context.feature['image/height'].int64_list.value.append(
context_example.features.feature[
'image/height'].int64_list.value[0])
seq_example.context.feature['image/width'].int64_list.value.append(
context_example.features.feature['image/width'].int64_list.value[0])
seq_example.context.feature[
'image/context_feature_length'].int64_list.value.append(
feature_length)
seq_example.context.feature[
'image/context_features'].float_list.value.extend(
context_features)
if self._keep_context_features_image_id_list:
seq_example.context.feature[
'image/context_features_image_id_list'].bytes_list.value.extend(
context_features_image_id_list)
encoded_image_list = seq_example.feature_lists.feature_list[
'image/encoded']
timestamps_list = seq_example.feature_lists.feature_list[
'image/timestamp']
context_features_idx_list = seq_example.feature_lists.feature_list[
'image/context_features_idx']
date_captured_list = seq_example.feature_lists.feature_list[
'image/date_captured']
unix_time_list = seq_example.feature_lists.feature_list[
'image/unix_time']
location_list = seq_example.feature_lists.feature_list['image/location']
image_ids_list = seq_example.feature_lists.feature_list[
'image/source_id']
gt_xmin_list = seq_example.feature_lists.feature_list[
'region/bbox/xmin']
gt_xmax_list = seq_example.feature_lists.feature_list[
'region/bbox/xmax']
gt_ymin_list = seq_example.feature_lists.feature_list[
'region/bbox/ymin']
gt_ymax_list = seq_example.feature_lists.feature_list[
'region/bbox/ymax']
gt_type_list = seq_example.feature_lists.feature_list[
'region/label/index']
gt_type_string_list = seq_example.feature_lists.feature_list[
'region/label/string']
gt_is_annotated_list = seq_example.feature_lists.feature_list[
'region/is_annotated']
for idx, example in enumerate(clip_list):
encoded_image = encoded_image_list.feature.add()
encoded_image.bytes_list.value.extend(
example.features.feature['image/encoded'].bytes_list.value)
image_id = image_ids_list.feature.add()
image_id.bytes_list.value.append(
example.features.feature['image/source_id'].bytes_list.value[0])
timestamp = timestamps_list.feature.add()
# The timestamp is currently the frame's index (its order) in the clip list.
timestamp.int64_list.value.extend([idx])
context_features_idx = context_features_idx_list.feature.add()
context_features_idx.int64_list.value.extend(
example.features.feature['context_features_idx'].int64_list.value)
date_captured = date_captured_list.feature.add()
date_captured.bytes_list.value.extend(
example.features.feature['image/date_captured'].bytes_list.value)
unix_time = unix_time_list.feature.add()
unix_time.float_list.value.extend(
example.features.feature['image/unix_time'].float_list.value)
location = location_list.feature.add()
location.bytes_list.value.extend(
example.features.feature['image/location'].bytes_list.value)
gt_xmin = gt_xmin_list.feature.add()
gt_xmax = gt_xmax_list.feature.add()
gt_ymin = gt_ymin_list.feature.add()
gt_ymax = gt_ymax_list.feature.add()
gt_type = gt_type_list.feature.add()
gt_type_str = gt_type_string_list.feature.add()
gt_is_annotated = gt_is_annotated_list.feature.add()
gt_is_annotated.int64_list.value.append(1)
gt_xmin.float_list.value.extend(
example.features.feature[
'image/object/bbox/xmin'].float_list.value)
gt_xmax.float_list.value.extend(
example.features.feature[
'image/object/bbox/xmax'].float_list.value)
gt_ymin.float_list.value.extend(
example.features.feature[
'image/object/bbox/ymin'].float_list.value)
gt_ymax.float_list.value.extend(
example.features.feature[
'image/object/bbox/ymax'].float_list.value)
gt_type.int64_list.value.extend(
example.features.feature[
'image/object/class/label'].int64_list.value)
gt_type_str.bytes_list.value.extend(
example.features.feature[
'image/object/class/text'].bytes_list.value)
self._num_examples_processed.inc(1)
list_of_examples.append(seq_example)
elif self._output_type == 'tf_example':
for example in example_list:
im_id = example.features.feature['image/source_id'].bytes_list.value[0]
if self._add_context_features:
example.features.feature[
'image/context_features'].float_list.value.extend(
context_features)
example.features.feature[
'image/context_feature_length'].int64_list.value.append(
feature_length)
if self._keep_context_features_image_id_list:
example.features.feature[
'image/context_features_image_id_list'].bytes_list.value.extend(
context_features_image_id_list)
self._num_examples_processed.inc(1)
list_of_examples.append(example)
return list_of_examples
def construct_pipeline(pipeline,
input_tfrecord,
output_tfrecord,
sequence_key,
time_horizon=None,
subsample_context_features_rate=0,
reduce_image_size=True,
max_image_dimension=1024,
add_context_features=True,
sorted_image_ids=True,
image_ids_to_keep='All',
keep_context_features_image_id_list=False,
keep_only_positives=False,
context_features_score_threshold=0.7,
keep_only_positives_gt=False,
max_num_elements_in_context_features=5000,
num_shards=0,
output_type='tf_example',
max_clip_length=None,
context_feature_length=2057):
"""Returns a beam pipeline to run object detection inference.
Args:
pipeline: Initialized beam pipeline.
input_tfrecord: An TFRecord of tf.train.Example protos containing images.
output_tfrecord: An TFRecord of tf.train.Example protos that contain images
in the input TFRecord and the detections from the model.
sequence_key: A feature name to use as a key for grouping sequences.
time_horizon: What length of time to use to partition the data when building
the memory banks. Options: `year`, `month`, `week`, `day`, `hour`,
`minute`, None.
subsample_context_features_rate: What rate to subsample images for the
contextual memory bank.
reduce_image_size: Whether to reduce the size of the stored images.
max_image_dimension: The maximum image dimension to use for resizing.
add_context_features: Whether to keep and store the contextual memory bank.
sorted_image_ids: Whether the image ids are sortable, and can be used as
datetime tie-breakers when building memory banks.
image_ids_to_keep: A list of image ids to save, to use to build data subsets
for evaluation.
keep_context_features_image_id_list: Whether to save an ordered list of the
ids of the images in the contextual memory bank.
keep_only_positives: Whether to only keep high scoring
(>context_features_score_threshold) features in the contextual memory
bank.
context_features_score_threshold: What threshold to use for keeping
features.
keep_only_positives_gt: Whether to only keep features from images that
contain objects based on the ground truth (for training).
max_num_elements_in_context_features: the maximum number of elements in the
memory bank
num_shards: The number of output shards.
output_type: What type of output, tf_example or tf_sequence_example.
max_clip_length: The maximum length of a sequence example, before it is
split into multiple sequence examples.
context_feature_length: The length of the context feature embeddings stored
in the input data.
"""
if output_type == 'tf_example':
coder = beam.coders.ProtoCoder(tf.train.Example)
elif output_type == 'tf_sequence_example':
coder = beam.coders.ProtoCoder(tf.train.SequenceExample)
else:
raise ValueError('Unsupported output type.')
input_collection = (
pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord(
input_tfrecord,
coder=beam.coders.BytesCoder()))
rekey_collection = input_collection | 'RekeyExamples' >> beam.ParDo(
ReKeyDataFn(sequence_key, time_horizon,
reduce_image_size, max_image_dimension))
grouped_collection = (
rekey_collection | 'GroupBySequenceKey' >> beam.GroupByKey())
grouped_collection = (
grouped_collection | 'ReshuffleGroups' >> beam.Reshuffle())
ordered_collection = (
grouped_collection | 'OrderByFrameNumber' >> beam.ParDo(
SortGroupedDataFn(sequence_key, sorted_image_ids,
max_num_elements_in_context_features)))
ordered_collection = (
ordered_collection | 'ReshuffleSortedGroups' >> beam.Reshuffle())
output_collection = (
ordered_collection | 'AddContextToExamples' >> beam.ParDo(
GenerateContextFn(
sequence_key, add_context_features, image_ids_to_keep,
keep_context_features_image_id_list=(
keep_context_features_image_id_list),
subsample_context_features_rate=subsample_context_features_rate,
keep_only_positives=keep_only_positives,
keep_only_positives_gt=keep_only_positives_gt,
context_features_score_threshold=(
context_features_score_threshold),
max_num_elements_in_context_features=(
max_num_elements_in_context_features),
output_type=output_type,
max_clip_length=max_clip_length,
context_feature_length=context_feature_length)))
output_collection = (
output_collection | 'ReshuffleExamples' >> beam.Reshuffle())
_ = output_collection | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord(
output_tfrecord,
num_shards=num_shards,
coder=coder)
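# Minimal sketch of calling construct_pipeline directly (assumed paths and a
# local DirectRunner; the documented entry point is the CLI in main() below):
#
#   options = beam.options.pipeline_options.PipelineOptions(
#       runner='DirectRunner')
#   p = beam.Pipeline(options=options)
#   construct_pipeline(
#       p,
#       input_tfrecord='path/to/input_tfrecords*',
#       output_tfrecord='path/to/output_tfrecords',
#       sequence_key='image/location',
#       time_horizon='month',
#       num_shards=1)
#   p.run()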
def parse_args(argv):
"""Command-line argument parser.
Args:
argv: command line arguments
Returns:
beam_args: Arguments for the beam pipeline.
pipeline_args: Arguments for the pipeline options, such as runner type.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_tfrecord',
dest='input_tfrecord',
required=True,
help='TFRecord containing images in tf.Example format for object '
'detection, with bounding boxes and contextual feature embeddings.')
parser.add_argument(
'--output_tfrecord',
dest='output_tfrecord',
required=True,
help='TFRecord containing images in tf.Example format, with added '
'contextual memory banks.')
parser.add_argument(
'--sequence_key',
dest='sequence_key',
default='image/location',
help='Key to use when grouping sequences: so far supports `image/seq_id` '
'and `image/location`.')
parser.add_argument(
'--context_feature_length',
dest='context_feature_length',
default=2057,
help='The length of the context feature embeddings stored in the input '
'data.')
parser.add_argument(
'--time_horizon',
dest='time_horizon',
default=None,
help='What time horizon to use when splitting the data, if any. Options '
'are: `year`, `month`, `week`, `day`, `hour`, `minute`, `None`.')
parser.add_argument(
'--subsample_context_features_rate',
dest='subsample_context_features_rate',
default=0,
help='Whether to subsample the context_features, and if so how many to '
'sample. If the rate is set to X, it will sample context from 1 out of '
'every X images. Default is sampling from every image, which is X=0.')
parser.add_argument(
'--reduce_image_size',
dest='reduce_image_size',
default=True,
help='Downsamples images so that the longest side is at most '
'max_image_dimension, maintaining aspect ratio.')
parser.add_argument(
'--max_image_dimension',
dest='max_image_dimension',
default=1024,
help='Sets max image dimension for resizing.')
parser.add_argument(
'--add_context_features',
dest='add_context_features',
default=True,
help='Adds a memory bank of embeddings to each clip')
parser.add_argument(
'--sorted_image_ids',
dest='sorted_image_ids',
default=True,
help='Whether the image source_ids are sortable to deal with '
'date_captured tie-breaks.')
parser.add_argument(
'--image_ids_to_keep',
dest='image_ids_to_keep',
default='All',
help='Path to .json list of image ids to keep, used for ground truth '
'eval creation.')
parser.add_argument(
'--keep_context_features_image_id_list',
dest='keep_context_features_image_id_list',
default=False,
help='Whether or not to keep a list of the image_ids corresponding to '
'the memory bank.')
parser.add_argument(
'--keep_only_positives',
dest='keep_only_positives',
default=False,
help='Whether or not to keep only positive boxes based on score.')
parser.add_argument(
'--context_features_score_threshold',
dest='context_features_score_threshold',
default=0.7,
help='What score threshold to use for boxes in context_features, when '
'`keep_only_positives` is set to `True`.')
parser.add_argument(
'--keep_only_positives_gt',
dest='keep_only_positives_gt',
default=False,
help='Whether or not to keep only positive boxes based on gt class.')
parser.add_argument(
'--max_num_elements_in_context_features',
dest='max_num_elements_in_context_features',
default=2000,
help='Sets max number of context feature elements per memory bank. '
'If the number of images in the context group is greater than '
'`max_num_elements_in_context_features`, the context group will be split.'
)
parser.add_argument(
'--output_type',
dest='output_type',
default='tf_example',
help='Output type, one of `tf_example`, `tf_sequence_example`.')
parser.add_argument(
'--max_clip_length',
dest='max_clip_length',
default=None,
help='Max length for sequence example outputs.')
parser.add_argument(
'--num_shards',
dest='num_shards',
default=0,
help='Number of output shards.')
beam_args, pipeline_args = parser.parse_known_args(argv)
return beam_args, pipeline_args
def main(argv=None, save_main_session=True):
"""Runs the Beam pipeline that performs inference.
Args:
argv: Command line arguments.
save_main_session: Whether to save the main session.
"""
args, pipeline_args = parse_args(argv)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
pipeline_args)
pipeline_options.view_as(
beam.options.pipeline_options.SetupOptions).save_main_session = (
save_main_session)
dirname = os.path.dirname(args.output_tfrecord)
tf.io.gfile.makedirs(dirname)
p = beam.Pipeline(options=pipeline_options)
construct_pipeline(
p,
args.input_tfrecord,
args.output_tfrecord,
args.sequence_key,
args.time_horizon,
args.subsample_context_features_rate,
args.reduce_image_size,
args.max_image_dimension,
args.add_context_features,
args.sorted_image_ids,
args.image_ids_to_keep,
args.keep_context_features_image_id_list,
args.keep_only_positives,
args.context_features_score_threshold,
args.keep_only_positives_gt,
args.max_num_elements_in_context_features,
args.num_shards,
args.output_type,
args.max_clip_length,
args.context_feature_length)
p.run()
if __name__ == '__main__':
main()
| 38,783 | 39.066116 | 80 | py |
models | models-master/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for generate_embedding_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import tempfile
import unittest
import numpy as np
import six
import tensorflow as tf
from object_detection import exporter_lib_v2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import generate_embedding_data # pylint:disable=g-import-not-at-top
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
mock = unittest.mock
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class FakeModel(model.DetectionModel):
def __init__(self, conv_weight_scalar=1.0):
super(FakeModel, self).__init__(num_classes=5)
self._conv = tf.keras.layers.Conv2D(
filters=1, kernel_size=1, strides=(1, 1), padding='valid',
kernel_initializer=tf.keras.initializers.Constant(
value=conv_weight_scalar))
def preprocess(self, inputs):
return tf.identity(inputs), exporter_lib_v2.get_true_shapes(inputs)
def predict(self, preprocessed_inputs, true_image_shapes):
return {'image': self._conv(preprocessed_inputs)}
def postprocess(self, prediction_dict, true_image_shapes):
with tf.control_dependencies(prediction_dict.values()):
num_features = 100
feature_dims = 10
classifier_feature = np.ones(
(2, feature_dims, feature_dims, num_features),
dtype=np.float32).tolist()
postprocessed_tensors = {
'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6],
[0.5, 0.5, 0.8, 0.8]]], tf.float32),
'detection_scores': tf.constant([[0.95, 0.6]], tf.float32),
'detection_multiclass_scores': tf.constant([[[0.1, 0.7, 0.2],
[0.3, 0.1, 0.6]]],
tf.float32),
'detection_classes': tf.constant([[0, 1]], tf.float32),
'num_detections': tf.constant([2], tf.float32),
'detection_features':
tf.constant([classifier_feature],
tf.float32)
}
return postprocessed_tensors
def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@contextlib.contextmanager
def InMemoryTFRecord(entries):
temp = tempfile.NamedTemporaryFile(delete=False)
filename = temp.name
try:
with tf.io.TFRecordWriter(filename) as writer:
for value in entries:
writer.write(value)
yield filename
finally:
os.unlink(temp.name)
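# Illustrative usage of the helper above (a sketch, not part of the original
# tests; `example` is assumed to be a tf.train.Example): the helper writes the
# serialized protos to a temporary TFRecord file and deletes the file once the
# context exits.
#
#   with InMemoryTFRecord([example.SerializeToString()]) as path:
#     dataset = tf.data.TFRecordDataset([path])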
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateEmbeddingData(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_path):
"""A function to save checkpoint from a fake Detection Model.
Args:
checkpoint_path: Path to save checkpoint from Fake model.
"""
mock_model = FakeModel()
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_path, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
def _export_saved_model(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
output_directory = os.path.join(tmp_dir, 'output')
saved_model_path = os.path.join(output_directory, 'saved_model')
tf.io.gfile.makedirs(output_directory)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
return saved_model_path
def _create_tf_example(self):
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
def BytesFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def Int64Feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def FloatFeature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(b'image_id'),
'image/height': Int64Feature(400),
'image/width': Int64Feature(600),
'image/class/label': Int64Feature(5),
'image/class/text': BytesFeature(b'hyena'),
'image/object/bbox/xmin': FloatFeature(0.1),
'image/object/bbox/xmax': FloatFeature(0.6),
'image/object/bbox/ymin': FloatFeature(0.0),
'image/object/bbox/ymax': FloatFeature(0.5),
'image/object/class/score': FloatFeature(0.95),
'image/object/class/label': Int64Feature(5),
'image/object/class/text': BytesFeature(b'hyena'),
'image/date_captured': BytesFeature(b'2019-10-20 12:12:12')
}))
return example.SerializeToString()
def assert_expected_example(self, example, topk=False, botk=False):
# Check embeddings
if topk or botk:
self.assertEqual(len(
example.features.feature['image/embedding'].float_list.value),
218)
self.assertAllEqual(
example.features.feature['image/embedding_count'].int64_list.value,
[2])
else:
self.assertEqual(len(
example.features.feature['image/embedding'].float_list.value),
109)
self.assertAllEqual(
example.features.feature['image/embedding_count'].int64_list.value,
[1])
self.assertAllEqual(
example.features.feature['image/embedding_length'].int64_list.value,
[109])
# Check annotations
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.0])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.1])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.5])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.6])
self.assertAllClose(
example.features.feature['image/object/class/score']
.float_list.value, [0.95])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'hyena'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value, [400])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value, [600])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'image_id'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def test_generate_embedding_data_fn(self):
saved_model_path = self._export_saved_model()
top_k_embedding_count = 1
bottom_k_embedding_count = 0
inference_fn = generate_embedding_data.GenerateEmbeddingDataFn(
saved_model_path, top_k_embedding_count, bottom_k_embedding_count)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
output = inference_fn.process(('dummy_key', generated_example))
output_example = output[0][1]
self.assert_expected_example(output_example)
def test_generate_embedding_data_with_top_k_boxes(self):
saved_model_path = self._export_saved_model()
top_k_embedding_count = 2
bottom_k_embedding_count = 0
inference_fn = generate_embedding_data.GenerateEmbeddingDataFn(
saved_model_path, top_k_embedding_count, bottom_k_embedding_count)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/label'].int64_list.value, [5])
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/text'].bytes_list.value, [b'hyena'])
output = inference_fn.process(('dummy_key', generated_example))
output_example = output[0][1]
self.assert_expected_example(output_example, topk=True)
def test_generate_embedding_data_with_bottom_k_boxes(self):
saved_model_path = self._export_saved_model()
top_k_embedding_count = 0
bottom_k_embedding_count = 2
inference_fn = generate_embedding_data.GenerateEmbeddingDataFn(
saved_model_path, top_k_embedding_count, bottom_k_embedding_count)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/label'].int64_list.value, [5])
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/text'].bytes_list.value, [b'hyena'])
output = inference_fn.process(('dummy_key', generated_example))
output_example = output[0][1]
self.assert_expected_example(output_example, botk=True)
def test_beam_pipeline(self):
with InMemoryTFRecord([self._create_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
saved_model_path = self._export_saved_model()
top_k_embedding_count = 1
bottom_k_embedding_count = 0
num_shards = 1
embedding_type = 'final_box_features'
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
generate_embedding_data.construct_pipeline(
p, input_tfrecord, output_tfrecord, saved_model_path,
top_k_embedding_count, bottom_k_embedding_count, num_shards,
embedding_type)
p.run()
filenames = tf.io.gfile.glob(
output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
self.assert_expected_example(tf.train.Example.FromString(
actual_output[0]))
if __name__ == '__main__':
tf.test.main()
| 13,157 | 38.63253 | 119 | py |
models | models-master/research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""A Beam job to generate detection data for camera trap images.
This tool runs inference with an exported Object Detection model in
`saved_model` format and produces raw detection boxes on images in
tf.Examples, under the assumption that the bounding box class label will
match the image-level class label in the tf.Example.
Steps to generate a detection dataset:
1. Use object_detection/export_inference_graph.py to get a `saved_model` for
inference. The input node must accept a tf.Example proto.
2. Run this tool with `saved_model` from step 1 and a TFRecord of tf.Example
protos containing images for inference.
Example Usage:
--------------
python tensorflow_models/object_detection/export_inference_graph.py \
--alsologtostderr \
--input_type tf_example \
--pipeline_config_path path/to/detection_model.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory
python generate_detection_data.py \
--alsologtostderr \
--input_tfrecord path/to/input_tfrecord@X \
--output_tfrecord path/to/output_tfrecord@X \
--model_dir path/to/exported_model_directory/saved_model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import threading
import tensorflow as tf
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class GenerateDetectionDataFn(beam.DoFn):
"""Generates detection data for camera trap images.
This Beam DoFn performs inference with an object detection `saved_model` and
produces detection boxes for camera trap data, matched to the
object class.
"""
session_lock = threading.Lock()
def __init__(self, model_dir, confidence_threshold):
"""Initialization function.
Args:
model_dir: A directory containing saved model.
confidence_threshold: the confidence threshold for boxes to keep
"""
self._model_dir = model_dir
self._confidence_threshold = confidence_threshold
self._session = None
self._num_examples_processed = beam.metrics.Metrics.counter(
'detection_data_generation', 'num_tf_examples_processed')
def setup(self):
self._load_inference_model()
def _load_inference_model(self):
# Because loading the inference model is expensive, we share one instance
# across all threads in the worker. This is possible since running inference
# with the loaded saved_model is thread safe.
with self.session_lock:
self._detect_fn = tf.saved_model.load(self._model_dir)
def process(self, tfrecord_entry):
return self._run_inference_and_generate_detections(tfrecord_entry)
def _run_inference_and_generate_detections(self, tfrecord_entry):
input_example = tf.train.Example.FromString(tfrecord_entry)
if input_example.features.feature[
'image/object/bbox/ymin'].float_list.value:
# There are already ground truth boxes for this image, just keep them.
return [input_example]
detections = self._detect_fn.signatures['serving_default'](
(tf.expand_dims(tf.convert_to_tensor(tfrecord_entry), 0)))
detection_boxes = detections['detection_boxes']
num_detections = detections['num_detections']
detection_scores = detections['detection_scores']
example = tf.train.Example()
num_detections = int(num_detections[0])
image_class_labels = input_example.features.feature[
'image/object/class/label'].int64_list.value
image_class_texts = input_example.features.feature[
'image/object/class/text'].bytes_list.value
# Ignore any images with multiple classes,
# we can't match the class to the box.
if len(image_class_labels) > 1:
return []
# Don't add boxes for images already labeled empty (for now)
if len(image_class_labels) == 1:
# Add boxes over confidence threshold.
for idx, score in enumerate(detection_scores[0]):
if score >= self._confidence_threshold and idx < num_detections:
example.features.feature[
'image/object/bbox/ymin'].float_list.value.extend([
detection_boxes[0, idx, 0]])
example.features.feature[
'image/object/bbox/xmin'].float_list.value.extend([
detection_boxes[0, idx, 1]])
example.features.feature[
'image/object/bbox/ymax'].float_list.value.extend([
detection_boxes[0, idx, 2]])
example.features.feature[
'image/object/bbox/xmax'].float_list.value.extend([
detection_boxes[0, idx, 3]])
# Add box scores and class texts and labels.
example.features.feature[
'image/object/class/score'].float_list.value.extend(
[score])
example.features.feature[
'image/object/class/label'].int64_list.value.extend(
[image_class_labels[0]])
example.features.feature[
'image/object/class/text'].bytes_list.value.extend(
[image_class_texts[0]])
# Add other essential example attributes
example.features.feature['image/encoded'].bytes_list.value.extend(
input_example.features.feature['image/encoded'].bytes_list.value)
example.features.feature['image/height'].int64_list.value.extend(
input_example.features.feature['image/height'].int64_list.value)
example.features.feature['image/width'].int64_list.value.extend(
input_example.features.feature['image/width'].int64_list.value)
example.features.feature['image/source_id'].bytes_list.value.extend(
input_example.features.feature['image/source_id'].bytes_list.value)
example.features.feature['image/location'].bytes_list.value.extend(
input_example.features.feature['image/location'].bytes_list.value)
example.features.feature['image/date_captured'].bytes_list.value.extend(
input_example.features.feature['image/date_captured'].bytes_list.value)
example.features.feature['image/class/text'].bytes_list.value.extend(
input_example.features.feature['image/class/text'].bytes_list.value)
example.features.feature['image/class/label'].int64_list.value.extend(
input_example.features.feature['image/class/label'].int64_list.value)
example.features.feature['image/seq_id'].bytes_list.value.extend(
input_example.features.feature['image/seq_id'].bytes_list.value)
example.features.feature['image/seq_num_frames'].int64_list.value.extend(
input_example.features.feature['image/seq_num_frames'].int64_list.value)
example.features.feature['image/seq_frame_num'].int64_list.value.extend(
input_example.features.feature['image/seq_frame_num'].int64_list.value)
self._num_examples_processed.inc(1)
return [example]
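# Illustrative example of the filtering above (assumed values, not taken from
# the original source): with confidence_threshold=0.9, detection_scores of
# [0.95, 0.6] and a single image-level class label, only the first box is
# copied into the output example, with 'image/object/class/score' of 0.95 and
# the class label/text copied from the image-level label. Images that already
# carry ground-truth boxes are passed through unchanged, and images with more
# than one image-level class are dropped.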
def construct_pipeline(pipeline, input_tfrecord, output_tfrecord, model_dir,
confidence_threshold, num_shards):
"""Returns a Beam pipeline to run object detection inference.
Args:
pipeline: Initialized beam pipeline.
input_tfrecord: A TFRecord of tf.train.Example protos containing images.
output_tfrecord: A TFRecord of tf.train.Example protos that contain images
in the input TFRecord and the detections from the model.
model_dir: Path to `saved_model` to use for inference.
confidence_threshold: Threshold to use when keeping detection results.
num_shards: The number of output shards.
"""
input_collection = (
pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord(
input_tfrecord,
coder=beam.coders.BytesCoder()))
output_collection = input_collection | 'RunInference' >> beam.ParDo(
GenerateDetectionDataFn(model_dir, confidence_threshold))
output_collection = output_collection | 'Reshuffle' >> beam.Reshuffle()
_ = output_collection | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord(
output_tfrecord,
num_shards=num_shards,
coder=beam.coders.ProtoCoder(tf.train.Example))
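# Minimal sketch of wiring up this pipeline directly (assumed paths and a
# local DirectRunner; the documented entry point is the CLI in main() below):
#
#   options = beam.options.pipeline_options.PipelineOptions(
#       runner='DirectRunner')
#   p = beam.Pipeline(options=options)
#   construct_pipeline(
#       p,
#       input_tfrecord='path/to/input_tfrecord',
#       output_tfrecord='path/to/output_tfrecord',
#       model_dir='path/to/exported_model_directory/saved_model',
#       confidence_threshold=0.9,
#       num_shards=1)
#   p.run()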
def parse_args(argv):
"""Command-line argument parser.
Args:
argv: command line arguments
Returns:
beam_args: Arguments for the beam pipeline.
pipeline_args: Arguments for the pipeline options, such as runner type.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--detection_input_tfrecord',
dest='detection_input_tfrecord',
required=True,
help='TFRecord containing images in tf.Example format for object '
'detection.')
parser.add_argument(
'--detection_output_tfrecord',
dest='detection_output_tfrecord',
required=True,
help='TFRecord containing detections in tf.Example format.')
parser.add_argument(
'--detection_model_dir',
dest='detection_model_dir',
required=True,
help='Path to directory containing an object detection SavedModel.')
parser.add_argument(
'--confidence_threshold',
dest='confidence_threshold',
default=0.9,
help='Min confidence to keep bounding boxes.')
parser.add_argument(
'--num_shards',
dest='num_shards',
default=0,
help='Number of output shards.')
beam_args, pipeline_args = parser.parse_known_args(argv)
return beam_args, pipeline_args
def main(argv=None, save_main_session=True):
"""Runs the Beam pipeline that performs inference.
Args:
argv: Command line arguments.
save_main_session: Whether to save the main session.
"""
args, pipeline_args = parse_args(argv)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
pipeline_args)
pipeline_options.view_as(
beam.options.pipeline_options.SetupOptions).save_main_session = (
save_main_session)
dirname = os.path.dirname(args.detection_output_tfrecord)
tf.io.gfile.makedirs(dirname)
p = beam.Pipeline(options=pipeline_options)
construct_pipeline(
p,
args.detection_input_tfrecord,
args.detection_output_tfrecord,
args.detection_model_dir,
args.confidence_threshold,
args.num_shards)
p.run()
if __name__ == '__main__':
main()
| 10,884 | 37.327465 | 80 | py |
models | models-master/research/object_detection/dataset_tools/context_rcnn/__init__.py | 0 | 0 | 0 | py |
models | models-master/research/object_detection/inference/detection_inference_tf1_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Tests for detection_inference.py."""
import os
import unittest
import numpy as np
from PIL import Image
import six
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.core import standard_fields
from object_detection.inference import detection_inference
from object_detection.utils import dataset_util
from object_detection.utils import tf_version
def get_mock_tfrecord_path():
return os.path.join(tf.test.get_temp_dir(), 'mock.tfrec')
def create_mock_tfrecord():
pil_image = Image.fromarray(np.array([[[123, 0, 0]]], dtype=np.uint8), 'RGB')
image_output_stream = six.BytesIO()
pil_image.save(image_output_stream, format='png')
encoded_image = image_output_stream.getvalue()
feature_map = {
'test_field':
dataset_util.float_list_feature([1, 2, 3, 4]),
standard_fields.TfExampleFields.image_encoded:
dataset_util.bytes_feature(encoded_image),
}
tf_example = tf.train.Example(features=tf.train.Features(feature=feature_map))
with tf.python_io.TFRecordWriter(get_mock_tfrecord_path()) as writer:
writer.write(tf_example.SerializeToString())
return encoded_image
def get_mock_graph_path():
return os.path.join(tf.test.get_temp_dir(), 'mock_graph.pb')
def create_mock_graph():
g = tf.Graph()
with g.as_default():
in_image_tensor = tf.placeholder(
tf.uint8, shape=[1, None, None, 3], name='image_tensor')
tf.constant([2.0], name='num_detections')
tf.constant(
[[[0, 0.8, 0.7, 1], [0.1, 0.2, 0.8, 0.9], [0.2, 0.3, 0.4, 0.5]]],
name='detection_boxes')
tf.constant([[0.1, 0.2, 0.3]], name='detection_scores')
tf.identity(
tf.constant([[1.0, 2.0, 3.0]]) *
tf.reduce_sum(tf.cast(in_image_tensor, dtype=tf.float32)),
name='detection_classes')
graph_def = g.as_graph_def()
with tf.gfile.Open(get_mock_graph_path(), 'w') as fl:
fl.write(graph_def.SerializeToString())
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class InferDetectionsTests(tf.test.TestCase):
def test_simple(self):
create_mock_graph()
encoded_image = create_mock_tfrecord()
serialized_example_tensor, image_tensor = detection_inference.build_input(
[get_mock_tfrecord_path()])
self.assertAllEqual(image_tensor.get_shape().as_list(), [1, None, None, 3])
(detected_boxes_tensor, detected_scores_tensor,
detected_labels_tensor) = detection_inference.build_inference_graph(
image_tensor, get_mock_graph_path())
with self.test_session(use_gpu=False) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tf.train.start_queue_runners()
tf_example = detection_inference.infer_detections_and_add_to_example(
serialized_example_tensor, detected_boxes_tensor,
detected_scores_tensor, detected_labels_tensor, False)
expected_example = tf.train.Example()
text_format.Merge(r"""
features {
feature {
key: "image/detection/bbox/ymin"
value { float_list { value: [0.0, 0.1] } } }
feature {
key: "image/detection/bbox/xmin"
value { float_list { value: [0.8, 0.2] } } }
feature {
key: "image/detection/bbox/ymax"
value { float_list { value: [0.7, 0.8] } } }
feature {
key: "image/detection/bbox/xmax"
value { float_list { value: [1.0, 0.9] } } }
feature {
key: "image/detection/label"
value { int64_list { value: [123, 246] } } }
feature {
key: "image/detection/score"
value { float_list { value: [0.1, 0.2] } } }
feature {
key: "test_field"
value { float_list { value: [1.0, 2.0, 3.0, 4.0] } } } }""",
expected_example)
expected_example.features.feature[
standard_fields.TfExampleFields
.image_encoded].CopyFrom(dataset_util.bytes_feature(encoded_image))
self.assertProtoEquals(expected_example, tf_example)
def test_discard_image(self):
create_mock_graph()
create_mock_tfrecord()
serialized_example_tensor, image_tensor = detection_inference.build_input(
[get_mock_tfrecord_path()])
(detected_boxes_tensor, detected_scores_tensor,
detected_labels_tensor) = detection_inference.build_inference_graph(
image_tensor, get_mock_graph_path())
with self.test_session(use_gpu=False) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tf.train.start_queue_runners()
tf_example = detection_inference.infer_detections_and_add_to_example(
serialized_example_tensor, detected_boxes_tensor,
detected_scores_tensor, detected_labels_tensor, True)
self.assertProtoEquals(r"""
features {
feature {
key: "image/detection/bbox/ymin"
value { float_list { value: [0.0, 0.1] } } }
feature {
key: "image/detection/bbox/xmin"
value { float_list { value: [0.8, 0.2] } } }
feature {
key: "image/detection/bbox/ymax"
value { float_list { value: [0.7, 0.8] } } }
feature {
key: "image/detection/bbox/xmax"
value { float_list { value: [1.0, 0.9] } } }
feature {
key: "image/detection/label"
value { int64_list { value: [123, 246] } } }
feature {
key: "image/detection/score"
value { float_list { value: [0.1, 0.2] } } }
feature {
key: "test_field"
value { float_list { value: [1.0, 2.0, 3.0, 4.0] } } } }
""", tf_example)
if __name__ == '__main__':
tf.test.main()
| 6,521 | 35.640449 | 80 | py |
models | models-master/research/object_detection/inference/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/object_detection/inference/infer_detections.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Infers detections on a TFRecord of TFExamples given an inference graph.
Example usage:
./infer_detections \
--input_tfrecord_paths=/path/to/input/tfrecord1,/path/to/input/tfrecord2 \
--output_tfrecord_path=/path/to/output/detections.tfrecord \
--inference_graph=/path/to/frozen_weights_inference_graph.pb
The output is a TFRecord of TFExamples. Each TFExample from the input is first
augmented with detections from the inference graph and then copied to the
output.
The input and output nodes of the inference graph are expected to have the same
types, shapes, and semantics, as the input and output nodes of graphs produced
by export_inference_graph.py, when run with --input_type=image_tensor.
The script can also discard the image pixels in the output. This greatly
reduces the output size and can potentially accelerate reading data in
subsequent processing steps that don't require the images (e.g. computing
metrics).
"""
import itertools
import tensorflow.compat.v1 as tf
from object_detection.inference import detection_inference
tf.flags.DEFINE_string('input_tfrecord_paths', None,
'A comma separated list of paths to input TFRecords.')
tf.flags.DEFINE_string('output_tfrecord_path', None,
'Path to the output TFRecord.')
tf.flags.DEFINE_string('inference_graph', None,
'Path to the inference graph with embedded weights.')
tf.flags.DEFINE_boolean('discard_image_pixels', False,
'Discards the images in the output TFExamples. This'
' significantly reduces the output size and is useful'
' if the subsequent tools don\'t need access to the'
' images (e.g. when computing evaluation measures).')
FLAGS = tf.flags.FLAGS
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
required_flags = ['input_tfrecord_paths', 'output_tfrecord_path',
'inference_graph']
for flag_name in required_flags:
if not getattr(FLAGS, flag_name):
raise ValueError('Flag --{} is required'.format(flag_name))
with tf.Session() as sess:
input_tfrecord_paths = [
v for v in FLAGS.input_tfrecord_paths.split(',') if v]
tf.logging.info('Reading input from %d files', len(input_tfrecord_paths))
serialized_example_tensor, image_tensor = detection_inference.build_input(
input_tfrecord_paths)
tf.logging.info('Reading graph and building model...')
(detected_boxes_tensor, detected_scores_tensor,
detected_labels_tensor) = detection_inference.build_inference_graph(
image_tensor, FLAGS.inference_graph)
tf.logging.info('Running inference and writing output to {}'.format(
FLAGS.output_tfrecord_path))
sess.run(tf.local_variables_initializer())
tf.train.start_queue_runners()
with tf.python_io.TFRecordWriter(
FLAGS.output_tfrecord_path) as tf_record_writer:
try:
for counter in itertools.count():
tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 10,
counter)
tf_example = detection_inference.infer_detections_and_add_to_example(
serialized_example_tensor, detected_boxes_tensor,
detected_scores_tensor, detected_labels_tensor,
FLAGS.discard_image_pixels)
tf_record_writer.write(tf_example.SerializeToString())
except tf.errors.OutOfRangeError:
tf.logging.info('Finished processing records')
if __name__ == '__main__':
tf.app.run()
| 4,266 | 42.989691 | 80 | py |
models | models-master/research/object_detection/inference/detection_inference.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for detection inference."""
from __future__ import division
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
def build_input(tfrecord_paths):
"""Builds the graph's input.
Args:
tfrecord_paths: List of paths to the input TFRecords
Returns:
serialized_example_tensor: The next serialized example. String scalar Tensor
image_tensor: The decoded image of the example. Uint8 tensor,
      shape=[1, None, None, 3]
"""
filename_queue = tf.train.string_input_producer(
tfrecord_paths, shuffle=False, num_epochs=1)
tf_record_reader = tf.TFRecordReader()
_, serialized_example_tensor = tf_record_reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example_tensor,
features={
standard_fields.TfExampleFields.image_encoded:
tf.FixedLenFeature([], tf.string),
})
encoded_image = features[standard_fields.TfExampleFields.image_encoded]
image_tensor = tf.image.decode_image(encoded_image, channels=3)
image_tensor.set_shape([None, None, 3])
image_tensor = tf.expand_dims(image_tensor, 0)
return serialized_example_tensor, image_tensor
def build_inference_graph(image_tensor, inference_graph_path):
"""Loads the inference graph and connects it to the input image.
Args:
image_tensor: The input image. uint8 tensor, shape=[1, None, None, 3]
inference_graph_path: Path to the inference graph with embedded weights
Returns:
detected_boxes_tensor: Detected boxes. Float tensor,
shape=[num_detections, 4]
detected_scores_tensor: Detected scores. Float tensor,
shape=[num_detections]
detected_labels_tensor: Detected labels. Int64 tensor,
shape=[num_detections]
"""
with tf.gfile.Open(inference_graph_path, 'rb') as graph_def_file:
graph_content = graph_def_file.read()
graph_def = tf.GraphDef()
graph_def.MergeFromString(graph_content)
tf.import_graph_def(
graph_def, name='', input_map={'image_tensor': image_tensor})
g = tf.get_default_graph()
num_detections_tensor = tf.squeeze(
g.get_tensor_by_name('num_detections:0'), 0)
num_detections_tensor = tf.cast(num_detections_tensor, tf.int32)
detected_boxes_tensor = tf.squeeze(
g.get_tensor_by_name('detection_boxes:0'), 0)
detected_boxes_tensor = detected_boxes_tensor[:num_detections_tensor]
detected_scores_tensor = tf.squeeze(
g.get_tensor_by_name('detection_scores:0'), 0)
detected_scores_tensor = detected_scores_tensor[:num_detections_tensor]
detected_labels_tensor = tf.squeeze(
g.get_tensor_by_name('detection_classes:0'), 0)
detected_labels_tensor = tf.cast(detected_labels_tensor, tf.int64)
detected_labels_tensor = detected_labels_tensor[:num_detections_tensor]
return detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor
def infer_detections_and_add_to_example(
serialized_example_tensor, detected_boxes_tensor, detected_scores_tensor,
detected_labels_tensor, discard_image_pixels):
"""Runs the supplied tensors and adds the inferred detections to the example.
Args:
serialized_example_tensor: Serialized TF example. Scalar string tensor
detected_boxes_tensor: Detected boxes. Float tensor,
shape=[num_detections, 4]
detected_scores_tensor: Detected scores. Float tensor,
shape=[num_detections]
detected_labels_tensor: Detected labels. Int64 tensor,
shape=[num_detections]
discard_image_pixels: If true, discards the image from the result
Returns:
The de-serialized TF example augmented with the inferred detections.
"""
tf_example = tf.train.Example()
(serialized_example, detected_boxes, detected_scores,
detected_classes) = tf.get_default_session().run([
serialized_example_tensor, detected_boxes_tensor, detected_scores_tensor,
detected_labels_tensor
])
detected_boxes = detected_boxes.T
tf_example.ParseFromString(serialized_example)
feature = tf_example.features.feature
feature[standard_fields.TfExampleFields.
detection_score].float_list.value[:] = detected_scores
feature[standard_fields.TfExampleFields.
detection_bbox_ymin].float_list.value[:] = detected_boxes[0]
feature[standard_fields.TfExampleFields.
detection_bbox_xmin].float_list.value[:] = detected_boxes[1]
feature[standard_fields.TfExampleFields.
detection_bbox_ymax].float_list.value[:] = detected_boxes[2]
feature[standard_fields.TfExampleFields.
detection_bbox_xmax].float_list.value[:] = detected_boxes[3]
feature[standard_fields.TfExampleFields.
detection_class_label].int64_list.value[:] = detected_classes
if discard_image_pixels:
del feature[standard_fields.TfExampleFields.image_encoded]
return tf_example
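# A minimal end-to-end sketch (illustrative only) showing how the three
# helpers in this module fit together in a TF1 session. Paths are
# placeholders and this helper is not used by the library; see
# infer_detections.py for the fully-fledged tool.
def _example_inference_loop(tfrecord_paths, inference_graph_path, output_path):
  """Illustrative only: writes detections for every input example."""
  serialized_example_tensor, image_tensor = build_input(tfrecord_paths)
  (detected_boxes_tensor, detected_scores_tensor,
   detected_labels_tensor) = build_inference_graph(
       image_tensor, inference_graph_path)
  with tf.Session() as sess, tf.python_io.TFRecordWriter(
      output_path) as tf_record_writer:
    sess.run(tf.local_variables_initializer())
    tf.train.start_queue_runners()
    try:
      while True:
        tf_example = infer_detections_and_add_to_example(
            serialized_example_tensor, detected_boxes_tensor,
            detected_scores_tensor, detected_labels_tensor,
            discard_image_pixels=False)
        tf_record_writer.write(tf_example.SerializeToString())
    except tf.errors.OutOfRangeError:
      pass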
| 5,516 | 37.852113 | 80 | py |
models | models-master/research/object_detection/builders/model_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.models.model_builder."""
from absl.testing import parameterized
from google.protobuf import text_format
from object_detection.builders import model_builder
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.meta_architectures import rfcn_meta_arch
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.protos import hyperparams_pb2
from object_detection.protos import losses_pb2
from object_detection.protos import model_pb2
from object_detection.utils import test_case
class ModelBuilderTest(test_case.TestCase, parameterized.TestCase):
def default_ssd_feature_extractor(self):
raise NotImplementedError
def default_faster_rcnn_feature_extractor(self):
raise NotImplementedError
def ssd_feature_extractors(self):
raise NotImplementedError
def get_override_base_feature_extractor_hyperparams(self, extractor_type):
raise NotImplementedError
def faster_rcnn_feature_extractors(self):
raise NotImplementedError
def create_model(self, model_config, is_training=True):
"""Builds a DetectionModel based on the model config.
Args:
model_config: A model.proto object containing the config for the desired
DetectionModel.
is_training: True if this model is being built for training purposes.
Returns:
DetectionModel based on the config.
"""
return model_builder.build(model_config, is_training=is_training)
def create_default_ssd_model_proto(self):
"""Creates a DetectionModel proto with ssd model fields populated."""
model_text_proto = """
ssd {
feature_extractor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model_proto.ssd.feature_extractor.type = (self.
default_ssd_feature_extractor())
return model_proto
def create_default_faster_rcnn_model_proto(self):
"""Creates a DetectionModel proto with FasterRCNN model fields populated."""
model_text_proto = """
faster_rcnn {
inplace_batchnorm_update: false
num_classes: 3
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.01
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
(model_proto.faster_rcnn.feature_extractor.type
) = self.default_faster_rcnn_feature_extractor()
return model_proto
def test_create_ssd_models_from_config(self):
model_proto = self.create_default_ssd_model_proto()
for extractor_type, extractor_class in self.ssd_feature_extractors().items(
):
model_proto.ssd.feature_extractor.type = extractor_type
model_proto.ssd.feature_extractor.override_base_feature_extractor_hyperparams = (
self.get_override_base_feature_extractor_hyperparams(extractor_type))
model = model_builder.build(model_proto, is_training=True)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor, extractor_class)
def test_create_ssd_fpn_model_from_config(self):
model_proto = self.create_default_ssd_model_proto()
model_proto.ssd.feature_extractor.fpn.min_level = 3
model_proto.ssd.feature_extractor.fpn.max_level = 7
model = model_builder.build(model_proto, is_training=True)
self.assertEqual(model._feature_extractor._fpn_min_level, 3)
self.assertEqual(model._feature_extractor._fpn_max_level, 7)
@parameterized.named_parameters(
{
'testcase_name': 'mask_rcnn_with_matmul',
'use_matmul_crop_and_resize': False,
'enable_mask_prediction': True
},
{
'testcase_name': 'mask_rcnn_without_matmul',
'use_matmul_crop_and_resize': True,
'enable_mask_prediction': True
},
{
'testcase_name': 'faster_rcnn_with_matmul',
'use_matmul_crop_and_resize': False,
'enable_mask_prediction': False
},
{
'testcase_name': 'faster_rcnn_without_matmul',
'use_matmul_crop_and_resize': True,
'enable_mask_prediction': False
},
)
def test_create_faster_rcnn_models_from_config(self,
use_matmul_crop_and_resize,
enable_mask_prediction):
model_proto = self.create_default_faster_rcnn_model_proto()
faster_rcnn_config = model_proto.faster_rcnn
faster_rcnn_config.use_matmul_crop_and_resize = use_matmul_crop_and_resize
if enable_mask_prediction:
faster_rcnn_config.second_stage_mask_prediction_loss_weight = 3.0
mask_predictor_config = (
faster_rcnn_config.second_stage_box_predictor.mask_rcnn_box_predictor)
mask_predictor_config.predict_instance_masks = True
for extractor_type, extractor_class in (
self.faster_rcnn_feature_extractors().items()):
faster_rcnn_config.feature_extractor.type = extractor_type
model = model_builder.build(model_proto, is_training=True)
self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch)
self.assertIsInstance(model._feature_extractor, extractor_class)
if enable_mask_prediction:
self.assertAlmostEqual(model._second_stage_mask_loss_weight, 3.0)
def test_create_faster_rcnn_model_from_config_with_example_miner(self):
model_proto = self.create_default_faster_rcnn_model_proto()
model_proto.faster_rcnn.hard_example_miner.num_hard_examples = 64
model = model_builder.build(model_proto, is_training=True)
self.assertIsNotNone(model._hard_example_miner)
def test_create_rfcn_model_from_config(self):
model_proto = self.create_default_faster_rcnn_model_proto()
rfcn_predictor_config = (
model_proto.faster_rcnn.second_stage_box_predictor.rfcn_box_predictor)
rfcn_predictor_config.conv_hyperparams.op = hyperparams_pb2.Hyperparams.CONV
for extractor_type, extractor_class in (
self.faster_rcnn_feature_extractors().items()):
model_proto.faster_rcnn.feature_extractor.type = extractor_type
model = model_builder.build(model_proto, is_training=True)
self.assertIsInstance(model, rfcn_meta_arch.RFCNMetaArch)
self.assertIsInstance(model._feature_extractor, extractor_class)
@parameterized.parameters(True, False)
def test_create_faster_rcnn_from_config_with_crop_feature(
self, output_final_box_features):
model_proto = self.create_default_faster_rcnn_model_proto()
model_proto.faster_rcnn.output_final_box_features = (
output_final_box_features)
_ = model_builder.build(model_proto, is_training=True)
def test_invalid_model_config_proto(self):
model_proto = ''
with self.assertRaisesRegex(
ValueError, 'model_config not of type model_pb2.DetectionModel.'):
model_builder.build(model_proto, is_training=True)
def test_unknown_meta_architecture(self):
model_proto = model_pb2.DetectionModel()
with self.assertRaisesRegex(ValueError, 'Unknown meta architecture'):
model_builder.build(model_proto, is_training=True)
def test_unknown_ssd_feature_extractor(self):
model_proto = self.create_default_ssd_model_proto()
model_proto.ssd.feature_extractor.type = 'unknown_feature_extractor'
with self.assertRaises(ValueError):
model_builder.build(model_proto, is_training=True)
def test_unknown_faster_rcnn_feature_extractor(self):
model_proto = self.create_default_faster_rcnn_model_proto()
model_proto.faster_rcnn.feature_extractor.type = 'unknown_feature_extractor'
with self.assertRaises(ValueError):
model_builder.build(model_proto, is_training=True)
def test_invalid_first_stage_nms_iou_threshold(self):
model_proto = self.create_default_faster_rcnn_model_proto()
model_proto.faster_rcnn.first_stage_nms_iou_threshold = 1.1
with self.assertRaisesRegex(ValueError,
r'iou_threshold not in \[0, 1\.0\]'):
model_builder.build(model_proto, is_training=True)
model_proto.faster_rcnn.first_stage_nms_iou_threshold = -0.1
with self.assertRaisesRegex(ValueError,
r'iou_threshold not in \[0, 1\.0\]'):
model_builder.build(model_proto, is_training=True)
def test_invalid_second_stage_batch_size(self):
model_proto = self.create_default_faster_rcnn_model_proto()
model_proto.faster_rcnn.first_stage_max_proposals = 1
model_proto.faster_rcnn.second_stage_batch_size = 2
with self.assertRaisesRegex(
ValueError, 'second_stage_batch_size should be no greater '
'than first_stage_max_proposals.'):
model_builder.build(model_proto, is_training=True)
def test_invalid_faster_rcnn_batchnorm_update(self):
model_proto = self.create_default_faster_rcnn_model_proto()
model_proto.faster_rcnn.inplace_batchnorm_update = True
with self.assertRaisesRegex(ValueError,
'inplace batchnorm updates not supported'):
model_builder.build(model_proto, is_training=True)
def test_create_experimental_model(self):
model_text_proto = """
experimental_model {
name: 'model42'
}"""
build_func = lambda *args: 42
model_builder.EXPERIMENTAL_META_ARCH_BUILDER_MAP['model42'] = build_func
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
self.assertEqual(model_builder.build(model_proto, is_training=True), 42)
| 13,019 | 35.573034 | 87 | py |
models | models-master/research/object_detection/builders/calibration_builder_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for calibration_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import interpolate
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.builders import calibration_builder
from object_detection.protos import calibration_pb2
from object_detection.utils import test_case
class CalibrationBuilderTest(test_case.TestCase):
def test_tf_linear_interp1d_map(self):
"""Tests TF linear interpolation mapping to a single number."""
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.5, 0.5, 0.5])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_map_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_map_outputs
tf_map_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_map_outputs_np, [0.5, 0.5, 0.5, 0.5, 0.5])
def test_tf_linear_interp1d_interpolate(self):
"""Tests TF 1d linear interpolation not mapping to a single number."""
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.6, 0.7, 1.0])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_interpolate_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_interpolate_outputs
tf_interpolate_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_interpolate_outputs_np, [0.6, 0.65, 0.7, 0.85, 1.])
@staticmethod
def _get_scipy_interp1d(new_x, x, y):
"""Helper performing 1d linear interpolation using SciPy."""
interpolation1d_fn = interpolate.interp1d(x, y)
return interpolation1d_fn(new_x)
def _get_tf_interp1d(self, new_x, x, y):
"""Helper performing 1d linear interpolation using Tensorflow."""
def graph_fn():
tf_interp_outputs = calibration_builder._tf_linear_interp1d(
tf.convert_to_tensor(new_x, dtype=tf.float32),
tf.convert_to_tensor(x, dtype=tf.float32),
tf.convert_to_tensor(y, dtype=tf.float32))
return tf_interp_outputs
np_tf_interp_outputs = self.execute(graph_fn, [])
return np_tf_interp_outputs
def test_tf_linear_interp1d_against_scipy_map(self):
"""Tests parity of TF linear interpolation with SciPy for simple mapping."""
length = 10
np_x = np.linspace(0, 1, length)
# Mapping all numbers to 0.5
np_y_map = np.repeat(0.5, length)
# Scipy and TF interpolations
test_data_np = np.linspace(0, 1, length * 10)
scipy_map_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_map)
np_tf_map_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_map)
self.assertAllClose(scipy_map_outputs, np_tf_map_outputs)
def test_tf_linear_interp1d_against_scipy_interpolate(self):
"""Tests parity of TF linear interpolation with SciPy."""
length = 10
np_x = np.linspace(0, 1, length)
# Requires interpolation over 0.5 to 1 domain
np_y_interp = np.linspace(0.5, 1, length)
# Scipy interpolation for comparison
test_data_np = np.linspace(0, 1, length * 10)
scipy_interp_outputs = self._get_scipy_interp1d(test_data_np, np_x,
np_y_interp)
np_tf_interp_outputs = self._get_tf_interp1d(test_data_np, np_x,
np_y_interp)
self.assertAllClose(scipy_interp_outputs, np_tf_interp_outputs)
@staticmethod
def _add_function_approximation_to_calibration_proto(calibration_proto,
x_array, y_array,
class_id):
"""Adds a function approximation to calibration proto for a class id."""
# Per-class calibration.
if class_id is not None:
function_approximation = (
calibration_proto.class_id_function_approximations
.class_id_xy_pairs_map[class_id])
# Class-agnostic calibration.
else:
function_approximation = (
calibration_proto.function_approximation.x_y_pairs)
for x, y in zip(x_array, y_array):
x_y_pair_message = function_approximation.x_y_pair.add()
x_y_pair_message.x = x
x_y_pair_message.y = y
def test_class_agnostic_function_approximation(self):
"""Tests that calibration produces correct class-agnostic values."""
# Generate fake calibration proto. For this interpolation, any input on
# [0.0, 0.5] should be divided by 2 and any input on (0.5, 1.0] should have
# 0.25 subtracted from it.
class_agnostic_x = np.asarray([0.0, 0.5, 1.0])
class_agnostic_y = np.asarray([0.0, 0.25, 0.75])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_agnostic_x, class_agnostic_y, class_id=None)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3],
[0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8],
[0.9, 1.0, 1.0]]], dtype=tf.float32)
      # Scores are transformed by the class-agnostic interpolation above.
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15],
[0.2, 0.25, 0.0]],
[[0.35, 0.45, 0.55],
[0.65, 0.75, 0.75]]])
def test_multiclass_function_approximations(self):
"""Tests that calibration produces correct multiclass values."""
# Background class (0-index) maps all predictions to 0.5.
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
# Class id 1 will interpolate using these values.
class_1_x = np.asarray([0.0, 0.2, 1.0])
class_1_y = np.asarray([0.0, 0.6, 1.0])
self._add_function_approximation_to_calibration_proto(
calibration_config, class_1_x, class_1_y, class_id=1)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.6], [0.5, 0.3]],
[[0.5, 0.7], [0.5, 0.96]]])
def test_temperature_scaling(self):
"""Tests that calibration produces correct temperature scaling values."""
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 2.0
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np,
[[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]],
[[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]])
def test_temperature_scaling_incorrect_value_error(self):
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 0
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3]]], dtype=tf.float32)
with self.assertRaises(ValueError):
calibration_fn(class_predictions_with_background)
def test_skips_class_when_calibration_parameters_not_present(self):
"""Tests that graph fails when parameters not present for all classes."""
# Only adding calibration parameters for class id = 0, even though class id
# 1 is present in the data.
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]],
[[0.5, 0.4], [0.5, 0.92]]])
if __name__ == '__main__':
tf.test.main()
| 10,320 | 43.296137 | 80 | py |
models | models-master/research/object_detection/builders/input_reader_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input reader builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wish to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.data_decoders import tf_example_decoder
from object_detection.data_decoders import tf_sequence_example_decoder
from object_detection.protos import input_reader_pb2
parallel_reader = slim.parallel_reader
def build(input_reader_config):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
config = input_reader_config.tf_record_input_reader
if not config.input_path:
raise ValueError('At least one input path must be specified in '
'`input_reader_config`.')
_, string_tensor = parallel_reader.parallel_read(
config.input_path[:], # Convert `RepeatedScalarContainer` to list.
reader_class=tf.TFRecordReader,
num_epochs=(input_reader_config.num_epochs
if input_reader_config.num_epochs else None),
num_readers=input_reader_config.num_readers,
shuffle=input_reader_config.shuffle,
dtypes=[tf.string, tf.string],
capacity=input_reader_config.queue_capacity,
min_after_dequeue=input_reader_config.min_after_dequeue)
label_map_proto_file = None
if input_reader_config.HasField('label_map_path'):
label_map_proto_file = input_reader_config.label_map_path
input_type = input_reader_config.input_type
if input_type == input_reader_pb2.InputType.Value('TF_EXAMPLE'):
decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=input_reader_config.load_instance_masks,
instance_mask_type=input_reader_config.mask_type,
label_map_proto_file=label_map_proto_file,
load_context_features=input_reader_config.load_context_features)
return decoder.decode(string_tensor)
elif input_type == input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE'):
decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
label_map_proto_file=label_map_proto_file,
load_context_features=input_reader_config.load_context_features,
load_context_image_ids=input_reader_config.load_context_image_ids)
return decoder.decode(string_tensor)
raise ValueError('Unsupported input_type.')
raise ValueError('Unsupported input_reader_config.')
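# Illustrative usage sketch (the path is a placeholder); this helper is not
# used by the library.
def _example_build_tf_record_reader():
  """Builds a tensor dict from a TFRecord reader config; sketch only."""
  config = input_reader_pb2.InputReader()
  config.tf_record_input_reader.input_path.append(
      '/path/to/examples.tfrecord')
  return build(config)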
| 3,892 | 41.315217 | 80 | py |
models | models-master/research/object_detection/builders/calibration_builder.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow ops to calibrate class predictions and background class."""
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
def _find_interval_containing_new_value(x, new_value):
"""Find the index of x (ascending-ordered) after which new_value occurs."""
new_value_shape = shape_utils.combined_static_and_dynamic_shape(new_value)[0]
x_shape = shape_utils.combined_static_and_dynamic_shape(x)[0]
compare = tf.cast(tf.reshape(new_value, shape=(new_value_shape, 1)) >=
tf.reshape(x, shape=(1, x_shape)),
dtype=tf.int32)
diff = compare[:, 1:] - compare[:, :-1]
interval_idx = tf.argmin(diff, axis=1)
return interval_idx
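# Worked example (illustrative): for x = [0., 0.5, 1.] and
# new_value = [0.7, 0.2], `compare` is [[1, 1, 0], [1, 0, 0]], so the returned
# interval indices are [1, 0]: 0.7 falls in [0.5, 1.] and 0.2 in [0., 0.5].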
def _tf_linear_interp1d(x_to_interpolate, fn_x, fn_y):
"""Tensorflow implementation of 1d linear interpolation.
Args:
x_to_interpolate: tf.float32 Tensor of shape (num_examples,) over which 1d
linear interpolation is performed.
fn_x: Monotonically-increasing, non-repeating tf.float32 Tensor of shape
(length,) used as the domain to approximate a function.
fn_y: tf.float32 Tensor of shape (length,) used as the range to approximate
a function.
Returns:
tf.float32 Tensor of shape (num_examples,)
"""
x_pad = tf.concat([fn_x[:1] - 1, fn_x, fn_x[-1:] + 1], axis=0)
y_pad = tf.concat([fn_y[:1], fn_y, fn_y[-1:]], axis=0)
interval_idx = _find_interval_containing_new_value(x_pad, x_to_interpolate)
# Interpolate
alpha = (
(x_to_interpolate - tf.gather(x_pad, interval_idx)) /
(tf.gather(x_pad, interval_idx + 1) - tf.gather(x_pad, interval_idx)))
interpolation = ((1 - alpha) * tf.gather(y_pad, interval_idx) +
alpha * tf.gather(y_pad, interval_idx + 1))
return interpolation
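# Worked example (illustrative, mirroring the unit tests): with
# fn_x = [0., 0.5, 1.] and fn_y = [0.6, 0.7, 1.0], interpolating
# x_to_interpolate = [0.25, 0.75] yields [0.65, 0.85]. Inputs outside
# [fn_x[0], fn_x[-1]] are clamped to fn_y[0] / fn_y[-1] by the padding above.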
def _function_approximation_proto_to_tf_tensors(x_y_pairs_message):
"""Extracts (x,y) pairs from a XYPairs message.
Args:
    x_y_pairs_message: calibration_pb2.XYPairs proto
Returns:
tf_x: tf.float32 tensor of shape (number_xy_pairs,) for function domain.
tf_y: tf.float32 tensor of shape (number_xy_pairs,) for function range.
"""
tf_x = tf.convert_to_tensor([x_y_pair.x
for x_y_pair
in x_y_pairs_message.x_y_pair],
dtype=tf.float32)
tf_y = tf.convert_to_tensor([x_y_pair.y
for x_y_pair
in x_y_pairs_message.x_y_pair],
dtype=tf.float32)
return tf_x, tf_y
def _get_class_id_function_dict(calibration_config):
"""Create a dictionary mapping class id to function approximations.
Args:
calibration_config: calibration_pb2 proto containing
id_function_approximations.
Returns:
Dictionary mapping a class id to a tuple of TF tensors to be used for
function approximation.
"""
class_id_function_dict = {}
class_id_xy_pairs_map = (
calibration_config.class_id_function_approximations.class_id_xy_pairs_map)
for class_id in class_id_xy_pairs_map:
class_id_function_dict[class_id] = (
_function_approximation_proto_to_tf_tensors(
class_id_xy_pairs_map[class_id]))
return class_id_function_dict
def build(calibration_config):
"""Returns a function that calibrates Tensorflow model scores.
All returned functions are expected to apply positive monotonic
transformations to inputs (i.e. score ordering is strictly preserved or
adjacent scores are mapped to the same score, but an input of lower value
  should never exceed an input of higher value after transformation). For
class-agnostic calibration, positive monotonicity should hold across all
scores. In class-specific cases, positive monotonicity should hold within each
class.
Args:
calibration_config: calibration_pb2.CalibrationConfig proto.
Returns:
    Function that accepts class_predictions_with_background and calibrates
the output based on calibration_config's parameters.
Raises:
ValueError: No calibration builder defined for "Oneof" in
calibration_config.
"""
# Linear Interpolation (usually used as a result of calibration via
# isotonic regression).
if calibration_config.WhichOneof('calibrator') == 'function_approximation':
def calibration_fn(class_predictions_with_background):
"""Calibrate predictions via 1-d linear interpolation.
Predictions scores are linearly interpolated based on a class-agnostic
function approximation. Note that the 0-indexed background class is also
transformed.
Args:
class_predictions_with_background: tf.float32 tensor of shape
[batch_size, num_anchors, num_classes + 1] containing scores on the
interval [0,1]. This is usually produced by a sigmoid or softmax layer
and the result of calling the `predict` method of a detection model.
Returns:
tf.float32 tensor of the same shape as the input with values on the
interval [0, 1].
"""
# Flattening Tensors and then reshaping at the end.
flat_class_predictions_with_background = tf.reshape(
class_predictions_with_background, shape=[-1])
fn_x, fn_y = _function_approximation_proto_to_tf_tensors(
calibration_config.function_approximation.x_y_pairs)
updated_scores = _tf_linear_interp1d(
flat_class_predictions_with_background, fn_x, fn_y)
# Un-flatten the scores
original_detections_shape = shape_utils.combined_static_and_dynamic_shape(
class_predictions_with_background)
calibrated_class_predictions_with_background = tf.reshape(
updated_scores,
shape=original_detections_shape,
name='calibrate_scores')
return calibrated_class_predictions_with_background
elif (calibration_config.WhichOneof('calibrator') ==
'class_id_function_approximations'):
def calibration_fn(class_predictions_with_background):
"""Calibrate predictions per class via 1-d linear interpolation.
Prediction scores are linearly interpolated with class-specific function
approximations. Note that after calibration, an anchor's class scores will
not necessarily sum to 1, and score ordering may change, depending on each
class' calibration parameters.
Args:
class_predictions_with_background: tf.float32 tensor of shape
[batch_size, num_anchors, num_classes + 1] containing scores on the
interval [0,1]. This is usually produced by a sigmoid or softmax layer
and the result of calling the `predict` method of a detection model.
Returns:
tf.float32 tensor of the same shape as the input with values on the
interval [0, 1].
Raises:
KeyError: Calibration parameters are not present for a class.
"""
class_id_function_dict = _get_class_id_function_dict(calibration_config)
# Tensors are split by class and then recombined at the end to recover
# the input's original shape. If a class id does not have calibration
# parameters, it is left unchanged.
class_tensors = tf.unstack(class_predictions_with_background, axis=-1)
calibrated_class_tensors = []
for class_id, class_tensor in enumerate(class_tensors):
flat_class_tensor = tf.reshape(class_tensor, shape=[-1])
if class_id in class_id_function_dict:
output_tensor = _tf_linear_interp1d(
x_to_interpolate=flat_class_tensor,
fn_x=class_id_function_dict[class_id][0],
fn_y=class_id_function_dict[class_id][1])
else:
tf.logging.info(
              'Calibration parameters for class id `%d` not found',
class_id)
output_tensor = flat_class_tensor
calibrated_class_tensors.append(output_tensor)
combined_calibrated_tensor = tf.stack(calibrated_class_tensors, axis=1)
input_shape = shape_utils.combined_static_and_dynamic_shape(
class_predictions_with_background)
calibrated_class_predictions_with_background = tf.reshape(
combined_calibrated_tensor,
shape=input_shape,
name='calibrate_scores')
return calibrated_class_predictions_with_background
elif (calibration_config.WhichOneof('calibrator') ==
'temperature_scaling_calibration'):
def calibration_fn(class_predictions_with_background):
"""Calibrate predictions via temperature scaling.
Predictions logits scores are scaled by the temperature scaler. Note that
the 0-indexed background class is also transformed.
Args:
class_predictions_with_background: tf.float32 tensor of shape
[batch_size, num_anchors, num_classes + 1] containing logits scores.
This is usually produced before a sigmoid or softmax layer.
Returns:
tf.float32 tensor of the same shape as the input.
Raises:
ValueError: If temperature scaler is of incorrect value.
"""
scaler = calibration_config.temperature_scaling_calibration.scaler
if scaler <= 0:
raise ValueError('The scaler in temperature scaling must be positive.')
calibrated_class_predictions_with_background = tf.math.divide(
class_predictions_with_background,
scaler,
name='calibrate_score')
return calibrated_class_predictions_with_background
# TODO(zbeaver): Add sigmoid calibration.
else:
raise ValueError('No calibration builder defined for "Oneof" in '
'calibration_config.')
return calibration_fn
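# Illustrative usage sketch (the scaler value is an example only); this
# helper is not used by the library.
def _example_temperature_scaling_usage():
  """Builds a temperature-scaling calibration fn; illustrative only."""
  from object_detection.protos import calibration_pb2  # sketch-only import
  config = calibration_pb2.CalibrationConfig()
  config.temperature_scaling_calibration.scaler = 2.0
  calibration_fn = build(config)
  logits = tf.constant([[[0.2, 0.4, 0.6]]], dtype=tf.float32)
  return calibration_fn(logits)  # Every logit is divided by the scaler.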
| 10,328 | 40.151394 | 80 | py |
models | models-master/research/object_detection/builders/box_coder_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection box coder from configuration."""
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import keypoint_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.box_coders import square_box_coder
from object_detection.protos import box_coder_pb2
def build(box_coder_config):
"""Builds a box coder object based on the box coder config.
Args:
box_coder_config: A box_coder.proto object containing the config for the
desired box coder.
Returns:
BoxCoder based on the config.
Raises:
ValueError: On empty box coder proto.
"""
if not isinstance(box_coder_config, box_coder_pb2.BoxCoder):
raise ValueError('box_coder_config not of type box_coder_pb2.BoxCoder.')
if box_coder_config.WhichOneof('box_coder_oneof') == 'faster_rcnn_box_coder':
return faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=[
box_coder_config.faster_rcnn_box_coder.y_scale,
box_coder_config.faster_rcnn_box_coder.x_scale,
box_coder_config.faster_rcnn_box_coder.height_scale,
box_coder_config.faster_rcnn_box_coder.width_scale
])
if box_coder_config.WhichOneof('box_coder_oneof') == 'keypoint_box_coder':
return keypoint_box_coder.KeypointBoxCoder(
box_coder_config.keypoint_box_coder.num_keypoints,
scale_factors=[
box_coder_config.keypoint_box_coder.y_scale,
box_coder_config.keypoint_box_coder.x_scale,
box_coder_config.keypoint_box_coder.height_scale,
box_coder_config.keypoint_box_coder.width_scale
])
if (box_coder_config.WhichOneof('box_coder_oneof') ==
'mean_stddev_box_coder'):
return mean_stddev_box_coder.MeanStddevBoxCoder(
stddev=box_coder_config.mean_stddev_box_coder.stddev)
if box_coder_config.WhichOneof('box_coder_oneof') == 'square_box_coder':
return square_box_coder.SquareBoxCoder(scale_factors=[
box_coder_config.square_box_coder.y_scale,
box_coder_config.square_box_coder.x_scale,
box_coder_config.square_box_coder.length_scale
])
raise ValueError('Empty box coder.')
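# Illustrative usage sketch (scale values are examples only); this helper is
# not used by the library.
def _example_build_faster_rcnn_box_coder():
  """Builds a FasterRcnnBoxCoder from a text proto; illustrative only."""
  from google.protobuf import text_format  # sketch-only import
  config = box_coder_pb2.BoxCoder()
  text_format.Merge("""
      faster_rcnn_box_coder {
        y_scale: 10.0
        x_scale: 10.0
        height_scale: 5.0
        width_scale: 5.0
      }""", config)
  return build(config)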
| 2,888 | 42.119403 | 80 | py |
models | models-master/research/object_detection/builders/region_similarity_calculator_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder for region similarity calculators."""
from object_detection.core import region_similarity_calculator
from object_detection.protos import region_similarity_calculator_pb2
def build(region_similarity_calculator_config):
"""Builds region similarity calculator based on the configuration.
Builds one of [IouSimilarity, IoaSimilarity, NegSqDistSimilarity] objects. See
core/region_similarity_calculator.proto for details.
Args:
region_similarity_calculator_config: RegionSimilarityCalculator
configuration proto.
Returns:
region_similarity_calculator: RegionSimilarityCalculator object.
Raises:
ValueError: On unknown region similarity calculator.
"""
if not isinstance(
region_similarity_calculator_config,
region_similarity_calculator_pb2.RegionSimilarityCalculator):
raise ValueError(
'region_similarity_calculator_config not of type '
        'region_similarity_calculator_pb2.RegionSimilarityCalculator')
similarity_calculator = region_similarity_calculator_config.WhichOneof(
'region_similarity')
if similarity_calculator == 'iou_similarity':
return region_similarity_calculator.IouSimilarity()
if similarity_calculator == 'ioa_similarity':
return region_similarity_calculator.IoaSimilarity()
if similarity_calculator == 'neg_sq_dist_similarity':
return region_similarity_calculator.NegSqDistSimilarity()
if similarity_calculator == 'thresholded_iou_similarity':
return region_similarity_calculator.ThresholdedIouSimilarity(
region_similarity_calculator_config.thresholded_iou_similarity
.iou_threshold)
raise ValueError('Unknown region similarity calculator.')
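# Illustrative usage sketch; this helper is not used by the library.
def _example_build_iou_similarity():
  """Builds an IouSimilarity calculator from a proto; illustrative only."""
  config = region_similarity_calculator_pb2.RegionSimilarityCalculator()
  # Setting the empty iou_similarity message selects IoU in the oneof.
  config.iou_similarity.SetInParent()
  return build(config)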
| 2,383 | 38.733333 | 80 | py |
models | models-master/research/object_detection/builders/matcher_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection matcher from configuration."""
from object_detection.matchers import argmax_matcher
from object_detection.protos import matcher_pb2
from object_detection.utils import tf_version
if tf_version.is_tf1():
from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top
def build(matcher_config):
"""Builds a matcher object based on the matcher config.
Args:
matcher_config: A matcher.proto object containing the config for the desired
Matcher.
Returns:
Matcher based on the config.
Raises:
ValueError: On empty matcher proto.
"""
if not isinstance(matcher_config, matcher_pb2.Matcher):
raise ValueError('matcher_config not of type matcher_pb2.Matcher.')
if matcher_config.WhichOneof('matcher_oneof') == 'argmax_matcher':
matcher = matcher_config.argmax_matcher
matched_threshold = unmatched_threshold = None
if not matcher.ignore_thresholds:
matched_threshold = matcher.matched_threshold
unmatched_threshold = matcher.unmatched_threshold
return argmax_matcher.ArgMaxMatcher(
matched_threshold=matched_threshold,
unmatched_threshold=unmatched_threshold,
negatives_lower_than_unmatched=matcher.negatives_lower_than_unmatched,
force_match_for_each_row=matcher.force_match_for_each_row,
use_matmul_gather=matcher.use_matmul_gather)
if matcher_config.WhichOneof('matcher_oneof') == 'bipartite_matcher':
if tf_version.is_tf2():
raise ValueError('bipartite_matcher is not supported in TF 2.X')
matcher = matcher_config.bipartite_matcher
return bipartite_matcher.GreedyBipartiteMatcher(matcher.use_matmul_gather)
raise ValueError('Empty matcher.')
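# Illustrative usage sketch (threshold values are examples only); this helper
# is not used by the library.
def _example_build_argmax_matcher():
  """Builds an ArgMaxMatcher from a matcher proto; illustrative only."""
  config = matcher_pb2.Matcher()
  config.argmax_matcher.matched_threshold = 0.7
  config.argmax_matcher.unmatched_threshold = 0.3
  return build(config)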
| 2,436 | 40.305085 | 96 | py |
models | models-master/research/object_detection/builders/anchor_generator_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection anchor generator from config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
from object_detection.anchor_generators import flexible_grid_anchor_generator
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.anchor_generators import multiple_grid_anchor_generator
from object_detection.anchor_generators import multiscale_grid_anchor_generator
from object_detection.protos import anchor_generator_pb2
def build(anchor_generator_config):
"""Builds an anchor generator based on the config.
Args:
anchor_generator_config: An anchor_generator.proto object containing the
config for the desired anchor generator.
Returns:
Anchor generator based on the config.
Raises:
ValueError: On empty anchor generator proto.
"""
if not isinstance(anchor_generator_config,
anchor_generator_pb2.AnchorGenerator):
raise ValueError('anchor_generator_config not of type '
'anchor_generator_pb2.AnchorGenerator')
if anchor_generator_config.WhichOneof(
'anchor_generator_oneof') == 'grid_anchor_generator':
grid_anchor_generator_config = anchor_generator_config.grid_anchor_generator
return grid_anchor_generator.GridAnchorGenerator(
scales=[float(scale) for scale in grid_anchor_generator_config.scales],
aspect_ratios=[float(aspect_ratio)
for aspect_ratio
in grid_anchor_generator_config.aspect_ratios],
base_anchor_size=[grid_anchor_generator_config.height,
grid_anchor_generator_config.width],
anchor_stride=[grid_anchor_generator_config.height_stride,
grid_anchor_generator_config.width_stride],
anchor_offset=[grid_anchor_generator_config.height_offset,
grid_anchor_generator_config.width_offset])
elif anchor_generator_config.WhichOneof(
'anchor_generator_oneof') == 'ssd_anchor_generator':
ssd_anchor_generator_config = anchor_generator_config.ssd_anchor_generator
anchor_strides = None
if ssd_anchor_generator_config.height_stride:
anchor_strides = list(
zip(ssd_anchor_generator_config.height_stride,
ssd_anchor_generator_config.width_stride))
anchor_offsets = None
if ssd_anchor_generator_config.height_offset:
anchor_offsets = list(
zip(ssd_anchor_generator_config.height_offset,
ssd_anchor_generator_config.width_offset))
return multiple_grid_anchor_generator.create_ssd_anchors(
num_layers=ssd_anchor_generator_config.num_layers,
min_scale=ssd_anchor_generator_config.min_scale,
max_scale=ssd_anchor_generator_config.max_scale,
scales=[float(scale) for scale in ssd_anchor_generator_config.scales],
aspect_ratios=ssd_anchor_generator_config.aspect_ratios,
interpolated_scale_aspect_ratio=(
ssd_anchor_generator_config.interpolated_scale_aspect_ratio),
base_anchor_size=[
ssd_anchor_generator_config.base_anchor_height,
ssd_anchor_generator_config.base_anchor_width
],
anchor_strides=anchor_strides,
anchor_offsets=anchor_offsets,
reduce_boxes_in_lowest_layer=(
ssd_anchor_generator_config.reduce_boxes_in_lowest_layer))
elif anchor_generator_config.WhichOneof(
'anchor_generator_oneof') == 'multiscale_anchor_generator':
cfg = anchor_generator_config.multiscale_anchor_generator
return multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator(
cfg.min_level,
cfg.max_level,
cfg.anchor_scale,
[float(aspect_ratio) for aspect_ratio in cfg.aspect_ratios],
cfg.scales_per_octave,
cfg.normalize_coordinates
)
elif anchor_generator_config.WhichOneof(
'anchor_generator_oneof') == 'flexible_grid_anchor_generator':
cfg = anchor_generator_config.flexible_grid_anchor_generator
base_sizes = []
aspect_ratios = []
strides = []
offsets = []
for anchor_grid in cfg.anchor_grid:
base_sizes.append(tuple(anchor_grid.base_sizes))
aspect_ratios.append(tuple(anchor_grid.aspect_ratios))
strides.append((anchor_grid.height_stride, anchor_grid.width_stride))
offsets.append((anchor_grid.height_offset, anchor_grid.width_offset))
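    # Each anchor_grid message describes one feature-map level: its base
    # sizes, aspect ratios, and the (height, width) stride/offset of its grid.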
return flexible_grid_anchor_generator.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, strides, offsets, cfg.normalize_coordinates)
else:
raise ValueError('Empty anchor generator.')
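# Illustrative usage sketch (added for documentation only; the config values
# below are hypothetical, not canonical defaults).
def _example_usage():
  """Builds a grid anchor generator from a text-format config (sketch only)."""
  from google.protobuf import text_format  # local import for the sketch
  example_proto = anchor_generator_pb2.AnchorGenerator()
  text_format.Merge("""
    grid_anchor_generator {
      scales: [0.5, 1.0, 2.0]
      aspect_ratios: [0.5, 1.0, 2.0]
      height_stride: 16
      width_stride: 16
    }
  """, example_proto)
  return build(example_proto)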
| 5,382 | 45.405172 | 80 | py |
models | models-master/research/object_detection/builders/image_resizer_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.builders.image_resizer_builder."""
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import image_resizer_builder
from object_detection.protos import image_resizer_pb2
from object_detection.utils import test_case
class ImageResizerBuilderTest(test_case.TestCase):
def _shape_of_resized_random_image_given_text_proto(self, input_shape,
text_proto):
image_resizer_config = image_resizer_pb2.ImageResizer()
text_format.Merge(text_proto, image_resizer_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
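    # execute_cpu runs graph_fn eagerly under TF2 and inside a graph/session
    # under TF1, so the same test body covers both TF versions.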
def graph_fn():
images = tf.cast(
tf.random_uniform(input_shape, minval=0, maxval=255, dtype=tf.int32),
dtype=tf.float32)
resized_images, _ = image_resizer_fn(images)
return resized_images
return self.execute_cpu(graph_fn, []).shape
def test_build_keep_aspect_ratio_resizer_returns_expected_shape(self):
image_resizer_text_proto = """
keep_aspect_ratio_resizer {
min_dimension: 10
max_dimension: 20
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (20, 10, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_keep_aspect_ratio_resizer_grayscale(self):
image_resizer_text_proto = """
keep_aspect_ratio_resizer {
min_dimension: 10
max_dimension: 20
convert_to_grayscale: true
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (20, 10, 1)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_keep_aspect_ratio_resizer_with_padding(self):
image_resizer_text_proto = """
keep_aspect_ratio_resizer {
min_dimension: 10
max_dimension: 20
pad_to_max_dimension: true
per_channel_pad_value: 3
per_channel_pad_value: 4
per_channel_pad_value: 5
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (20, 20, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_built_fixed_shape_resizer_returns_expected_shape(self):
image_resizer_text_proto = """
fixed_shape_resizer {
height: 10
width: 20
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (10, 20, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_built_fixed_shape_resizer_grayscale(self):
image_resizer_text_proto = """
fixed_shape_resizer {
height: 10
width: 20
convert_to_grayscale: true
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (10, 20, 1)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_identity_resizer_returns_expected_shape(self):
image_resizer_text_proto = """
identity_resizer {
}
"""
input_shape = (10, 20, 3)
expected_output_shape = (10, 20, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_raises_error_on_invalid_input(self):
invalid_input = 'invalid_input'
with self.assertRaises(ValueError):
image_resizer_builder.build(invalid_input)
def _resized_image_given_text_proto(self, image, text_proto):
image_resizer_config = image_resizer_pb2.ImageResizer()
text_format.Merge(text_proto, image_resizer_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
def graph_fn(image):
resized_image, _ = image_resizer_fn(image)
return resized_image
return self.execute_cpu(graph_fn, [image])
def test_fixed_shape_resizer_nearest_neighbor_method(self):
image_resizer_text_proto = """
fixed_shape_resizer {
height: 1
width: 1
resize_method: NEAREST_NEIGHBOR
}
"""
image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
image = np.expand_dims(image, axis=2)
image = np.tile(image, (1, 1, 3))
image = np.expand_dims(image, axis=0)
resized_image = self._resized_image_given_text_proto(
image, image_resizer_text_proto)
vals = np.unique(resized_image).tolist()
self.assertEqual(len(vals), 1)
self.assertEqual(vals[0], 1)
def test_build_conditional_shape_resizer_greater_returns_expected_shape(self):
image_resizer_text_proto = """
conditional_shape_resizer {
condition: GREATER
size_threshold: 30
}
"""
input_shape = (60, 30, 3)
expected_output_shape = (30, 15, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_conditional_shape_resizer_same_shape_with_no_resize(self):
image_resizer_text_proto = """
conditional_shape_resizer {
condition: GREATER
size_threshold: 30
}
"""
input_shape = (15, 15, 3)
expected_output_shape = (15, 15, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_conditional_shape_resizer_smaller_returns_expected_shape(self):
image_resizer_text_proto = """
conditional_shape_resizer {
condition: SMALLER
size_threshold: 30
}
"""
input_shape = (30, 15, 3)
expected_output_shape = (60, 30, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_conditional_shape_resizer_grayscale(self):
image_resizer_text_proto = """
conditional_shape_resizer {
condition: GREATER
size_threshold: 30
convert_to_grayscale: true
}
"""
input_shape = (60, 30, 3)
expected_output_shape = (30, 15, 1)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_conditional_shape_resizer_error_on_invalid_condition(self):
invalid_image_resizer_text_proto = """
conditional_shape_resizer {
condition: INVALID
size_threshold: 30
}
"""
with self.assertRaises(ValueError):
image_resizer_builder.build(invalid_image_resizer_text_proto)
def test_build_pad_to_multiple_resizer(self):
"""Test building a pad_to_multiple_resizer from proto."""
image_resizer_text_proto = """
pad_to_multiple_resizer {
multiple: 32
}
"""
input_shape = (60, 30, 3)
expected_output_shape = (64, 32, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_pad_to_multiple_resizer_invalid_multiple(self):
"""Test that building a pad_to_multiple_resizer errors with invalid multiple."""
image_resizer_text_proto = """
pad_to_multiple_resizer {
multiple: -10
}
"""
    image_resizer_config = image_resizer_pb2.ImageResizer()
    text_format.Merge(image_resizer_text_proto, image_resizer_config)
    with self.assertRaises(ValueError):
      image_resizer_builder.build(image_resizer_config)
if __name__ == '__main__':
tf.test.main()
| 8,728 | 34.77459 | 84 | py |
models | models-master/research/object_detection/builders/graph_rewriter_builder_tf1_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_rewriter_builder."""
import unittest
from unittest import mock # pylint: disable=g-importing-member
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.builders import graph_rewriter_builder
from object_detection.protos import graph_rewriter_pb2
from object_detection.utils import tf_version
if tf_version.is_tf1():
from tensorflow.contrib import quantize as contrib_quantize # pylint: disable=g-import-not-at-top
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class QuantizationBuilderTest(tf.test.TestCase):
def testQuantizationBuilderSetsUpCorrectTrainArguments(self):
with mock.patch.object(
contrib_quantize,
'experimental_create_training_graph') as mock_quant_fn:
with mock.patch.object(slim,
'summarize_collection') as mock_summarize_col:
graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_proto.quantization.delay = 10
graph_rewriter_proto.quantization.weight_bits = 8
graph_rewriter_proto.quantization.activation_bits = 8
graph_rewrite_fn = graph_rewriter_builder.build(
graph_rewriter_proto, is_training=True)
graph_rewrite_fn()
_, kwargs = mock_quant_fn.call_args
self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
self.assertEqual(kwargs['quant_delay'], 10)
mock_summarize_col.assert_called_with('quant_vars')
def testQuantizationBuilderSetsUpCorrectEvalArguments(self):
with mock.patch.object(contrib_quantize,
'experimental_create_eval_graph') as mock_quant_fn:
with mock.patch.object(slim,
'summarize_collection') as mock_summarize_col:
graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_proto.quantization.delay = 10
graph_rewrite_fn = graph_rewriter_builder.build(
graph_rewriter_proto, is_training=False)
graph_rewrite_fn()
_, kwargs = mock_quant_fn.call_args
self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
mock_summarize_col.assert_called_with('quant_vars')
if __name__ == '__main__':
tf.test.main()
| 2,948 | 42.367647 | 100 | py |
models | models-master/research/object_detection/builders/anchor_generator_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generator_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.anchor_generators import flexible_grid_anchor_generator
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.anchor_generators import multiple_grid_anchor_generator
from object_detection.anchor_generators import multiscale_grid_anchor_generator
from object_detection.builders import anchor_generator_builder
from object_detection.protos import anchor_generator_pb2
class AnchorGeneratorBuilderTest(tf.test.TestCase):
def assert_almost_list_equal(self, expected_list, actual_list, delta=None):
self.assertEqual(len(expected_list), len(actual_list))
for expected_item, actual_item in zip(expected_list, actual_list):
self.assertAlmostEqual(expected_item, actual_item, delta=delta)
def test_build_grid_anchor_generator_with_defaults(self):
anchor_generator_text_proto = """
grid_anchor_generator {
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
grid_anchor_generator.GridAnchorGenerator)
self.assertListEqual(anchor_generator_object._scales, [])
self.assertListEqual(anchor_generator_object._aspect_ratios, [])
self.assertAllEqual(anchor_generator_object._anchor_offset, [0, 0])
self.assertAllEqual(anchor_generator_object._anchor_stride, [16, 16])
self.assertAllEqual(anchor_generator_object._base_anchor_size, [256, 256])
def test_build_grid_anchor_generator_with_non_default_parameters(self):
anchor_generator_text_proto = """
grid_anchor_generator {
height: 128
width: 512
height_stride: 10
width_stride: 20
height_offset: 30
width_offset: 40
scales: [0.4, 2.2]
aspect_ratios: [0.3, 4.5]
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
grid_anchor_generator.GridAnchorGenerator)
self.assert_almost_list_equal(anchor_generator_object._scales,
[0.4, 2.2])
self.assert_almost_list_equal(anchor_generator_object._aspect_ratios,
[0.3, 4.5])
self.assertAllEqual(anchor_generator_object._anchor_offset, [30, 40])
self.assertAllEqual(anchor_generator_object._anchor_stride, [10, 20])
self.assertAllEqual(anchor_generator_object._base_anchor_size, [128, 512])
def test_build_ssd_anchor_generator_with_defaults(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [1.0]
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator)
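    # With the default min_scale=0.2, max_scale=0.95 and 6 layers, scales are
    # interpolated as 0.2 + 0.75 * k / 5 per layer, each paired with the
    # geometric mean of the next scale; the lowest layer is special-cased to
    # (0.1, 0.2, 0.2) because reduce_boxes_in_lowest_layer defaults to true.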
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.1, 0.2, 0.2),
(0.35, 0.418),
(0.499, 0.570),
(0.649, 0.721),
(0.799, 0.871),
(0.949, 0.974)]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
[(1.0, 2.0, 0.5)] + 5 * [(1.0, 1.0)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0])
def test_build_ssd_anchor_generator_with_custom_scales(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [1.0]
scales: [0.1, 0.15, 0.2, 0.4, 0.6, 0.8]
reduce_boxes_in_lowest_layer: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator)
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.1, math.sqrt(0.1 * 0.15)),
(0.15, math.sqrt(0.15 * 0.2)),
(0.2, math.sqrt(0.2 * 0.4)),
(0.4, math.sqrt(0.4 * 0.6)),
(0.6, math.sqrt(0.6 * 0.8)),
(0.8, math.sqrt(0.8 * 1.0))]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
def test_build_ssd_anchor_generator_with_custom_interpolated_scale(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [0.5]
interpolated_scale_aspect_ratio: 0.5
reduce_boxes_in_lowest_layer: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
6 * [(0.5, 0.5)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
def test_build_ssd_anchor_generator_without_reduced_boxes(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [1.0]
reduce_boxes_in_lowest_layer: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator)
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.2, 0.264),
(0.35, 0.418),
(0.499, 0.570),
(0.649, 0.721),
(0.799, 0.871),
(0.949, 0.974)]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
6 * [(1.0, 1.0)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0])
def test_build_ssd_anchor_generator_with_non_default_parameters(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
num_layers: 2
min_scale: 0.3
max_scale: 0.8
aspect_ratios: [2.0]
height_stride: 16
height_stride: 32
width_stride: 20
width_stride: 30
height_offset: 8
height_offset: 16
width_offset: 0
width_offset: 10
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator)
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.1, 0.3, 0.3), (0.8, 0.894)]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
[(1.0, 2.0, 0.5), (2.0, 1.0)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
for actual_strides, expected_strides in zip(
list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]):
self.assert_almost_list_equal(expected_strides, actual_strides)
for actual_offsets, expected_offsets in zip(
list(anchor_generator_object._anchor_offsets), [(8, 0), (16, 10)]):
self.assert_almost_list_equal(expected_offsets, actual_offsets)
self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0])
  def test_raise_value_error_on_empty_anchor_generator(self):
anchor_generator_text_proto = """
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
with self.assertRaises(ValueError):
anchor_generator_builder.build(anchor_generator_proto)
def test_build_multiscale_anchor_generator_custom_aspect_ratios(self):
anchor_generator_text_proto = """
multiscale_anchor_generator {
aspect_ratios: [1.0]
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiscale_grid_anchor_generator.
MultiscaleGridAnchorGenerator)
for level, anchor_grid_info in zip(
range(3, 8), anchor_generator_object._anchor_grid_info):
self.assertEqual(set(anchor_grid_info.keys()), set(['level', 'info']))
      self.assertEqual(level, anchor_grid_info['level'])
self.assertEqual(len(anchor_grid_info['info']), 4)
self.assertAllClose(anchor_grid_info['info'][0], [2**0, 2**0.5])
      self.assertAllClose(anchor_grid_info['info'][1], [1.0])
self.assertAllClose(anchor_grid_info['info'][2],
[4.0 * 2**level, 4.0 * 2**level])
self.assertAllClose(anchor_grid_info['info'][3], [2**level, 2**level])
self.assertTrue(anchor_generator_object._normalize_coordinates)
def test_build_multiscale_anchor_generator_with_anchors_in_pixel_coordinates(
self):
anchor_generator_text_proto = """
multiscale_anchor_generator {
aspect_ratios: [1.0]
normalize_coordinates: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiscale_grid_anchor_generator.
MultiscaleGridAnchorGenerator)
self.assertFalse(anchor_generator_object._normalize_coordinates)
def test_build_flexible_anchor_generator(self):
anchor_generator_text_proto = """
flexible_grid_anchor_generator {
anchor_grid {
base_sizes: [1.5]
aspect_ratios: [1.0]
height_stride: 16
width_stride: 20
height_offset: 8
width_offset: 9
}
anchor_grid {
base_sizes: [1.0, 2.0]
aspect_ratios: [1.0, 0.5]
height_stride: 32
width_stride: 30
height_offset: 10
width_offset: 11
}
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
flexible_grid_anchor_generator.
FlexibleGridAnchorGenerator)
for actual_base_sizes, expected_base_sizes in zip(
list(anchor_generator_object._base_sizes), [(1.5,), (1.0, 2.0)]):
self.assert_almost_list_equal(expected_base_sizes, actual_base_sizes)
for actual_aspect_ratios, expected_aspect_ratios in zip(
list(anchor_generator_object._aspect_ratios), [(1.0,), (1.0, 0.5)]):
self.assert_almost_list_equal(expected_aspect_ratios,
actual_aspect_ratios)
for actual_strides, expected_strides in zip(
list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]):
self.assert_almost_list_equal(expected_strides, actual_strides)
for actual_offsets, expected_offsets in zip(
list(anchor_generator_object._anchor_offsets), [(8, 9), (10, 11)]):
self.assert_almost_list_equal(expected_offsets, actual_offsets)
self.assertTrue(anchor_generator_object._normalize_coordinates)
if __name__ == '__main__':
tf.test.main()
| 14,292 | 41.162242 | 80 | py |
models | models-master/research/object_detection/builders/post_processing_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for post_processing_builder."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import post_processing_builder
from object_detection.protos import post_processing_pb2
from object_detection.utils import test_case
class PostProcessingBuilderTest(test_case.TestCase):
def test_build_non_max_suppressor_with_correct_parameters(self):
post_processing_text_proto = """
batch_non_max_suppression {
score_threshold: 0.7
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
soft_nms_sigma: 0.4
}
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
non_max_suppressor, _ = post_processing_builder.build(
post_processing_config)
self.assertEqual(non_max_suppressor.keywords['max_size_per_class'], 100)
self.assertEqual(non_max_suppressor.keywords['max_total_size'], 300)
self.assertAlmostEqual(non_max_suppressor.keywords['score_thresh'], 0.7)
self.assertAlmostEqual(non_max_suppressor.keywords['iou_thresh'], 0.6)
self.assertAlmostEqual(non_max_suppressor.keywords['soft_nms_sigma'], 0.4)
def test_build_non_max_suppressor_with_correct_parameters_classagnostic_nms(
self):
post_processing_text_proto = """
batch_non_max_suppression {
score_threshold: 0.7
iou_threshold: 0.6
max_detections_per_class: 10
max_total_detections: 300
use_class_agnostic_nms: True
max_classes_per_detection: 1
}
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
non_max_suppressor, _ = post_processing_builder.build(
post_processing_config)
self.assertEqual(non_max_suppressor.keywords['max_size_per_class'], 10)
self.assertEqual(non_max_suppressor.keywords['max_total_size'], 300)
self.assertEqual(non_max_suppressor.keywords['max_classes_per_detection'],
1)
self.assertEqual(non_max_suppressor.keywords['use_class_agnostic_nms'],
True)
self.assertAlmostEqual(non_max_suppressor.keywords['score_thresh'], 0.7)
self.assertAlmostEqual(non_max_suppressor.keywords['iou_thresh'], 0.6)
def test_build_identity_score_converter(self):
post_processing_text_proto = """
score_converter: IDENTITY
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(
post_processing_config)
self.assertEqual(score_converter.__name__, 'identity_with_logit_scale')
def graph_fn():
inputs = tf.constant([1, 1], tf.float32)
outputs = score_converter(inputs)
return outputs
converted_scores = self.execute_cpu(graph_fn, [])
self.assertAllClose(converted_scores, [1, 1])
def test_build_identity_score_converter_with_logit_scale(self):
post_processing_text_proto = """
score_converter: IDENTITY
logit_scale: 2.0
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'identity_with_logit_scale')
def graph_fn():
inputs = tf.constant([1, 1], tf.float32)
outputs = score_converter(inputs)
return outputs
converted_scores = self.execute_cpu(graph_fn, [])
self.assertAllClose(converted_scores, [.5, .5])
def test_build_sigmoid_score_converter(self):
post_processing_text_proto = """
score_converter: SIGMOID
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'sigmoid_with_logit_scale')
def test_build_softmax_score_converter(self):
post_processing_text_proto = """
score_converter: SOFTMAX
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'softmax_with_logit_scale')
def test_build_softmax_score_converter_with_temperature(self):
post_processing_text_proto = """
score_converter: SOFTMAX
logit_scale: 2.0
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'softmax_with_logit_scale')
def test_build_calibrator_with_nonempty_config(self):
"""Test that identity function used when no calibration_config specified."""
# Calibration config maps all scores to 0.5.
post_processing_text_proto = """
score_converter: SOFTMAX
calibration_config {
function_approximation {
x_y_pairs {
x_y_pair {
x: 0.0
y: 0.5
}
x_y_pair {
x: 1.0
y: 0.5
}}}}"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, calibrated_score_conversion_fn = post_processing_builder.build(
post_processing_config)
self.assertEqual(calibrated_score_conversion_fn.__name__,
'calibrate_with_function_approximation')
def graph_fn():
input_scores = tf.constant([1, 1], tf.float32)
outputs = calibrated_score_conversion_fn(input_scores)
return outputs
calibrated_scores = self.execute_cpu(graph_fn, [])
self.assertAllClose(calibrated_scores, [0.5, 0.5])
def test_build_temperature_scaling_calibrator(self):
post_processing_text_proto = """
score_converter: SOFTMAX
calibration_config {
temperature_scaling_calibration {
scaler: 2.0
}}"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, calibrated_score_conversion_fn = post_processing_builder.build(
post_processing_config)
self.assertEqual(calibrated_score_conversion_fn.__name__,
'calibrate_with_temperature_scaling_calibration')
def graph_fn():
input_scores = tf.constant([1, 1], tf.float32)
outputs = calibrated_score_conversion_fn(input_scores)
return outputs
calibrated_scores = self.execute_cpu(graph_fn, [])
self.assertAllClose(calibrated_scores, [0.5, 0.5])
if __name__ == '__main__':
tf.test.main()
| 7,791 | 40.892473 | 80 | py |
models | models-master/research/object_detection/builders/losses_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses_builder."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import losses_builder
from object_detection.core import losses
from object_detection.protos import losses_pb2
from object_detection.utils import ops
class LocalizationLossBuilderTest(tf.test.TestCase):
def test_build_weighted_l2_localization_loss(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(localization_loss,
losses.WeightedL2LocalizationLoss)
def test_build_weighted_smooth_l1_localization_loss_default_delta(self):
losses_text_proto = """
localization_loss {
weighted_smooth_l1 {
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(localization_loss,
losses.WeightedSmoothL1LocalizationLoss)
self.assertAlmostEqual(localization_loss._delta, 1.0)
def test_build_weighted_smooth_l1_localization_loss_non_default_delta(self):
losses_text_proto = """
localization_loss {
weighted_smooth_l1 {
delta: 0.1
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(localization_loss,
losses.WeightedSmoothL1LocalizationLoss)
self.assertAlmostEqual(localization_loss._delta, 0.1)
def test_build_weighted_iou_localization_loss(self):
losses_text_proto = """
localization_loss {
weighted_iou {
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(localization_loss,
losses.WeightedIOULocalizationLoss)
def test_build_weighted_giou_localization_loss(self):
losses_text_proto = """
localization_loss {
weighted_giou {
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(localization_loss,
losses.WeightedGIOULocalizationLoss)
def test_anchorwise_output(self):
losses_text_proto = """
localization_loss {
weighted_smooth_l1 {
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(localization_loss,
losses.WeightedSmoothL1LocalizationLoss)
predictions = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
targets = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
weights = tf.constant([[1.0, 1.0]])
loss = localization_loss(predictions, targets, weights=weights)
self.assertEqual(loss.shape, [1, 2])
def test_raise_error_on_empty_localization_config(self):
losses_text_proto = """
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
with self.assertRaises(ValueError):
losses_builder._build_localization_loss(losses_proto)
class ClassificationLossBuilderTest(tf.test.TestCase):
def test_build_weighted_sigmoid_classification_loss(self):
losses_text_proto = """
classification_loss {
weighted_sigmoid {
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(classification_loss,
losses.WeightedSigmoidClassificationLoss)
def test_build_weighted_sigmoid_focal_classification_loss(self):
losses_text_proto = """
classification_loss {
weighted_sigmoid_focal {
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(classification_loss,
losses.SigmoidFocalClassificationLoss)
self.assertAlmostEqual(classification_loss._alpha, None)
self.assertAlmostEqual(classification_loss._gamma, 2.0)
def test_build_weighted_sigmoid_focal_loss_non_default(self):
losses_text_proto = """
classification_loss {
weighted_sigmoid_focal {
alpha: 0.25
gamma: 3.0
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(classification_loss,
losses.SigmoidFocalClassificationLoss)
self.assertAlmostEqual(classification_loss._alpha, 0.25)
self.assertAlmostEqual(classification_loss._gamma, 3.0)
def test_build_weighted_softmax_classification_loss(self):
losses_text_proto = """
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss)
def test_build_weighted_logits_softmax_classification_loss(self):
losses_text_proto = """
classification_loss {
weighted_logits_softmax {
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(
classification_loss,
losses.WeightedSoftmaxClassificationAgainstLogitsLoss)
def test_build_weighted_softmax_classification_loss_with_logit_scale(self):
losses_text_proto = """
classification_loss {
weighted_softmax {
logit_scale: 2.0
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss)
def test_build_bootstrapped_sigmoid_classification_loss(self):
losses_text_proto = """
classification_loss {
bootstrapped_sigmoid {
alpha: 0.5
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(classification_loss,
losses.BootstrappedSigmoidClassificationLoss)
def test_anchorwise_output(self):
losses_text_proto = """
classification_loss {
weighted_sigmoid {
anchorwise_output: true
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(classification_loss,
losses.WeightedSigmoidClassificationLoss)
predictions = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.5, 0.5]]])
targets = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]])
weights = tf.constant([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]])
loss = classification_loss(predictions, targets, weights=weights)
self.assertEqual(loss.shape, [1, 2, 3])
def test_raise_error_on_empty_config(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
with self.assertRaises(ValueError):
losses_builder.build(losses_proto)
def test_build_penalty_reduced_logistic_focal_loss(self):
losses_text_proto = """
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 2.0
beta: 4.0
}
}
localization_loss {
l1_localization_loss {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(classification_loss,
losses.PenaltyReducedLogisticFocalLoss)
self.assertAlmostEqual(classification_loss._alpha, 2.0)
self.assertAlmostEqual(classification_loss._beta, 4.0)
def test_build_dice_loss(self):
losses_text_proto = """
classification_loss {
weighted_dice_classification_loss {
squared_normalization: true
}
}
localization_loss {
l1_localization_loss {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(classification_loss,
losses.WeightedDiceClassificationLoss)
    self.assertTrue(classification_loss._squared_normalization)
class HardExampleMinerBuilderTest(tf.test.TestCase):
def test_do_not_build_hard_example_miner_by_default(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto)
self.assertEqual(hard_example_miner, None)
def test_build_hard_example_miner_for_classification_loss(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
hard_example_miner {
loss_type: CLASSIFICATION
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
self.assertEqual(hard_example_miner._loss_type, 'cls')
def test_build_hard_example_miner_for_localization_loss(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
hard_example_miner {
loss_type: LOCALIZATION
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
self.assertEqual(hard_example_miner._loss_type, 'loc')
def test_build_hard_example_miner_with_non_default_values(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
hard_example_miner {
num_hard_examples: 32
iou_threshold: 0.5
loss_type: LOCALIZATION
max_negatives_per_positive: 10
min_negatives_per_image: 3
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto)
self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
self.assertEqual(hard_example_miner._num_hard_examples, 32)
self.assertAlmostEqual(hard_example_miner._iou_threshold, 0.5)
self.assertEqual(hard_example_miner._max_negatives_per_positive, 10)
self.assertEqual(hard_example_miner._min_negatives_per_image, 3)
class LossBuilderTest(tf.test.TestCase):
def test_build_all_loss_parameters(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
hard_example_miner {
}
classification_weight: 0.8
localization_weight: 0.2
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
(classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, _,
_) = losses_builder.build(losses_proto)
self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
self.assertIsInstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss)
self.assertIsInstance(localization_loss,
losses.WeightedL2LocalizationLoss)
self.assertAlmostEqual(classification_weight, 0.8)
self.assertAlmostEqual(localization_weight, 0.2)
def test_build_expected_sampling(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
hard_example_miner {
}
classification_weight: 0.8
localization_weight: 0.2
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
(classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, _,
_) = losses_builder.build(losses_proto)
self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
self.assertIsInstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss)
self.assertIsInstance(localization_loss, losses.WeightedL2LocalizationLoss)
self.assertAlmostEqual(classification_weight, 0.8)
self.assertAlmostEqual(localization_weight, 0.2)
def test_build_reweighting_unmatched_anchors(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
hard_example_miner {
}
classification_weight: 0.8
localization_weight: 0.2
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
(classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, _,
_) = losses_builder.build(losses_proto)
self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
self.assertIsInstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss)
self.assertIsInstance(localization_loss, losses.WeightedL2LocalizationLoss)
self.assertAlmostEqual(classification_weight, 0.8)
self.assertAlmostEqual(localization_weight, 0.2)
def test_raise_error_when_both_focal_loss_and_hard_example_miner(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_sigmoid_focal {
}
}
hard_example_miner {
}
classification_weight: 0.8
localization_weight: 0.2
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
with self.assertRaises(ValueError):
losses_builder.build(losses_proto)
class FasterRcnnClassificationLossBuilderTest(tf.test.TestCase):
def test_build_sigmoid_loss(self):
losses_text_proto = """
weighted_sigmoid {
}
"""
losses_proto = losses_pb2.ClassificationLoss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss = losses_builder.build_faster_rcnn_classification_loss(
losses_proto)
self.assertIsInstance(classification_loss,
losses.WeightedSigmoidClassificationLoss)
def test_build_softmax_loss(self):
losses_text_proto = """
weighted_softmax {
}
"""
losses_proto = losses_pb2.ClassificationLoss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss = losses_builder.build_faster_rcnn_classification_loss(
losses_proto)
self.assertIsInstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss)
def test_build_logits_softmax_loss(self):
losses_text_proto = """
weighted_logits_softmax {
}
"""
losses_proto = losses_pb2.ClassificationLoss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss = losses_builder.build_faster_rcnn_classification_loss(
losses_proto)
    self.assertIsInstance(
        classification_loss,
        losses.WeightedSoftmaxClassificationAgainstLogitsLoss)
def test_build_sigmoid_focal_loss(self):
losses_text_proto = """
weighted_sigmoid_focal {
}
"""
losses_proto = losses_pb2.ClassificationLoss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss = losses_builder.build_faster_rcnn_classification_loss(
losses_proto)
self.assertIsInstance(classification_loss,
losses.SigmoidFocalClassificationLoss)
def test_build_softmax_loss_by_default(self):
losses_text_proto = """
"""
losses_proto = losses_pb2.ClassificationLoss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss = losses_builder.build_faster_rcnn_classification_loss(
losses_proto)
self.assertIsInstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss)
if __name__ == '__main__':
tf.test.main()
| 19,951 | 31.442276 | 80 | py |
models | models-master/research/object_detection/builders/matcher_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for matcher_builder."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import matcher_builder
from object_detection.matchers import argmax_matcher
from object_detection.protos import matcher_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
if tf_version.is_tf1():
from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top
class MatcherBuilderTest(test_case.TestCase):
def test_build_arg_max_matcher_with_defaults(self):
matcher_text_proto = """
argmax_matcher {
}
"""
matcher_proto = matcher_pb2.Matcher()
text_format.Merge(matcher_text_proto, matcher_proto)
matcher_object = matcher_builder.build(matcher_proto)
self.assertIsInstance(matcher_object, argmax_matcher.ArgMaxMatcher)
self.assertAlmostEqual(matcher_object._matched_threshold, 0.5)
self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.5)
self.assertTrue(matcher_object._negatives_lower_than_unmatched)
self.assertFalse(matcher_object._force_match_for_each_row)
def test_build_arg_max_matcher_without_thresholds(self):
matcher_text_proto = """
argmax_matcher {
ignore_thresholds: true
}
"""
matcher_proto = matcher_pb2.Matcher()
text_format.Merge(matcher_text_proto, matcher_proto)
matcher_object = matcher_builder.build(matcher_proto)
self.assertIsInstance(matcher_object, argmax_matcher.ArgMaxMatcher)
self.assertEqual(matcher_object._matched_threshold, None)
self.assertEqual(matcher_object._unmatched_threshold, None)
self.assertTrue(matcher_object._negatives_lower_than_unmatched)
self.assertFalse(matcher_object._force_match_for_each_row)
def test_build_arg_max_matcher_with_non_default_parameters(self):
matcher_text_proto = """
argmax_matcher {
matched_threshold: 0.7
unmatched_threshold: 0.3
negatives_lower_than_unmatched: false
force_match_for_each_row: true
use_matmul_gather: true
}
"""
matcher_proto = matcher_pb2.Matcher()
text_format.Merge(matcher_text_proto, matcher_proto)
matcher_object = matcher_builder.build(matcher_proto)
self.assertIsInstance(matcher_object, argmax_matcher.ArgMaxMatcher)
self.assertAlmostEqual(matcher_object._matched_threshold, 0.7)
self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.3)
self.assertFalse(matcher_object._negatives_lower_than_unmatched)
self.assertTrue(matcher_object._force_match_for_each_row)
self.assertTrue(matcher_object._use_matmul_gather)
def test_build_bipartite_matcher(self):
if tf_version.is_tf2():
self.skipTest('BipartiteMatcher unsupported in TF 2.X. Skipping.')
matcher_text_proto = """
bipartite_matcher {
}
"""
matcher_proto = matcher_pb2.Matcher()
text_format.Merge(matcher_text_proto, matcher_proto)
matcher_object = matcher_builder.build(matcher_proto)
self.assertIsInstance(matcher_object,
bipartite_matcher.GreedyBipartiteMatcher)
def test_raise_error_on_empty_matcher(self):
matcher_text_proto = """
"""
matcher_proto = matcher_pb2.Matcher()
text_format.Merge(matcher_text_proto, matcher_proto)
with self.assertRaises(ValueError):
matcher_builder.build(matcher_proto)
if __name__ == '__main__':
tf.test.main()
| 4,144 | 38.103774 | 96 | py |
models | models-master/research/object_detection/builders/box_coder_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_coder_builder."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import keypoint_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.box_coders import square_box_coder
from object_detection.builders import box_coder_builder
from object_detection.protos import box_coder_pb2
class BoxCoderBuilderTest(tf.test.TestCase):
def test_build_faster_rcnn_box_coder_with_defaults(self):
box_coder_text_proto = """
faster_rcnn_box_coder {
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertIsInstance(box_coder_object,
faster_rcnn_box_coder.FasterRcnnBoxCoder)
self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0])
def test_build_faster_rcnn_box_coder_with_non_default_parameters(self):
box_coder_text_proto = """
faster_rcnn_box_coder {
y_scale: 6.0
x_scale: 3.0
height_scale: 7.0
width_scale: 8.0
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertIsInstance(box_coder_object,
faster_rcnn_box_coder.FasterRcnnBoxCoder)
self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0])
def test_build_keypoint_box_coder_with_defaults(self):
box_coder_text_proto = """
keypoint_box_coder {
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder)
self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0])
def test_build_keypoint_box_coder_with_non_default_parameters(self):
box_coder_text_proto = """
keypoint_box_coder {
num_keypoints: 6
y_scale: 6.0
x_scale: 3.0
height_scale: 7.0
width_scale: 8.0
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder)
self.assertEqual(box_coder_object._num_keypoints, 6)
self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0])
def test_build_mean_stddev_box_coder(self):
box_coder_text_proto = """
mean_stddev_box_coder {
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertIsInstance(box_coder_object,
                          mean_stddev_box_coder.MeanStddevBoxCoder)
def test_build_square_box_coder_with_defaults(self):
box_coder_text_proto = """
square_box_coder {
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertIsInstance(box_coder_object, square_box_coder.SquareBoxCoder)
self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0])
def test_build_square_box_coder_with_non_default_parameters(self):
box_coder_text_proto = """
square_box_coder {
y_scale: 6.0
x_scale: 3.0
length_scale: 7.0
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
    self.assertIsInstance(box_coder_object, square_box_coder.SquareBoxCoder)
self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0])
def test_raise_error_on_empty_box_coder(self):
box_coder_text_proto = """
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
with self.assertRaises(ValueError):
box_coder_builder.build(box_coder_proto)
if __name__ == '__main__':
tf.test.main()
| 5,168 | 36.729927 | 80 | py |
models | models-master/research/object_detection/builders/optimizer_builder_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizer_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import six
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf1():
from tensorflow.contrib import opt as contrib_opt
# pylint: enable=g-import-not-at-top
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class LearningRateBuilderTest(tf.test.TestCase):
def testBuildConstantLearningRate(self):
learning_rate_text_proto = """
constant_learning_rate {
learning_rate: 0.004
}
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(
learning_rate_proto)
self.assertTrue(
six.ensure_str(learning_rate.op.name).endswith('learning_rate'))
with self.test_session():
learning_rate_out = learning_rate.eval()
self.assertAlmostEqual(learning_rate_out, 0.004)
def testBuildExponentialDecayLearningRate(self):
learning_rate_text_proto = """
exponential_decay_learning_rate {
initial_learning_rate: 0.004
decay_steps: 99999
decay_factor: 0.85
staircase: false
}
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(
learning_rate_proto)
self.assertTrue(
six.ensure_str(learning_rate.op.name).endswith('learning_rate'))
self.assertIsInstance(learning_rate, tf.Tensor)
def testBuildManualStepLearningRate(self):
learning_rate_text_proto = """
manual_step_learning_rate {
initial_learning_rate: 0.002
schedule {
step: 100
learning_rate: 0.006
}
schedule {
step: 90000
learning_rate: 0.00006
}
warmup: true
}
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(
learning_rate_proto)
self.assertIsInstance(learning_rate, tf.Tensor)
def testBuildCosineDecayLearningRate(self):
learning_rate_text_proto = """
cosine_decay_learning_rate {
learning_rate_base: 0.002
total_steps: 20000
warmup_learning_rate: 0.0001
warmup_steps: 1000
hold_base_rate_steps: 20000
}
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(
learning_rate_proto)
self.assertIsInstance(learning_rate, tf.Tensor)
def testRaiseErrorOnEmptyLearningRate(self):
learning_rate_text_proto = """
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
with self.assertRaises(ValueError):
optimizer_builder._create_learning_rate(learning_rate_proto)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class OptimizerBuilderTest(tf.test.TestCase):
def testBuildRMSPropOptimizer(self):
optimizer_text_proto = """
rms_prop_optimizer: {
learning_rate: {
exponential_decay_learning_rate {
initial_learning_rate: 0.004
decay_steps: 800720
decay_factor: 0.95
}
}
momentum_optimizer_value: 0.9
decay: 0.9
epsilon: 1.0
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.train.RMSPropOptimizer)
def testBuildMomentumOptimizer(self):
optimizer_text_proto = """
momentum_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.001
}
}
momentum_optimizer_value: 0.99
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.train.MomentumOptimizer)
def testBuildAdamOptimizer(self):
optimizer_text_proto = """
adam_optimizer: {
epsilon: 1e-6
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.train.AdamOptimizer)
def testBuildMovingAverageOptimizer(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: True
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, contrib_opt.MovingAverageOptimizer)
def testBuildMovingAverageOptimizerWithNonDefaultDecay(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: True
moving_average_decay: 0.2
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, contrib_opt.MovingAverageOptimizer)
# TODO(rathodv): Find a way to not depend on the private members.
self.assertAlmostEqual(optimizer._ema._decay, 0.2)
def testBuildEmptyOptimizer(self):
optimizer_text_proto = """
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
with self.assertRaises(ValueError):
optimizer_builder.build(optimizer_proto)
if __name__ == '__main__':
tf.test.main()
| 7,390 | 31.995536 | 80 | py |
models | models-master/research/object_detection/builders/optimizer_builder_tf2_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizer_builder."""
import unittest
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class OptimizerBuilderV2Test(tf.test.TestCase):
"""Test building optimizers in V2 mode."""
def testBuildRMSPropOptimizer(self):
optimizer_text_proto = """
rms_prop_optimizer: {
learning_rate: {
exponential_decay_learning_rate {
initial_learning_rate: 0.004
decay_steps: 800720
decay_factor: 0.95
}
}
momentum_optimizer_value: 0.9
decay: 0.9
epsilon: 1.0
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.keras.optimizers.RMSprop)
def testBuildMomentumOptimizer(self):
optimizer_text_proto = """
momentum_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.001
}
}
momentum_optimizer_value: 0.99
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.keras.optimizers.SGD)
def testBuildAdamOptimizer(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.keras.optimizers.Adam)
def testBuildMovingAverageOptimizer(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: True
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.keras.optimizers.Optimizer)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 3,378 | 31.180952 | 80 | py |
models | models-master/research/object_detection/builders/input_reader_builder_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for input_reader_builder."""
import os
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import input_reader_builder
from object_detection.core import standard_fields as fields
from object_detection.dataset_tools import seq_example_util
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
from object_detection.utils import tf_version
def _get_labelmap_path():
"""Returns an absolute path to label map file."""
parent_path = os.path.dirname(tf.resource_loader.get_data_files_path())
return os.path.join(parent_path, 'data',
'pet_label_map.pbtxt')
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class InputReaderBuilderTest(tf.test.TestCase):
def create_tf_record(self):
path = os.path.join(self.get_temp_dir(), 'tfrecord')
writer = tf.python_io.TFRecordWriter(path)
image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
flat_mask = (4 * 5) * [1.0]
with self.test_session():
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/height': dataset_util.int64_feature(4),
'image/width': dataset_util.int64_feature(5),
'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]),
'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]),
'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]),
'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]),
'image/object/class/label': dataset_util.int64_list_feature([2]),
'image/object/mask': dataset_util.float_list_feature(flat_mask),
}))
writer.write(example.SerializeToString())
writer.close()
return path
def _make_random_serialized_jpeg_images(self, num_frames, image_height,
image_width):
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list]
with tf.Session() as sess:
encoded_images = sess.run(encoded_images_list)
return encoded_images
def create_tf_record_sequence_example(self):
path = os.path.join(self.get_temp_dir(), 'tfrecord')
writer = tf.python_io.TFRecordWriter(path)
num_frames = 4
image_height = 20
image_width = 30
image_source_ids = [str(i) for i in range(num_frames)]
with self.test_session():
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_source_ids=image_source_ids,
image_format='JPEG',
is_annotated=[[1], [1], [1], [1]],
bboxes=[
[[]], # Frame 0.
[[0., 0., 1., 1.]], # Frame 1.
[[0., 0., 1., 1.],
[0.1, 0.1, 0.2, 0.2]], # Frame 2.
[[]], # Frame 3.
],
label_strings=[
[], # Frame 0.
['Abyssinian'], # Frame 1.
['Abyssinian', 'american_bulldog'], # Frame 2.
[], # Frame 3
]).SerializeToString()
writer.write(sequence_example_serialized)
writer.close()
return path
def create_tf_record_with_context(self):
path = os.path.join(self.get_temp_dir(), 'tfrecord')
writer = tf.python_io.TFRecordWriter(path)
image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
flat_mask = (4 * 5) * [1.0]
context_features = (10 * 3) * [1.0]
with self.test_session():
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/height':
dataset_util.int64_feature(4),
'image/width':
dataset_util.int64_feature(5),
'image/object/bbox/xmin':
dataset_util.float_list_feature([0.0]),
'image/object/bbox/xmax':
dataset_util.float_list_feature([1.0]),
'image/object/bbox/ymin':
dataset_util.float_list_feature([0.0]),
'image/object/bbox/ymax':
dataset_util.float_list_feature([1.0]),
'image/object/class/label':
dataset_util.int64_list_feature([2]),
'image/object/mask':
dataset_util.float_list_feature(flat_mask),
'image/context_features':
dataset_util.float_list_feature(context_features),
'image/context_feature_length':
dataset_util.int64_list_feature([10]),
}))
writer.write(example.SerializeToString())
writer.close()
return path
def test_build_tf_record_input_reader(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
tensor_dict = input_reader_builder.build(input_reader_proto)
with tf.train.MonitoredSession() as sess:
output_dict = sess.run(tensor_dict)
self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
output_dict)
self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape)
self.assertEqual([2],
output_dict[fields.InputDataFields.groundtruth_classes])
self.assertEqual(
(1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllEqual(
[0.0, 0.0, 1.0, 1.0],
output_dict[fields.InputDataFields.groundtruth_boxes][0])
def test_build_tf_record_input_reader_sequence_example(self):
tf_record_path = self.create_tf_record_sequence_example()
input_reader_text_proto = """
shuffle: false
num_readers: 1
input_type: TF_SEQUENCE_EXAMPLE
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
input_reader_proto.label_map_path = _get_labelmap_path()
text_format.Merge(input_reader_text_proto, input_reader_proto)
tensor_dict = input_reader_builder.build(input_reader_proto)
with tf.train.MonitoredSession() as sess:
output_dict = sess.run(tensor_dict)
expected_groundtruth_classes = [[-1, -1], [1, -1], [1, 2], [-1, -1]]
expected_groundtruth_boxes = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]
expected_num_groundtruth_boxes = [0, 1, 2, 0]
self.assertNotIn(
fields.InputDataFields.groundtruth_instance_masks, output_dict)
# sequence example images are encoded
self.assertEqual((4,), output_dict[fields.InputDataFields.image].shape)
self.assertAllEqual(expected_groundtruth_classes,
output_dict[fields.InputDataFields.groundtruth_classes])
self.assertEqual(
(4, 2, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllClose(expected_groundtruth_boxes,
output_dict[fields.InputDataFields.groundtruth_boxes])
self.assertAllClose(
expected_num_groundtruth_boxes,
output_dict[fields.InputDataFields.num_groundtruth_boxes])
def test_build_tf_record_input_reader_with_context(self):
tf_record_path = self.create_tf_record_with_context()
input_reader_text_proto = """
shuffle: false
num_readers: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
input_reader_proto.load_context_features = True
tensor_dict = input_reader_builder.build(input_reader_proto)
with tf.train.MonitoredSession() as sess:
output_dict = sess.run(tensor_dict)
self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
output_dict)
self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape)
self.assertEqual([2],
output_dict[fields.InputDataFields.groundtruth_classes])
self.assertEqual(
(1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllEqual(
[0.0, 0.0, 1.0, 1.0],
output_dict[fields.InputDataFields.groundtruth_boxes][0])
self.assertAllEqual(
(3, 10), output_dict[fields.InputDataFields.context_features].shape)
self.assertAllEqual(
(10), output_dict[fields.InputDataFields.context_feature_length])
def test_build_tf_record_input_reader_and_load_instance_masks(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
tensor_dict = input_reader_builder.build(input_reader_proto)
with tf.train.MonitoredSession() as sess:
output_dict = sess.run(tensor_dict)
self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape)
self.assertEqual([2],
output_dict[fields.InputDataFields.groundtruth_classes])
self.assertEqual(
(1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllEqual(
[0.0, 0.0, 1.0, 1.0],
output_dict[fields.InputDataFields.groundtruth_boxes][0])
self.assertAllEqual(
(1, 4, 5),
output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)
def test_raises_error_with_no_input_paths(self):
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
"""
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
with self.assertRaises(ValueError):
input_reader_builder.build(input_reader_proto)
if __name__ == '__main__':
tf.test.main()
| 12,269 | 38.967427 | 80 | py |
models | models-master/research/object_detection/builders/model_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build a DetectionModel from configuration."""
import functools
import sys
from absl import logging
from object_detection.builders import anchor_generator_builder
from object_detection.builders import box_coder_builder
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.builders import image_resizer_builder
from object_detection.builders import losses_builder
from object_detection.builders import matcher_builder
from object_detection.builders import post_processing_builder
from object_detection.builders import region_similarity_calculator_builder as sim_calc
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import post_processing
from object_detection.core import target_assigner
from object_detection.meta_architectures import center_net_meta_arch
from object_detection.meta_architectures import context_rcnn_meta_arch
from object_detection.meta_architectures import deepmac_meta_arch
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.meta_architectures import rfcn_meta_arch
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.predictors.heads import mask_head
from object_detection.protos import losses_pb2
from object_detection.protos import model_pb2
from object_detection.utils import label_map_util
from object_detection.utils import ops
from object_detection.utils import spatial_transform_ops as spatial_ops
from object_detection.utils import tf_version
## Feature Extractors for TF
## This section conditionally imports different feature extractors based on the
## Tensorflow version.
##
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
from object_detection.models import center_net_hourglass_feature_extractor
from object_detection.models import center_net_mobilenet_v2_feature_extractor
from object_detection.models import center_net_mobilenet_v2_fpn_feature_extractor
from object_detection.models import center_net_resnet_feature_extractor
from object_detection.models import center_net_resnet_v1_fpn_feature_extractor
from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res_keras
from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_resnet_keras
from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor as ssd_resnet_v1_fpn_keras
from object_detection.models import faster_rcnn_resnet_v1_fpn_keras_feature_extractor as frcnn_resnet_fpn_keras
from object_detection.models.ssd_mobilenet_v1_fpn_keras_feature_extractor import SSDMobileNetV1FpnKerasFeatureExtractor
from object_detection.models.ssd_mobilenet_v1_keras_feature_extractor import SSDMobileNetV1KerasFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_fpn_keras_feature_extractor import SSDMobileNetV2FpnKerasFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_keras_feature_extractor import SSDMobileNetV2KerasFeatureExtractor
from object_detection.predictors import rfcn_keras_box_predictor
if sys.version_info[0] >= 3:
from object_detection.models import ssd_efficientnet_bifpn_feature_extractor as ssd_efficientnet_bifpn
if tf_version.is_tf1():
from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res
from object_detection.models import faster_rcnn_inception_v2_feature_extractor as frcnn_inc_v2
from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas
from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas
from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor as ssd_resnet_v1_fpn
from object_detection.models import ssd_resnet_v1_ppn_feature_extractor as ssd_resnet_v1_ppn
from object_detection.models.embedded_ssd_mobilenet_v1_feature_extractor import EmbeddedSSDMobileNetV1FeatureExtractor
from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor
from object_detection.models.ssd_mobilenet_v2_fpn_feature_extractor import SSDMobileNetV2FpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_mnasfpn_feature_extractor import SSDMobileNetV2MnasFPNFeatureExtractor
from object_detection.models.ssd_inception_v3_feature_extractor import SSDInceptionV3FeatureExtractor
from object_detection.models.ssd_mobilenet_edgetpu_feature_extractor import SSDMobileNetEdgeTPUFeatureExtractor
from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor
from object_detection.models.ssd_mobilenet_v1_fpn_feature_extractor import SSDMobileNetV1FpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v1_ppn_feature_extractor import SSDMobileNetV1PpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_feature_extractor import SSDMobileNetV2FeatureExtractor
from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3LargeFeatureExtractor
from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3SmallFeatureExtractor
from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3SmallPrunedFeatureExtractor
from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetCPUFeatureExtractor
from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetDSPFeatureExtractor
from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetEdgeTPUFeatureExtractor
from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetGPUFeatureExtractor
from object_detection.models.ssd_spaghettinet_feature_extractor import SSDSpaghettinetFeatureExtractor
from object_detection.models.ssd_pnasnet_feature_extractor import SSDPNASNetFeatureExtractor
from object_detection.predictors import rfcn_box_predictor
# pylint: enable=g-import-not-at-top
if tf_version.is_tf2():
SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = {
'ssd_mobilenet_v1_keras': SSDMobileNetV1KerasFeatureExtractor,
'ssd_mobilenet_v1_fpn_keras': SSDMobileNetV1FpnKerasFeatureExtractor,
'ssd_mobilenet_v2_keras': SSDMobileNetV2KerasFeatureExtractor,
'ssd_mobilenet_v2_fpn_keras': SSDMobileNetV2FpnKerasFeatureExtractor,
'ssd_resnet50_v1_fpn_keras':
ssd_resnet_v1_fpn_keras.SSDResNet50V1FpnKerasFeatureExtractor,
'ssd_resnet101_v1_fpn_keras':
ssd_resnet_v1_fpn_keras.SSDResNet101V1FpnKerasFeatureExtractor,
'ssd_resnet152_v1_fpn_keras':
ssd_resnet_v1_fpn_keras.SSDResNet152V1FpnKerasFeatureExtractor,
'ssd_efficientnet-b0_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB0BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b1_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB1BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b2_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB2BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b3_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB3BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b4_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB4BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b5_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB5BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b6_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB6BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b7_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB7BiFPNKerasFeatureExtractor,
}
FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = {
'faster_rcnn_resnet50_keras':
frcnn_resnet_keras.FasterRCNNResnet50KerasFeatureExtractor,
'faster_rcnn_resnet101_keras':
frcnn_resnet_keras.FasterRCNNResnet101KerasFeatureExtractor,
'faster_rcnn_resnet152_keras':
frcnn_resnet_keras.FasterRCNNResnet152KerasFeatureExtractor,
'faster_rcnn_inception_resnet_v2_keras':
frcnn_inc_res_keras.FasterRCNNInceptionResnetV2KerasFeatureExtractor,
'faster_rcnn_resnet50_fpn_keras':
frcnn_resnet_fpn_keras.FasterRCNNResnet50FpnKerasFeatureExtractor,
'faster_rcnn_resnet101_fpn_keras':
frcnn_resnet_fpn_keras.FasterRCNNResnet101FpnKerasFeatureExtractor,
'faster_rcnn_resnet152_fpn_keras':
frcnn_resnet_fpn_keras.FasterRCNNResnet152FpnKerasFeatureExtractor,
}
CENTER_NET_EXTRACTOR_FUNCTION_MAP = {
'resnet_v2_50':
center_net_resnet_feature_extractor.resnet_v2_50,
'resnet_v2_101':
center_net_resnet_feature_extractor.resnet_v2_101,
'resnet_v1_18_fpn':
center_net_resnet_v1_fpn_feature_extractor.resnet_v1_18_fpn,
'resnet_v1_34_fpn':
center_net_resnet_v1_fpn_feature_extractor.resnet_v1_34_fpn,
'resnet_v1_50_fpn':
center_net_resnet_v1_fpn_feature_extractor.resnet_v1_50_fpn,
'resnet_v1_101_fpn':
center_net_resnet_v1_fpn_feature_extractor.resnet_v1_101_fpn,
'hourglass_10':
center_net_hourglass_feature_extractor.hourglass_10,
'hourglass_20':
center_net_hourglass_feature_extractor.hourglass_20,
'hourglass_32':
center_net_hourglass_feature_extractor.hourglass_32,
'hourglass_52':
center_net_hourglass_feature_extractor.hourglass_52,
'hourglass_104':
center_net_hourglass_feature_extractor.hourglass_104,
'mobilenet_v2':
center_net_mobilenet_v2_feature_extractor.mobilenet_v2,
'mobilenet_v2_fpn':
center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn,
'mobilenet_v2_fpn_sep_conv':
center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn,
}
FEATURE_EXTRACTOR_MAPS = [
CENTER_NET_EXTRACTOR_FUNCTION_MAP,
FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP,
SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP
]
if tf_version.is_tf1():
SSD_FEATURE_EXTRACTOR_CLASS_MAP = {
'ssd_inception_v2':
SSDInceptionV2FeatureExtractor,
'ssd_inception_v3':
SSDInceptionV3FeatureExtractor,
'ssd_mobilenet_v1':
SSDMobileNetV1FeatureExtractor,
'ssd_mobilenet_v1_fpn':
SSDMobileNetV1FpnFeatureExtractor,
'ssd_mobilenet_v1_ppn':
SSDMobileNetV1PpnFeatureExtractor,
'ssd_mobilenet_v2':
SSDMobileNetV2FeatureExtractor,
'ssd_mobilenet_v2_fpn':
SSDMobileNetV2FpnFeatureExtractor,
'ssd_mobilenet_v2_mnasfpn':
SSDMobileNetV2MnasFPNFeatureExtractor,
'ssd_mobilenet_v3_large':
SSDMobileNetV3LargeFeatureExtractor,
'ssd_mobilenet_v3_small':
SSDMobileNetV3SmallFeatureExtractor,
'ssd_mobilenet_v3_small_pruned':
SSDMobileNetV3SmallPrunedFeatureExtractor,
'ssd_mobilenet_edgetpu':
SSDMobileNetEdgeTPUFeatureExtractor,
'ssd_resnet50_v1_fpn':
ssd_resnet_v1_fpn.SSDResnet50V1FpnFeatureExtractor,
'ssd_resnet101_v1_fpn':
ssd_resnet_v1_fpn.SSDResnet101V1FpnFeatureExtractor,
'ssd_resnet152_v1_fpn':
ssd_resnet_v1_fpn.SSDResnet152V1FpnFeatureExtractor,
'ssd_resnet50_v1_ppn':
ssd_resnet_v1_ppn.SSDResnet50V1PpnFeatureExtractor,
'ssd_resnet101_v1_ppn':
ssd_resnet_v1_ppn.SSDResnet101V1PpnFeatureExtractor,
'ssd_resnet152_v1_ppn':
ssd_resnet_v1_ppn.SSDResnet152V1PpnFeatureExtractor,
'embedded_ssd_mobilenet_v1':
EmbeddedSSDMobileNetV1FeatureExtractor,
'ssd_pnasnet':
SSDPNASNetFeatureExtractor,
'ssd_mobiledet_cpu':
SSDMobileDetCPUFeatureExtractor,
'ssd_mobiledet_dsp':
SSDMobileDetDSPFeatureExtractor,
'ssd_mobiledet_edgetpu':
SSDMobileDetEdgeTPUFeatureExtractor,
'ssd_mobiledet_gpu':
SSDMobileDetGPUFeatureExtractor,
'ssd_spaghettinet':
SSDSpaghettinetFeatureExtractor,
}
FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP = {
'faster_rcnn_nas':
frcnn_nas.FasterRCNNNASFeatureExtractor,
'faster_rcnn_pnas':
frcnn_pnas.FasterRCNNPNASFeatureExtractor,
'faster_rcnn_inception_resnet_v2':
frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor,
'faster_rcnn_inception_v2':
frcnn_inc_v2.FasterRCNNInceptionV2FeatureExtractor,
'faster_rcnn_resnet50':
frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor,
'faster_rcnn_resnet101':
frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor,
'faster_rcnn_resnet152':
frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor,
}
CENTER_NET_EXTRACTOR_FUNCTION_MAP = {}
FEATURE_EXTRACTOR_MAPS = [
SSD_FEATURE_EXTRACTOR_CLASS_MAP,
FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP,
CENTER_NET_EXTRACTOR_FUNCTION_MAP
]
def _check_feature_extractor_exists(feature_extractor_type):
feature_extractors = set().union(*FEATURE_EXTRACTOR_MAPS)
if feature_extractor_type not in feature_extractors:
tf_version_str = '2' if tf_version.is_tf2() else '1'
raise ValueError(
'{} is not supported for tf version {}. See `model_builder.py` for '
        'feature extractors compatible with different versions of '
'Tensorflow'.format(feature_extractor_type, tf_version_str))
def _build_ssd_feature_extractor(feature_extractor_config,
is_training,
freeze_batchnorm,
reuse_weights=None):
"""Builds a ssd_meta_arch.SSDFeatureExtractor based on config.
Args:
    feature_extractor_config: An SSDFeatureExtractor proto config from ssd.proto.
is_training: True if this feature extractor is being built for training.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
reuse_weights: if the feature extractor should reuse weights.
Returns:
ssd_meta_arch.SSDFeatureExtractor based on config.
Raises:
ValueError: On invalid feature extractor type.
"""
feature_type = feature_extractor_config.type
depth_multiplier = feature_extractor_config.depth_multiplier
min_depth = feature_extractor_config.min_depth
pad_to_multiple = feature_extractor_config.pad_to_multiple
use_explicit_padding = feature_extractor_config.use_explicit_padding
use_depthwise = feature_extractor_config.use_depthwise
is_keras = tf_version.is_tf2()
if is_keras:
conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
feature_extractor_config.conv_hyperparams)
else:
conv_hyperparams = hyperparams_builder.build(
feature_extractor_config.conv_hyperparams, is_training)
override_base_feature_extractor_hyperparams = (
feature_extractor_config.override_base_feature_extractor_hyperparams)
if not is_keras and feature_type not in SSD_FEATURE_EXTRACTOR_CLASS_MAP:
raise ValueError('Unknown ssd feature_extractor: {}'.format(feature_type))
if is_keras:
feature_extractor_class = SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[
feature_type]
else:
feature_extractor_class = SSD_FEATURE_EXTRACTOR_CLASS_MAP[feature_type]
kwargs = {
'is_training':
is_training,
'depth_multiplier':
depth_multiplier,
'min_depth':
min_depth,
'pad_to_multiple':
pad_to_multiple,
'use_explicit_padding':
use_explicit_padding,
'use_depthwise':
use_depthwise,
'override_base_feature_extractor_hyperparams':
override_base_feature_extractor_hyperparams
}
if feature_extractor_config.HasField('replace_preprocessor_with_placeholder'):
kwargs.update({
'replace_preprocessor_with_placeholder':
feature_extractor_config.replace_preprocessor_with_placeholder
})
if feature_extractor_config.HasField('num_layers'):
kwargs.update({'num_layers': feature_extractor_config.num_layers})
if is_keras:
kwargs.update({
'conv_hyperparams': conv_hyperparams,
'inplace_batchnorm_update': False,
'freeze_batchnorm': freeze_batchnorm
})
else:
kwargs.update({
'conv_hyperparams_fn': conv_hyperparams,
'reuse_weights': reuse_weights,
})
if feature_extractor_config.HasField('spaghettinet_arch_name'):
kwargs.update({
'spaghettinet_arch_name':
feature_extractor_config.spaghettinet_arch_name,
})
if feature_extractor_config.HasField('fpn'):
kwargs.update({
'fpn_min_level':
feature_extractor_config.fpn.min_level,
'fpn_max_level':
feature_extractor_config.fpn.max_level,
'additional_layer_depth':
feature_extractor_config.fpn.additional_layer_depth,
})
if feature_extractor_config.HasField('bifpn'):
kwargs.update({
'bifpn_min_level':
feature_extractor_config.bifpn.min_level,
'bifpn_max_level':
feature_extractor_config.bifpn.max_level,
'bifpn_num_iterations':
feature_extractor_config.bifpn.num_iterations,
'bifpn_num_filters':
feature_extractor_config.bifpn.num_filters,
'bifpn_combine_method':
feature_extractor_config.bifpn.combine_method,
'use_native_resize_op':
feature_extractor_config.bifpn.use_native_resize_op,
})
return feature_extractor_class(**kwargs)
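# A minimal sketch (not part of the original file) of the text proto this
# builder consumes. Field names mirror the attributes read above; anything
# omitted falls back to its proto default:
#
#   feature_extractor {
#     type: 'ssd_mobilenet_v2_fpn_keras'
#     depth_multiplier: 1.0
#     min_depth: 16
#     pad_to_multiple: 32
#     conv_hyperparams { ... }
#     fpn { min_level: 3 max_level: 7 additional_layer_depth: 256 }
#   }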
def _build_ssd_model(ssd_config, is_training, add_summaries):
"""Builds an SSD detection model based on the model config.
Args:
ssd_config: A ssd.proto object containing the config for the desired
SSDMetaArch.
is_training: True if this model is being built for training purposes.
add_summaries: Whether to add tf summaries in the model.
Returns:
SSDMetaArch based on the config.
Raises:
ValueError: If ssd_config.type is not recognized (i.e. not registered in
model_class_map).
"""
num_classes = ssd_config.num_classes
_check_feature_extractor_exists(ssd_config.feature_extractor.type)
# Feature extractor
feature_extractor = _build_ssd_feature_extractor(
feature_extractor_config=ssd_config.feature_extractor,
freeze_batchnorm=ssd_config.freeze_batchnorm,
is_training=is_training)
box_coder = box_coder_builder.build(ssd_config.box_coder)
matcher = matcher_builder.build(ssd_config.matcher)
region_similarity_calculator = sim_calc.build(
ssd_config.similarity_calculator)
encode_background_as_zeros = ssd_config.encode_background_as_zeros
negative_class_weight = ssd_config.negative_class_weight
anchor_generator = anchor_generator_builder.build(
ssd_config.anchor_generator)
if feature_extractor.is_keras_model:
ssd_box_predictor = box_predictor_builder.build_keras(
hyperparams_fn=hyperparams_builder.KerasLayerHyperparams,
freeze_batchnorm=ssd_config.freeze_batchnorm,
inplace_batchnorm_update=False,
num_predictions_per_location_list=anchor_generator
.num_anchors_per_location(),
box_predictor_config=ssd_config.box_predictor,
is_training=is_training,
num_classes=num_classes,
add_background_class=ssd_config.add_background_class)
else:
ssd_box_predictor = box_predictor_builder.build(
hyperparams_builder.build, ssd_config.box_predictor, is_training,
num_classes, ssd_config.add_background_class)
image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
ssd_config.post_processing)
(classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, random_example_sampler,
expected_loss_weights_fn) = losses_builder.build(ssd_config.loss)
normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize
equalization_loss_config = ops.EqualizationLossConfig(
weight=ssd_config.loss.equalization_loss.weight,
exclude_prefixes=ssd_config.loss.equalization_loss.exclude_prefixes)
target_assigner_instance = target_assigner.TargetAssigner(
region_similarity_calculator,
matcher,
box_coder,
negative_class_weight=negative_class_weight)
ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch
kwargs = {}
return ssd_meta_arch_fn(
is_training=is_training,
anchor_generator=anchor_generator,
box_predictor=ssd_box_predictor,
box_coder=box_coder,
feature_extractor=feature_extractor,
encode_background_as_zeros=encode_background_as_zeros,
image_resizer_fn=image_resizer_fn,
non_max_suppression_fn=non_max_suppression_fn,
score_conversion_fn=score_conversion_fn,
classification_loss=classification_loss,
localization_loss=localization_loss,
classification_loss_weight=classification_weight,
localization_loss_weight=localization_weight,
normalize_loss_by_num_matches=normalize_loss_by_num_matches,
hard_example_miner=hard_example_miner,
target_assigner_instance=target_assigner_instance,
add_summaries=add_summaries,
normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
freeze_batchnorm=ssd_config.freeze_batchnorm,
inplace_batchnorm_update=ssd_config.inplace_batchnorm_update,
add_background_class=ssd_config.add_background_class,
explicit_background_class=ssd_config.explicit_background_class,
random_example_sampler=random_example_sampler,
expected_loss_weights_fn=expected_loss_weights_fn,
use_confidences_as_targets=ssd_config.use_confidences_as_targets,
implicit_example_weight=ssd_config.implicit_example_weight,
equalization_loss_config=equalization_loss_config,
return_raw_detections_during_predict=(
ssd_config.return_raw_detections_during_predict),
**kwargs)
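# Illustrative usage sketch (hypothetical, not part of the original file):
# this helper is normally reached through the module-level build() with a
# model_pb2.DetectionModel whose `ssd` field is populated, e.g.
#
#   from google.protobuf import text_format
#   from object_detection.protos import model_pb2
#
#   model_proto = model_pb2.DetectionModel()
#   text_format.Merge("ssd { num_classes: 90 ... }", model_proto)
#   detection_model = _build_ssd_model(
#       model_proto.ssd, is_training=True, add_summaries=True)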
def _build_faster_rcnn_feature_extractor(
feature_extractor_config, is_training, reuse_weights=True,
inplace_batchnorm_update=False):
"""Builds a faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config.
Args:
feature_extractor_config: A FasterRcnnFeatureExtractor proto config from
faster_rcnn.proto.
is_training: True if this feature extractor is being built for training.
reuse_weights: if the feature extractor should reuse weights.
    inplace_batchnorm_update: Whether to update batch_norm in place during
      training. This is required for batch norm to work correctly on TPUs.
      When this is false, the user must add a control dependency on
      tf.GraphKeys.UPDATE_OPS for the train/loss op in order to update the
      batch norm moving average parameters.
Returns:
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config.
Raises:
ValueError: On invalid feature extractor type.
"""
if inplace_batchnorm_update:
raise ValueError('inplace batchnorm updates not supported.')
feature_type = feature_extractor_config.type
first_stage_features_stride = (
feature_extractor_config.first_stage_features_stride)
batch_norm_trainable = feature_extractor_config.batch_norm_trainable
if feature_type not in FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP:
raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format(
feature_type))
feature_extractor_class = FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP[
feature_type]
return feature_extractor_class(
is_training, first_stage_features_stride,
batch_norm_trainable, reuse_weights=reuse_weights)
def _build_faster_rcnn_keras_feature_extractor(
feature_extractor_config, is_training,
inplace_batchnorm_update=False):
"""Builds a faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor from config.
Args:
feature_extractor_config: A FasterRcnnFeatureExtractor proto config from
faster_rcnn.proto.
is_training: True if this feature extractor is being built for training.
    inplace_batchnorm_update: Whether to update batch_norm in place during
      training. This is required for batch norm to work correctly on TPUs.
      When this is false, the user must add a control dependency on
      tf.GraphKeys.UPDATE_OPS for the train/loss op in order to update the
      batch norm moving average parameters.
Returns:
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor based on config.
Raises:
ValueError: On invalid feature extractor type.
"""
if inplace_batchnorm_update:
raise ValueError('inplace batchnorm updates not supported.')
feature_type = feature_extractor_config.type
first_stage_features_stride = (
feature_extractor_config.first_stage_features_stride)
batch_norm_trainable = feature_extractor_config.batch_norm_trainable
if feature_type not in FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP:
raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format(
feature_type))
feature_extractor_class = FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[
feature_type]
kwargs = {}
if feature_extractor_config.HasField('conv_hyperparams'):
kwargs.update({
'conv_hyperparams':
hyperparams_builder.KerasLayerHyperparams(
feature_extractor_config.conv_hyperparams),
'override_base_feature_extractor_hyperparams':
feature_extractor_config.override_base_feature_extractor_hyperparams
})
if feature_extractor_config.HasField('fpn'):
kwargs.update({
'fpn_min_level':
feature_extractor_config.fpn.min_level,
'fpn_max_level':
feature_extractor_config.fpn.max_level,
'additional_layer_depth':
feature_extractor_config.fpn.additional_layer_depth,
})
return feature_extractor_class(
is_training, first_stage_features_stride,
batch_norm_trainable, **kwargs)
def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries):
"""Builds a Faster R-CNN or R-FCN detection model based on the model config.
  Builds an R-FCN model if the second_stage_box_predictor in the config is of
  type `rfcn_box_predictor`; otherwise builds a Faster R-CNN model.
Args:
frcnn_config: A faster_rcnn.proto object containing the config for the
desired FasterRCNNMetaArch or RFCNMetaArch.
is_training: True if this model is being built for training purposes.
add_summaries: Whether to add tf summaries in the model.
Returns:
FasterRCNNMetaArch based on the config.
Raises:
ValueError: If frcnn_config.type is not recognized (i.e. not registered in
model_class_map).
"""
num_classes = frcnn_config.num_classes
image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)
_check_feature_extractor_exists(frcnn_config.feature_extractor.type)
is_keras = tf_version.is_tf2()
if is_keras:
feature_extractor = _build_faster_rcnn_keras_feature_extractor(
frcnn_config.feature_extractor, is_training,
inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update)
else:
feature_extractor = _build_faster_rcnn_feature_extractor(
frcnn_config.feature_extractor, is_training,
inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update)
number_of_stages = frcnn_config.number_of_stages
first_stage_anchor_generator = anchor_generator_builder.build(
frcnn_config.first_stage_anchor_generator)
first_stage_target_assigner = target_assigner.create_target_assigner(
'FasterRCNN',
'proposal',
use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate
if is_keras:
first_stage_box_predictor_arg_scope_fn = (
hyperparams_builder.KerasLayerHyperparams(
frcnn_config.first_stage_box_predictor_conv_hyperparams))
else:
first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build(
frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training)
first_stage_box_predictor_kernel_size = (
frcnn_config.first_stage_box_predictor_kernel_size)
first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth
first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size
use_static_shapes = frcnn_config.use_static_shapes and (
frcnn_config.use_static_shapes_for_eval or is_training)
first_stage_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=frcnn_config.first_stage_positive_balance_fraction,
is_static=(frcnn_config.use_static_balanced_label_sampler and
use_static_shapes))
first_stage_max_proposals = frcnn_config.first_stage_max_proposals
if (frcnn_config.first_stage_nms_iou_threshold < 0 or
frcnn_config.first_stage_nms_iou_threshold > 1.0):
raise ValueError('iou_threshold not in [0, 1.0].')
if (is_training and frcnn_config.second_stage_batch_size >
first_stage_max_proposals):
raise ValueError('second_stage_batch_size should be no greater than '
'first_stage_max_proposals.')
first_stage_non_max_suppression_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=frcnn_config.first_stage_nms_score_threshold,
iou_thresh=frcnn_config.first_stage_nms_iou_threshold,
max_size_per_class=frcnn_config.first_stage_max_proposals,
max_total_size=frcnn_config.first_stage_max_proposals,
use_static_shapes=use_static_shapes,
use_partitioned_nms=frcnn_config.use_partitioned_nms_in_first_stage,
use_combined_nms=frcnn_config.use_combined_nms_in_first_stage)
first_stage_loc_loss_weight = (
frcnn_config.first_stage_localization_loss_weight)
first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight
initial_crop_size = frcnn_config.initial_crop_size
maxpool_kernel_size = frcnn_config.maxpool_kernel_size
maxpool_stride = frcnn_config.maxpool_stride
second_stage_target_assigner = target_assigner.create_target_assigner(
'FasterRCNN',
'detection',
use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
if is_keras:
second_stage_box_predictor = box_predictor_builder.build_keras(
hyperparams_builder.KerasLayerHyperparams,
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[1],
box_predictor_config=frcnn_config.second_stage_box_predictor,
is_training=is_training,
num_classes=num_classes)
else:
second_stage_box_predictor = box_predictor_builder.build(
hyperparams_builder.build,
frcnn_config.second_stage_box_predictor,
is_training=is_training,
num_classes=num_classes)
second_stage_batch_size = frcnn_config.second_stage_batch_size
second_stage_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=frcnn_config.second_stage_balance_fraction,
is_static=(frcnn_config.use_static_balanced_label_sampler and
use_static_shapes))
(second_stage_non_max_suppression_fn, second_stage_score_conversion_fn
) = post_processing_builder.build(frcnn_config.second_stage_post_processing)
second_stage_localization_loss_weight = (
frcnn_config.second_stage_localization_loss_weight)
second_stage_classification_loss = (
losses_builder.build_faster_rcnn_classification_loss(
frcnn_config.second_stage_classification_loss))
second_stage_classification_loss_weight = (
frcnn_config.second_stage_classification_loss_weight)
second_stage_mask_prediction_loss_weight = (
frcnn_config.second_stage_mask_prediction_loss_weight)
hard_example_miner = None
if frcnn_config.HasField('hard_example_miner'):
hard_example_miner = losses_builder.build_hard_example_miner(
frcnn_config.hard_example_miner,
second_stage_classification_loss_weight,
second_stage_localization_loss_weight)
crop_and_resize_fn = (
spatial_ops.multilevel_matmul_crop_and_resize
if frcnn_config.use_matmul_crop_and_resize
else spatial_ops.multilevel_native_crop_and_resize)
clip_anchors_to_image = (
frcnn_config.clip_anchors_to_image)
common_kwargs = {
'is_training':
is_training,
'num_classes':
num_classes,
'image_resizer_fn':
image_resizer_fn,
'feature_extractor':
feature_extractor,
'number_of_stages':
number_of_stages,
'first_stage_anchor_generator':
first_stage_anchor_generator,
'first_stage_target_assigner':
first_stage_target_assigner,
'first_stage_atrous_rate':
first_stage_atrous_rate,
'first_stage_box_predictor_arg_scope_fn':
first_stage_box_predictor_arg_scope_fn,
'first_stage_box_predictor_kernel_size':
first_stage_box_predictor_kernel_size,
'first_stage_box_predictor_depth':
first_stage_box_predictor_depth,
'first_stage_minibatch_size':
first_stage_minibatch_size,
'first_stage_sampler':
first_stage_sampler,
'first_stage_non_max_suppression_fn':
first_stage_non_max_suppression_fn,
'first_stage_max_proposals':
first_stage_max_proposals,
'first_stage_localization_loss_weight':
first_stage_loc_loss_weight,
'first_stage_objectness_loss_weight':
first_stage_obj_loss_weight,
'second_stage_target_assigner':
second_stage_target_assigner,
'second_stage_batch_size':
second_stage_batch_size,
'second_stage_sampler':
second_stage_sampler,
'second_stage_non_max_suppression_fn':
second_stage_non_max_suppression_fn,
'second_stage_score_conversion_fn':
second_stage_score_conversion_fn,
'second_stage_localization_loss_weight':
second_stage_localization_loss_weight,
'second_stage_classification_loss':
second_stage_classification_loss,
'second_stage_classification_loss_weight':
second_stage_classification_loss_weight,
'hard_example_miner':
hard_example_miner,
'add_summaries':
add_summaries,
'crop_and_resize_fn':
crop_and_resize_fn,
'clip_anchors_to_image':
clip_anchors_to_image,
'use_static_shapes':
use_static_shapes,
'resize_masks':
frcnn_config.resize_masks,
'return_raw_detections_during_predict':
frcnn_config.return_raw_detections_during_predict,
'output_final_box_features':
frcnn_config.output_final_box_features,
'output_final_box_rpn_features':
frcnn_config.output_final_box_rpn_features,
}
if ((not is_keras and isinstance(second_stage_box_predictor,
rfcn_box_predictor.RfcnBoxPredictor)) or
(is_keras and
isinstance(second_stage_box_predictor,
rfcn_keras_box_predictor.RfcnKerasBoxPredictor))):
return rfcn_meta_arch.RFCNMetaArch(
second_stage_rfcn_box_predictor=second_stage_box_predictor,
**common_kwargs)
elif frcnn_config.HasField('context_config'):
context_config = frcnn_config.context_config
common_kwargs.update({
'attention_bottleneck_dimension':
context_config.attention_bottleneck_dimension,
'attention_temperature':
context_config.attention_temperature,
'use_self_attention':
context_config.use_self_attention,
'use_long_term_attention':
context_config.use_long_term_attention,
'self_attention_in_sequence':
context_config.self_attention_in_sequence,
'num_attention_heads':
context_config.num_attention_heads,
'num_attention_layers':
context_config.num_attention_layers,
'attention_position':
context_config.attention_position
})
return context_rcnn_meta_arch.ContextRCNNMetaArch(
initial_crop_size=initial_crop_size,
maxpool_kernel_size=maxpool_kernel_size,
maxpool_stride=maxpool_stride,
second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
second_stage_mask_prediction_loss_weight=(
second_stage_mask_prediction_loss_weight),
**common_kwargs)
else:
return faster_rcnn_meta_arch.FasterRCNNMetaArch(
initial_crop_size=initial_crop_size,
maxpool_kernel_size=maxpool_kernel_size,
maxpool_stride=maxpool_stride,
second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
second_stage_mask_prediction_loss_weight=(
second_stage_mask_prediction_loss_weight),
**common_kwargs)
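# Illustrative sketch (not from the original file): which meta-architecture the
# builder above returns depends on the config. An `rfcn_box_predictor` second
# stage yields RFCNMetaArch, a populated `context_config` yields
# ContextRCNNMetaArch, and everything else yields FasterRCNNMetaArch, e.g.
#
#   faster_rcnn {
#     num_classes: 90
#     ...
#     context_config {
#       attention_bottleneck_dimension: 2048
#       attention_temperature: 0.01
#     }
#   }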
EXPERIMENTAL_META_ARCH_BUILDER_MAP = {
}
def _build_experimental_model(config, is_training, add_summaries=True):
return EXPERIMENTAL_META_ARCH_BUILDER_MAP[config.name](
is_training, add_summaries)
# The class ID in the groundtruth/model architecture is usually 0-based while
# the ID in the label map is 1-based. The offset is used to convert between
# the two.
CLASS_ID_OFFSET = 1
KEYPOINT_STD_DEV_DEFAULT = 1.0
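# For example, a label map entry `item { id: 1 name: 'person' }` corresponds to
# class id 0 inside the model (1 - CLASS_ID_OFFSET).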
def keypoint_proto_to_params(kp_config, keypoint_map_dict):
"""Converts CenterNet.KeypointEstimation proto to parameter namedtuple."""
label_map_item = keypoint_map_dict[kp_config.keypoint_class_name]
classification_loss, localization_loss, _, _, _, _, _ = (
losses_builder.build(kp_config.loss))
keypoint_indices = [
keypoint.id for keypoint in label_map_item.keypoints
]
keypoint_labels = [
keypoint.label for keypoint in label_map_item.keypoints
]
keypoint_std_dev_dict = {
label: KEYPOINT_STD_DEV_DEFAULT for label in keypoint_labels
}
if kp_config.keypoint_label_to_std:
for label, value in kp_config.keypoint_label_to_std.items():
keypoint_std_dev_dict[label] = value
keypoint_std_dev = [keypoint_std_dev_dict[label] for label in keypoint_labels]
if kp_config.HasField('heatmap_head_params'):
heatmap_head_num_filters = list(kp_config.heatmap_head_params.num_filters)
heatmap_head_kernel_sizes = list(kp_config.heatmap_head_params.kernel_sizes)
else:
heatmap_head_num_filters = [256]
heatmap_head_kernel_sizes = [3]
if kp_config.HasField('offset_head_params'):
offset_head_num_filters = list(kp_config.offset_head_params.num_filters)
offset_head_kernel_sizes = list(kp_config.offset_head_params.kernel_sizes)
else:
offset_head_num_filters = [256]
offset_head_kernel_sizes = [3]
if kp_config.HasField('regress_head_params'):
regress_head_num_filters = list(kp_config.regress_head_params.num_filters)
regress_head_kernel_sizes = list(
kp_config.regress_head_params.kernel_sizes)
else:
regress_head_num_filters = [256]
regress_head_kernel_sizes = [3]
return center_net_meta_arch.KeypointEstimationParams(
task_name=kp_config.task_name,
class_id=label_map_item.id - CLASS_ID_OFFSET,
keypoint_indices=keypoint_indices,
classification_loss=classification_loss,
localization_loss=localization_loss,
keypoint_labels=keypoint_labels,
keypoint_std_dev=keypoint_std_dev,
task_loss_weight=kp_config.task_loss_weight,
keypoint_regression_loss_weight=kp_config.keypoint_regression_loss_weight,
keypoint_heatmap_loss_weight=kp_config.keypoint_heatmap_loss_weight,
keypoint_offset_loss_weight=kp_config.keypoint_offset_loss_weight,
heatmap_bias_init=kp_config.heatmap_bias_init,
keypoint_candidate_score_threshold=(
kp_config.keypoint_candidate_score_threshold),
num_candidates_per_keypoint=kp_config.num_candidates_per_keypoint,
peak_max_pool_kernel_size=kp_config.peak_max_pool_kernel_size,
unmatched_keypoint_score=kp_config.unmatched_keypoint_score,
box_scale=kp_config.box_scale,
candidate_search_scale=kp_config.candidate_search_scale,
candidate_ranking_mode=kp_config.candidate_ranking_mode,
offset_peak_radius=kp_config.offset_peak_radius,
per_keypoint_offset=kp_config.per_keypoint_offset,
predict_depth=kp_config.predict_depth,
per_keypoint_depth=kp_config.per_keypoint_depth,
keypoint_depth_loss_weight=kp_config.keypoint_depth_loss_weight,
score_distance_offset=kp_config.score_distance_offset,
clip_out_of_frame_keypoints=kp_config.clip_out_of_frame_keypoints,
rescore_instances=kp_config.rescore_instances,
heatmap_head_num_filters=heatmap_head_num_filters,
heatmap_head_kernel_sizes=heatmap_head_kernel_sizes,
offset_head_num_filters=offset_head_num_filters,
offset_head_kernel_sizes=offset_head_kernel_sizes,
regress_head_num_filters=regress_head_num_filters,
regress_head_kernel_sizes=regress_head_kernel_sizes,
score_distance_multiplier=kp_config.score_distance_multiplier,
std_dev_multiplier=kp_config.std_dev_multiplier,
rescoring_threshold=kp_config.rescoring_threshold,
gaussian_denom_ratio=kp_config.gaussian_denom_ratio,
argmax_postprocessing=kp_config.argmax_postprocessing)
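# Illustrative sketch (field names taken from the attributes read above; the
# name and placement of the enclosing CenterNet proto field are assumptions):
#
#   keypoint_estimation_task {
#     task_name: 'human_pose'
#     keypoint_class_name: 'person'
#     loss { ... }
#     keypoint_label_to_std { key: 'nose' value: 0.8 }
#     heatmap_head_params { num_filters: 256 kernel_sizes: 3 }
#   }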
def object_detection_proto_to_params(od_config):
"""Converts CenterNet.ObjectDetection proto to parameter namedtuple."""
loss = losses_pb2.Loss()
  # Add dummy classification loss to avoid the loss_builder raising an error.
# TODO(yuhuic): update the loss builder to take the classification loss
# directly.
loss.classification_loss.weighted_sigmoid.CopyFrom(
losses_pb2.WeightedSigmoidClassificationLoss())
loss.localization_loss.CopyFrom(od_config.localization_loss)
_, localization_loss, _, _, _, _, _ = (losses_builder.build(loss))
if od_config.HasField('scale_head_params'):
scale_head_num_filters = list(od_config.scale_head_params.num_filters)
scale_head_kernel_sizes = list(od_config.scale_head_params.kernel_sizes)
else:
scale_head_num_filters = [256]
scale_head_kernel_sizes = [3]
if od_config.HasField('offset_head_params'):
offset_head_num_filters = list(od_config.offset_head_params.num_filters)
offset_head_kernel_sizes = list(od_config.offset_head_params.kernel_sizes)
else:
offset_head_num_filters = [256]
offset_head_kernel_sizes = [3]
return center_net_meta_arch.ObjectDetectionParams(
localization_loss=localization_loss,
scale_loss_weight=od_config.scale_loss_weight,
offset_loss_weight=od_config.offset_loss_weight,
task_loss_weight=od_config.task_loss_weight,
scale_head_num_filters=scale_head_num_filters,
scale_head_kernel_sizes=scale_head_kernel_sizes,
offset_head_num_filters=offset_head_num_filters,
offset_head_kernel_sizes=offset_head_kernel_sizes)
def object_center_proto_to_params(oc_config):
"""Converts CenterNet.ObjectCenter proto to parameter namedtuple."""
loss = losses_pb2.Loss()
# Add dummy localization loss to avoid the loss_builder throwing error.
# TODO(yuhuic): update the loss builder to take the localization loss
# directly.
loss.localization_loss.weighted_l2.CopyFrom(
losses_pb2.WeightedL2LocalizationLoss())
loss.classification_loss.CopyFrom(oc_config.classification_loss)
classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss))
keypoint_weights_for_center = []
if oc_config.keypoint_weights_for_center:
keypoint_weights_for_center = list(oc_config.keypoint_weights_for_center)
if oc_config.HasField('center_head_params'):
center_head_num_filters = list(oc_config.center_head_params.num_filters)
center_head_kernel_sizes = list(oc_config.center_head_params.kernel_sizes)
else:
center_head_num_filters = [256]
center_head_kernel_sizes = [3]
return center_net_meta_arch.ObjectCenterParams(
classification_loss=classification_loss,
object_center_loss_weight=oc_config.object_center_loss_weight,
heatmap_bias_init=oc_config.heatmap_bias_init,
min_box_overlap_iou=oc_config.min_box_overlap_iou,
max_box_predictions=oc_config.max_box_predictions,
use_labeled_classes=oc_config.use_labeled_classes,
keypoint_weights_for_center=keypoint_weights_for_center,
center_head_num_filters=center_head_num_filters,
center_head_kernel_sizes=center_head_kernel_sizes,
peak_max_pool_kernel_size=oc_config.peak_max_pool_kernel_size)
def mask_proto_to_params(mask_config):
"""Converts CenterNet.MaskEstimation proto to parameter namedtuple."""
loss = losses_pb2.Loss()
# Add dummy localization loss to avoid the loss_builder throwing error.
loss.localization_loss.weighted_l2.CopyFrom(
losses_pb2.WeightedL2LocalizationLoss())
loss.classification_loss.CopyFrom(mask_config.classification_loss)
classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss))
if mask_config.HasField('mask_head_params'):
mask_head_num_filters = list(mask_config.mask_head_params.num_filters)
mask_head_kernel_sizes = list(mask_config.mask_head_params.kernel_sizes)
else:
mask_head_num_filters = [256]
mask_head_kernel_sizes = [3]
return center_net_meta_arch.MaskParams(
classification_loss=classification_loss,
task_loss_weight=mask_config.task_loss_weight,
mask_height=mask_config.mask_height,
mask_width=mask_config.mask_width,
score_threshold=mask_config.score_threshold,
heatmap_bias_init=mask_config.heatmap_bias_init,
mask_head_num_filters=mask_head_num_filters,
mask_head_kernel_sizes=mask_head_kernel_sizes)
def densepose_proto_to_params(densepose_config):
"""Converts CenterNet.DensePoseEstimation proto to parameter namedtuple."""
classification_loss, localization_loss, _, _, _, _, _ = (
losses_builder.build(densepose_config.loss))
return center_net_meta_arch.DensePoseParams(
class_id=densepose_config.class_id,
classification_loss=classification_loss,
localization_loss=localization_loss,
part_loss_weight=densepose_config.part_loss_weight,
coordinate_loss_weight=densepose_config.coordinate_loss_weight,
num_parts=densepose_config.num_parts,
task_loss_weight=densepose_config.task_loss_weight,
upsample_to_input_res=densepose_config.upsample_to_input_res,
heatmap_bias_init=densepose_config.heatmap_bias_init)
def tracking_proto_to_params(tracking_config):
"""Converts CenterNet.TrackEstimation proto to parameter namedtuple."""
loss = losses_pb2.Loss()
# Add dummy localization loss to avoid the loss_builder throwing error.
# TODO(yuhuic): update the loss builder to take the localization loss
# directly.
loss.localization_loss.weighted_l2.CopyFrom(
losses_pb2.WeightedL2LocalizationLoss())
loss.classification_loss.CopyFrom(tracking_config.classification_loss)
classification_loss, _, _, _, _, _, _ = losses_builder.build(loss)
return center_net_meta_arch.TrackParams(
num_track_ids=tracking_config.num_track_ids,
reid_embed_size=tracking_config.reid_embed_size,
classification_loss=classification_loss,
num_fc_layers=tracking_config.num_fc_layers,
task_loss_weight=tracking_config.task_loss_weight)
def temporal_offset_proto_to_params(temporal_offset_config):
"""Converts CenterNet.TemporalOffsetEstimation proto to param-tuple."""
loss = losses_pb2.Loss()
# Add dummy classification loss to avoid the loss_builder throwing error.
# TODO(yuhuic): update the loss builder to take the classification loss
# directly.
loss.classification_loss.weighted_sigmoid.CopyFrom(
losses_pb2.WeightedSigmoidClassificationLoss())
loss.localization_loss.CopyFrom(temporal_offset_config.localization_loss)
_, localization_loss, _, _, _, _, _ = losses_builder.build(loss)
return center_net_meta_arch.TemporalOffsetParams(
localization_loss=localization_loss,
task_loss_weight=temporal_offset_config.task_loss_weight)
def _build_center_net_model(center_net_config, is_training, add_summaries):
"""Build a CenterNet detection model.
Args:
center_net_config: A CenterNet proto object with model configuration.
is_training: True if this model is being built for training purposes.
add_summaries: Whether to add tf summaries in the model.
Returns:
CenterNetMetaArch based on the config.
"""
image_resizer_fn = image_resizer_builder.build(
center_net_config.image_resizer)
_check_feature_extractor_exists(center_net_config.feature_extractor.type)
feature_extractor = _build_center_net_feature_extractor(
center_net_config.feature_extractor, is_training)
object_center_params = object_center_proto_to_params(
center_net_config.object_center_params)
object_detection_params = None
if center_net_config.HasField('object_detection_task'):
object_detection_params = object_detection_proto_to_params(
center_net_config.object_detection_task)
if center_net_config.HasField('deepmac_mask_estimation'):
    logging.warning(('Building experimental DeepMAC meta-arch.'
                     ' Some features may be omitted.'))
deepmac_params = deepmac_meta_arch.deepmac_proto_to_params(
center_net_config.deepmac_mask_estimation)
return deepmac_meta_arch.DeepMACMetaArch(
is_training=is_training,
add_summaries=add_summaries,
num_classes=center_net_config.num_classes,
feature_extractor=feature_extractor,
image_resizer_fn=image_resizer_fn,
object_center_params=object_center_params,
object_detection_params=object_detection_params,
deepmac_params=deepmac_params)
keypoint_params_dict = None
if center_net_config.keypoint_estimation_task:
label_map_proto = label_map_util.load_labelmap(
center_net_config.keypoint_label_map_path)
keypoint_map_dict = {
item.name: item for item in label_map_proto.item if item.keypoints
}
keypoint_params_dict = {}
keypoint_class_id_set = set()
all_keypoint_indices = []
for task in center_net_config.keypoint_estimation_task:
kp_params = keypoint_proto_to_params(task, keypoint_map_dict)
keypoint_params_dict[task.task_name] = kp_params
all_keypoint_indices.extend(kp_params.keypoint_indices)
if kp_params.class_id in keypoint_class_id_set:
      raise ValueError(('Multiple keypoint tasks mapping to the same class id '
                        'is not allowed: %d' % kp_params.class_id))
else:
keypoint_class_id_set.add(kp_params.class_id)
if len(all_keypoint_indices) > len(set(all_keypoint_indices)):
raise ValueError('Some keypoint indices are used more than once.')
mask_params = None
if center_net_config.HasField('mask_estimation_task'):
mask_params = mask_proto_to_params(center_net_config.mask_estimation_task)
densepose_params = None
if center_net_config.HasField('densepose_estimation_task'):
densepose_params = densepose_proto_to_params(
center_net_config.densepose_estimation_task)
track_params = None
if center_net_config.HasField('track_estimation_task'):
track_params = tracking_proto_to_params(
center_net_config.track_estimation_task)
temporal_offset_params = None
if center_net_config.HasField('temporal_offset_task'):
temporal_offset_params = temporal_offset_proto_to_params(
center_net_config.temporal_offset_task)
non_max_suppression_fn = None
if center_net_config.HasField('post_processing'):
non_max_suppression_fn, _ = post_processing_builder.build(
center_net_config.post_processing)
return center_net_meta_arch.CenterNetMetaArch(
is_training=is_training,
add_summaries=add_summaries,
num_classes=center_net_config.num_classes,
feature_extractor=feature_extractor,
image_resizer_fn=image_resizer_fn,
object_center_params=object_center_params,
object_detection_params=object_detection_params,
keypoint_params_dict=keypoint_params_dict,
mask_params=mask_params,
densepose_params=densepose_params,
track_params=track_params,
temporal_offset_params=temporal_offset_params,
use_depthwise=center_net_config.use_depthwise,
compute_heatmap_sparse=center_net_config.compute_heatmap_sparse,
non_max_suppression_fn=non_max_suppression_fn,
output_prediction_dict=center_net_config.output_prediction_dict)
def _build_center_net_feature_extractor(feature_extractor_config, is_training):
"""Build a CenterNet feature extractor from the given config."""
if feature_extractor_config.type not in CENTER_NET_EXTRACTOR_FUNCTION_MAP:
raise ValueError('\'{}\' is not a known CenterNet feature extractor type'
.format(feature_extractor_config.type))
# For backwards compatibility:
use_separable_conv = (
feature_extractor_config.use_separable_conv or
feature_extractor_config.type == 'mobilenet_v2_fpn_sep_conv')
kwargs = {
'channel_means':
list(feature_extractor_config.channel_means),
'channel_stds':
list(feature_extractor_config.channel_stds),
'bgr_ordering':
feature_extractor_config.bgr_ordering,
}
if feature_extractor_config.HasField('depth_multiplier'):
kwargs.update({
'depth_multiplier': feature_extractor_config.depth_multiplier,
})
if feature_extractor_config.HasField('use_separable_conv'):
kwargs.update({
'use_separable_conv': use_separable_conv,
})
if feature_extractor_config.HasField('upsampling_interpolation'):
kwargs.update({
'upsampling_interpolation':
feature_extractor_config.upsampling_interpolation,
})
if feature_extractor_config.HasField('use_depthwise'):
kwargs.update({
'use_depthwise': feature_extractor_config.use_depthwise,
})
return CENTER_NET_EXTRACTOR_FUNCTION_MAP[feature_extractor_config.type](
**kwargs)
META_ARCH_BUILDER_MAP = {
'ssd': _build_ssd_model,
'faster_rcnn': _build_faster_rcnn_model,
'experimental_model': _build_experimental_model,
'center_net': _build_center_net_model
}
def build(model_config, is_training, add_summaries=True):
"""Builds a DetectionModel based on the model config.
Args:
model_config: A model.proto object containing the config for the desired
DetectionModel.
is_training: True if this model is being built for training purposes.
add_summaries: Whether to add tensorflow summaries in the model graph.
Returns:
DetectionModel based on the config.
Raises:
ValueError: On invalid meta architecture or model.
"""
if not isinstance(model_config, model_pb2.DetectionModel):
raise ValueError('model_config not of type model_pb2.DetectionModel.')
meta_architecture = model_config.WhichOneof('model')
if meta_architecture not in META_ARCH_BUILDER_MAP:
raise ValueError('Unknown meta architecture: {}'.format(meta_architecture))
else:
build_func = META_ARCH_BUILDER_MAP[meta_architecture]
return build_func(getattr(model_config, meta_architecture), is_training,
add_summaries)
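# Hedged usage sketch (added for illustration; not part of the original
# module). Shows how `build` is normally driven by a parsed pipeline config:
# `config_util.get_configs_from_pipeline_file` is the existing helper in this
# codebase, while the pipeline path below is a hypothetical placeholder.
def _example_build_from_pipeline(
    pipeline_config_path='path/to/pipeline.config'):
  from object_detection.utils import config_util  # local import for the sketch
  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  # configs['model'] is a model_pb2.DetectionModel; `build` dispatches on its
  # `model` oneof through META_ARCH_BUILDER_MAP.
  return build(configs['model'], is_training=True, add_summaries=True)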
| 56,068 | 43.358386 | 121 | py |
models | models-master/research/object_detection/builders/target_assigner_builder.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection box coder from configuration."""
from object_detection.builders import box_coder_builder
from object_detection.builders import matcher_builder
from object_detection.builders import region_similarity_calculator_builder
from object_detection.core import target_assigner
def build(target_assigner_config):
"""Builds a TargetAssigner object based on the config.
Args:
target_assigner_config: A target_assigner proto message containing config
for the desired target assigner.
Returns:
TargetAssigner object based on the config.
"""
matcher_instance = matcher_builder.build(target_assigner_config.matcher)
similarity_calc_instance = region_similarity_calculator_builder.build(
target_assigner_config.similarity_calculator)
box_coder = box_coder_builder.build(target_assigner_config.box_coder)
return target_assigner.TargetAssigner(
matcher=matcher_instance,
similarity_calc=similarity_calc_instance,
box_coder_instance=box_coder)
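# Hedged usage sketch (illustrative only, not part of the original module).
# Builds a TargetAssigner from a text-format proto; the matcher, similarity
# calculator and box coder chosen below are assumptions rather than a
# recommended configuration.
def _example_build_target_assigner():
  from google.protobuf import text_format  # local imports for the sketch
  from object_detection.protos import target_assigner_pb2
  config = target_assigner_pb2.TargetAssigner()
  text_format.Merge(
      """
      matcher { argmax_matcher {} }
      similarity_calculator { iou_similarity {} }
      box_coder { faster_rcnn_box_coder {} }
      """, config)
  return build(config)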
| 1,715 | 40.853659 | 80 | py |
models | models-master/research/object_detection/builders/region_similarity_calculator_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for region_similarity_calculator_builder."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import region_similarity_calculator_builder
from object_detection.core import region_similarity_calculator
from object_detection.protos import region_similarity_calculator_pb2 as sim_calc_pb2
class RegionSimilarityCalculatorBuilderTest(tf.test.TestCase):
def testBuildIoaSimilarityCalculator(self):
similarity_calc_text_proto = """
ioa_similarity {
}
"""
similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator()
text_format.Merge(similarity_calc_text_proto, similarity_calc_proto)
similarity_calc = region_similarity_calculator_builder.build(
similarity_calc_proto)
self.assertTrue(isinstance(similarity_calc,
region_similarity_calculator.IoaSimilarity))
def testBuildIouSimilarityCalculator(self):
similarity_calc_text_proto = """
iou_similarity {
}
"""
similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator()
text_format.Merge(similarity_calc_text_proto, similarity_calc_proto)
similarity_calc = region_similarity_calculator_builder.build(
similarity_calc_proto)
self.assertTrue(isinstance(similarity_calc,
region_similarity_calculator.IouSimilarity))
def testBuildNegSqDistSimilarityCalculator(self):
similarity_calc_text_proto = """
neg_sq_dist_similarity {
}
"""
similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator()
text_format.Merge(similarity_calc_text_proto, similarity_calc_proto)
similarity_calc = region_similarity_calculator_builder.build(
similarity_calc_proto)
self.assertTrue(isinstance(similarity_calc,
region_similarity_calculator.
NegSqDistSimilarity))
if __name__ == '__main__':
tf.test.main()
| 2,667 | 38.235294 | 84 | py |
models | models-master/research/object_detection/builders/box_predictor_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function to build box predictor from configuration."""
import collections
import tensorflow.compat.v1 as tf
from object_detection.predictors import convolutional_box_predictor
from object_detection.predictors import convolutional_keras_box_predictor
from object_detection.predictors import mask_rcnn_box_predictor
from object_detection.predictors import mask_rcnn_keras_box_predictor
from object_detection.predictors import rfcn_box_predictor
from object_detection.predictors import rfcn_keras_box_predictor
from object_detection.predictors.heads import box_head
from object_detection.predictors.heads import class_head
from object_detection.predictors.heads import keras_box_head
from object_detection.predictors.heads import keras_class_head
from object_detection.predictors.heads import keras_mask_head
from object_detection.predictors.heads import mask_head
from object_detection.protos import box_predictor_pb2
def build_convolutional_box_predictor(is_training,
num_classes,
conv_hyperparams_fn,
min_depth,
max_depth,
num_layers_before_predictor,
use_dropout,
dropout_keep_prob,
kernel_size,
box_code_size,
apply_sigmoid_to_scores=False,
add_background_class=True,
class_prediction_bias_init=0.0,
use_depthwise=False,
box_encodings_clip_range=None):
"""Builds the ConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
box_code_size: Size of encoding for each box.
apply_sigmoid_to_scores: If True, apply the sigmoid on the output
class_predictions.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: Constant value to initialize bias of the last
conv2d layer before class prediction.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
Returns:
A ConvolutionalBoxPredictor class.
"""
box_prediction_head = box_head.ConvolutionalBoxHead(
is_training=is_training,
box_code_size=box_code_size,
kernel_size=kernel_size,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
class_prediction_head = class_head.ConvolutionalClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
apply_sigmoid_to_scores=apply_sigmoid_to_scores,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise)
other_heads = {}
return convolutional_box_predictor.ConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams_fn=conv_hyperparams_fn,
num_layers_before_predictor=num_layers_before_predictor,
min_depth=min_depth,
max_depth=max_depth)
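# Hedged usage sketch (illustrative only). Builds the slim convolutional box
# predictor directly: `hyperparams_builder.build` produces the
# conv_hyperparams_fn arg_scope, and the numeric settings below are
# assumptions, not tuned values.
def _example_convolutional_box_predictor():
  from google.protobuf import text_format  # local imports for the sketch
  from object_detection.builders import hyperparams_builder
  from object_detection.protos import hyperparams_pb2
  hyperparams = hyperparams_pb2.Hyperparams()
  text_format.Merge(
      """
      regularizer { l2_regularizer {} }
      initializer { truncated_normal_initializer {} }
      """, hyperparams)
  conv_hyperparams_fn = hyperparams_builder.build(hyperparams,
                                                  is_training=True)
  return build_convolutional_box_predictor(
      is_training=True,
      num_classes=90,
      conv_hyperparams_fn=conv_hyperparams_fn,
      min_depth=0,
      max_depth=0,
      num_layers_before_predictor=0,
      use_dropout=False,
      dropout_keep_prob=0.8,
      kernel_size=3,
      box_code_size=4)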
def build_convolutional_keras_box_predictor(is_training,
num_classes,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
num_predictions_per_location_list,
min_depth,
max_depth,
num_layers_before_predictor,
use_dropout,
dropout_keep_prob,
kernel_size,
box_code_size,
add_background_class=True,
class_prediction_bias_init=0.0,
use_depthwise=False,
box_encodings_clip_range=None,
name='BoxPredictor'):
"""Builds the Keras ConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
box_code_size: Size of encoding for each box.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
name: A string name scope to assign to the box predictor. If `None`, Keras
will auto-generate one from the class name.
Returns:
A Keras ConvolutionalBoxPredictor class.
"""
box_prediction_heads = []
class_prediction_heads = []
other_heads = {}
for stack_index, num_predictions_per_location in enumerate(
num_predictions_per_location_list):
box_prediction_heads.append(
keras_box_head.ConvolutionalBoxHead(
is_training=is_training,
box_code_size=box_code_size,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
num_predictions_per_location=num_predictions_per_location,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range,
name='ConvolutionalBoxHead_%d' % stack_index))
class_prediction_heads.append(
keras_class_head.ConvolutionalClassHead(
is_training=is_training,
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
num_predictions_per_location=num_predictions_per_location,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise,
name='ConvolutionalClassHead_%d' % stack_index))
return convolutional_keras_box_predictor.ConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_heads=box_prediction_heads,
class_prediction_heads=class_prediction_heads,
other_heads=other_heads,
conv_hyperparams=conv_hyperparams,
num_layers_before_predictor=num_layers_before_predictor,
min_depth=min_depth,
max_depth=max_depth,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
name=name)
def build_weight_shared_convolutional_box_predictor(
is_training,
num_classes,
conv_hyperparams_fn,
depth,
num_layers_before_predictor,
box_code_size,
kernel_size=3,
add_background_class=True,
class_prediction_bias_init=0.0,
use_dropout=False,
dropout_keep_prob=0.8,
share_prediction_tower=False,
apply_batch_norm=True,
use_depthwise=False,
score_converter_fn=tf.identity,
box_encodings_clip_range=None,
keyword_args=None):
"""Builds and returns a WeightSharedConvolutionalBoxPredictor class.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
depth: depth of conv layers.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
box_code_size: Size of encoding for each box.
kernel_size: Size of final convolution kernel.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_dropout: Whether to apply dropout to class prediction head.
    dropout_keep_prob: Probability of keeping activations.
share_prediction_tower: Whether to share the multi-layer tower between box
prediction and class prediction heads.
apply_batch_norm: Whether to apply batch normalization to conv layers in
this predictor.
use_depthwise: Whether to use depthwise separable conv2d instead of conv2d.
score_converter_fn: Callable score converter to perform elementwise op on
class scores.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
keyword_args: A dictionary with additional args.
Returns:
A WeightSharedConvolutionalBoxPredictor class.
"""
box_prediction_head = box_head.WeightSharedConvolutionalBoxHead(
box_code_size=box_code_size,
kernel_size=kernel_size,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
class_prediction_head = (
class_head.WeightSharedConvolutionalClassHead(
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
kernel_size=kernel_size,
class_prediction_bias_init=class_prediction_bias_init,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
use_depthwise=use_depthwise,
score_converter_fn=score_converter_fn))
other_heads = {}
return convolutional_box_predictor.WeightSharedConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams_fn=conv_hyperparams_fn,
depth=depth,
num_layers_before_predictor=num_layers_before_predictor,
kernel_size=kernel_size,
apply_batch_norm=apply_batch_norm,
share_prediction_tower=share_prediction_tower,
use_depthwise=use_depthwise)
def build_weight_shared_convolutional_keras_box_predictor(
is_training,
num_classes,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
num_predictions_per_location_list,
depth,
num_layers_before_predictor,
box_code_size,
kernel_size=3,
add_background_class=True,
class_prediction_bias_init=0.0,
use_dropout=False,
dropout_keep_prob=0.8,
share_prediction_tower=False,
apply_batch_norm=True,
use_depthwise=False,
apply_conv_hyperparams_to_heads=False,
apply_conv_hyperparams_pointwise=False,
score_converter_fn=tf.identity,
box_encodings_clip_range=None,
name='WeightSharedConvolutionalBoxPredictor',
keyword_args=None):
"""Builds the Keras WeightSharedConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
depth: depth of conv layers.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
box_code_size: Size of encoding for each box.
kernel_size: Size of final convolution kernel.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_dropout: Whether to apply dropout to class prediction head.
    dropout_keep_prob: Probability of keeping activations.
share_prediction_tower: Whether to share the multi-layer tower between box
prediction and class prediction heads.
apply_batch_norm: Whether to apply batch normalization to conv layers in
this predictor.
use_depthwise: Whether to use depthwise separable conv2d instead of conv2d.
apply_conv_hyperparams_to_heads: Whether to apply conv_hyperparams to
      depthwise separable convolution layers in the box and class heads. By
default, the conv_hyperparams are only applied to layers in the predictor
tower when using depthwise separable convolutions.
apply_conv_hyperparams_pointwise: Whether to apply the conv_hyperparams to
the pointwise_initializer and pointwise_regularizer when using depthwise
separable convolutions. By default, conv_hyperparams are only applied to
the depthwise initializer and regularizer when use_depthwise is true.
score_converter_fn: Callable score converter to perform elementwise op on
class scores.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
name: A string name scope to assign to the box predictor. If `None`, Keras
will auto-generate one from the class name.
keyword_args: A dictionary with additional args.
Returns:
A Keras WeightSharedConvolutionalBoxPredictor class.
"""
if len(set(num_predictions_per_location_list)) > 1:
    raise ValueError('num predictions per location must be the same for all '
                     'feature maps, found: {}'.format(
num_predictions_per_location_list))
num_predictions_per_location = num_predictions_per_location_list[0]
box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(
box_code_size=box_code_size,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=num_predictions_per_location,
use_depthwise=use_depthwise,
apply_conv_hyperparams_to_heads=apply_conv_hyperparams_to_heads,
box_encodings_clip_range=box_encodings_clip_range,
name='WeightSharedConvolutionalBoxHead')
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=num_predictions_per_location,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise,
apply_conv_hyperparams_to_heads=apply_conv_hyperparams_to_heads,
score_converter_fn=score_converter_fn,
name='WeightSharedConvolutionalClassHead')
other_heads = {}
return (
convolutional_keras_box_predictor.WeightSharedConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams=conv_hyperparams,
depth=depth,
num_layers_before_predictor=num_layers_before_predictor,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
kernel_size=kernel_size,
apply_batch_norm=apply_batch_norm,
share_prediction_tower=share_prediction_tower,
use_depthwise=use_depthwise,
apply_conv_hyperparams_pointwise=apply_conv_hyperparams_pointwise,
name=name))
def build_mask_rcnn_keras_box_predictor(is_training,
num_classes,
fc_hyperparams,
freeze_batchnorm,
use_dropout,
dropout_keep_prob,
box_code_size,
add_background_class=True,
share_box_across_classes=False,
predict_instance_masks=False,
conv_hyperparams=None,
mask_height=14,
mask_width=14,
mask_prediction_num_conv_layers=2,
mask_prediction_conv_depth=256,
masks_are_class_agnostic=False,
convolve_then_upsample_masks=False):
"""Builds and returns a MaskRCNNKerasBoxPredictor class.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for fully connected dense ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
box_code_size: Size of encoding for each box.
add_background_class: Whether to add an implicit background class.
share_box_across_classes: Whether to share boxes across classes rather
than use a different box for each class.
predict_instance_masks: If True, will add a third stage mask prediction
to the returned class.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
mask_height: Desired output mask height. The default value is 14.
mask_width: Desired output mask width. The default value is 14.
mask_prediction_num_conv_layers: Number of convolution layers applied to
the image_features in mask prediction branch.
mask_prediction_conv_depth: The depth for the first conv2d_transpose op
applied to the image_features in the mask prediction branch. If set
to 0, the depth of the convolution layers will be automatically chosen
based on the number of object classes and the number of channels in the
image features.
masks_are_class_agnostic: Boolean determining if the mask-head is
class-agnostic or not.
convolve_then_upsample_masks: Whether to apply convolutions on mask
features before upsampling using nearest neighbor resizing. Otherwise,
mask features are resized to [`mask_height`, `mask_width`] using
bilinear resizing before applying convolutions.
Returns:
A MaskRCNNKerasBoxPredictor class.
"""
box_prediction_head = keras_box_head.MaskRCNNBoxHead(
is_training=is_training,
num_classes=num_classes,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
box_code_size=box_code_size,
share_box_across_classes=share_box_across_classes)
class_prediction_head = keras_class_head.MaskRCNNClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob)
third_stage_heads = {}
if predict_instance_masks:
third_stage_heads[
mask_rcnn_box_predictor.
MASK_PREDICTIONS] = keras_mask_head.MaskRCNNMaskHead(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
mask_height=mask_height,
mask_width=mask_width,
mask_prediction_num_conv_layers=mask_prediction_num_conv_layers,
mask_prediction_conv_depth=mask_prediction_conv_depth,
masks_are_class_agnostic=masks_are_class_agnostic,
convolve_then_upsample=convolve_then_upsample_masks)
return mask_rcnn_keras_box_predictor.MaskRCNNKerasBoxPredictor(
is_training=is_training,
num_classes=num_classes,
freeze_batchnorm=freeze_batchnorm,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
third_stage_heads=third_stage_heads)
def build_mask_rcnn_box_predictor(is_training,
num_classes,
fc_hyperparams_fn,
use_dropout,
dropout_keep_prob,
box_code_size,
add_background_class=True,
share_box_across_classes=False,
predict_instance_masks=False,
conv_hyperparams_fn=None,
mask_height=14,
mask_width=14,
mask_prediction_num_conv_layers=2,
mask_prediction_conv_depth=256,
masks_are_class_agnostic=False,
convolve_then_upsample_masks=False):
"""Builds and returns a MaskRCNNBoxPredictor class.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
fc_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for fully connected ops.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
box_code_size: Size of encoding for each box.
add_background_class: Whether to add an implicit background class.
share_box_across_classes: Whether to share boxes across classes rather
than use a different box for each class.
predict_instance_masks: If True, will add a third stage mask prediction
to the returned class.
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
mask_height: Desired output mask height. The default value is 14.
mask_width: Desired output mask width. The default value is 14.
mask_prediction_num_conv_layers: Number of convolution layers applied to
the image_features in mask prediction branch.
mask_prediction_conv_depth: The depth for the first conv2d_transpose op
applied to the image_features in the mask prediction branch. If set
to 0, the depth of the convolution layers will be automatically chosen
based on the number of object classes and the number of channels in the
image features.
masks_are_class_agnostic: Boolean determining if the mask-head is
class-agnostic or not.
convolve_then_upsample_masks: Whether to apply convolutions on mask
features before upsampling using nearest neighbor resizing. Otherwise,
mask features are resized to [`mask_height`, `mask_width`] using
bilinear resizing before applying convolutions.
Returns:
A MaskRCNNBoxPredictor class.
"""
box_prediction_head = box_head.MaskRCNNBoxHead(
is_training=is_training,
num_classes=num_classes,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
box_code_size=box_code_size,
share_box_across_classes=share_box_across_classes)
class_prediction_head = class_head.MaskRCNNClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob)
third_stage_heads = {}
if predict_instance_masks:
third_stage_heads[
mask_rcnn_box_predictor.
MASK_PREDICTIONS] = mask_head.MaskRCNNMaskHead(
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
mask_height=mask_height,
mask_width=mask_width,
mask_prediction_num_conv_layers=mask_prediction_num_conv_layers,
mask_prediction_conv_depth=mask_prediction_conv_depth,
masks_are_class_agnostic=masks_are_class_agnostic,
convolve_then_upsample=convolve_then_upsample_masks)
return mask_rcnn_box_predictor.MaskRCNNBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
third_stage_heads=third_stage_heads)
def build_score_converter(score_converter_config, is_training):
"""Builds score converter based on the config.
Builds one of [tf.identity, tf.sigmoid] score converters based on the config
and whether the BoxPredictor is for training or inference.
Args:
score_converter_config:
box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter.
is_training: Indicates whether the BoxPredictor is in training mode.
Returns:
Callable score converter op.
Raises:
ValueError: On unknown score converter.
"""
if score_converter_config == (
box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.IDENTITY):
return tf.identity
if score_converter_config == (
box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID):
return tf.identity if is_training else tf.sigmoid
raise ValueError('Unknown score converter.')
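# Hedged illustration (not part of the original module): with the SIGMOID
# setting, training receives tf.identity so the loss sees raw logits, while
# inference receives tf.sigmoid, matching the comments at the call sites below.
def _example_score_converter_modes():
  sigmoid_enum = (
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID)
  train_fn = build_score_converter(sigmoid_enum, is_training=True)
  eval_fn = build_score_converter(sigmoid_enum, is_training=False)
  return train_fn, eval_fn  # (tf.identity, tf.sigmoid)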
BoxEncodingsClipRange = collections.namedtuple('BoxEncodingsClipRange',
['min', 'max'])
def build(argscope_fn, box_predictor_config, is_training, num_classes,
add_background_class=True):
"""Builds box predictor based on the configuration.
Builds box predictor based on the configuration. See box_predictor.proto for
configurable options. Also, see box_predictor.py for more details.
Args:
argscope_fn: A function that takes the following inputs:
* hyperparams_pb2.Hyperparams proto
* a boolean indicating if the model is in training mode.
and returns a tf slim argscope for Conv and FC hyperparameters.
box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
configuration.
    is_training: Whether the model is in training mode.
num_classes: Number of classes to predict.
add_background_class: Whether to add an implicit background class.
Returns:
box_predictor: box_predictor.BoxPredictor object.
Raises:
ValueError: On unknown box predictor.
"""
if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
raise ValueError('box_predictor_config not of type '
'box_predictor_pb2.BoxPredictor.')
box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
if box_predictor_oneof == 'convolutional_box_predictor':
config_box_predictor = box_predictor_config.convolutional_box_predictor
conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
return build_convolutional_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams_fn=conv_hyperparams_fn,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
min_depth=config_box_predictor.min_depth,
max_depth=config_box_predictor.max_depth,
apply_sigmoid_to_scores=config_box_predictor.apply_sigmoid_to_scores,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_depthwise=config_box_predictor.use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
config_box_predictor = (
box_predictor_config.weight_shared_convolutional_box_predictor)
conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
is_training)
apply_batch_norm = config_box_predictor.conv_hyperparams.HasField(
'batch_norm')
# During training phase, logits are used to compute the loss. Only apply
# sigmoid at inference to make the inference graph TPU friendly.
score_converter_fn = build_score_converter(
config_box_predictor.score_converter, is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
keyword_args = None
return build_weight_shared_convolutional_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams_fn=conv_hyperparams_fn,
depth=config_box_predictor.depth,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
share_prediction_tower=config_box_predictor.share_prediction_tower,
apply_batch_norm=apply_batch_norm,
use_depthwise=config_box_predictor.use_depthwise,
score_converter_fn=score_converter_fn,
box_encodings_clip_range=box_encodings_clip_range,
keyword_args=keyword_args)
if box_predictor_oneof == 'mask_rcnn_box_predictor':
config_box_predictor = box_predictor_config.mask_rcnn_box_predictor
fc_hyperparams_fn = argscope_fn(config_box_predictor.fc_hyperparams,
is_training)
conv_hyperparams_fn = None
if config_box_predictor.HasField('conv_hyperparams'):
conv_hyperparams_fn = argscope_fn(
config_box_predictor.conv_hyperparams, is_training)
return build_mask_rcnn_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
share_box_across_classes=(
config_box_predictor.share_box_across_classes),
predict_instance_masks=config_box_predictor.predict_instance_masks,
conv_hyperparams_fn=conv_hyperparams_fn,
mask_height=config_box_predictor.mask_height,
mask_width=config_box_predictor.mask_width,
mask_prediction_num_conv_layers=(
config_box_predictor.mask_prediction_num_conv_layers),
mask_prediction_conv_depth=(
config_box_predictor.mask_prediction_conv_depth),
masks_are_class_agnostic=(
config_box_predictor.masks_are_class_agnostic),
convolve_then_upsample_masks=(
config_box_predictor.convolve_then_upsample_masks))
if box_predictor_oneof == 'rfcn_box_predictor':
config_box_predictor = box_predictor_config.rfcn_box_predictor
conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
is_training)
box_predictor_object = rfcn_box_predictor.RfcnBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
crop_size=[config_box_predictor.crop_height,
config_box_predictor.crop_width],
num_spatial_bins=[config_box_predictor.num_spatial_bins_height,
config_box_predictor.num_spatial_bins_width],
depth=config_box_predictor.depth,
box_code_size=config_box_predictor.box_code_size)
return box_predictor_object
raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof))
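# Hedged usage sketch (illustrative only). Drives `build` with a text-format
# BoxPredictor proto; `hyperparams_builder.build` is the argscope_fn used by
# model_builder, and the hyperparameter choices below are assumptions.
def _example_build_from_proto():
  from google.protobuf import text_format  # local imports for the sketch
  from object_detection.builders import hyperparams_builder
  config = box_predictor_pb2.BoxPredictor()
  text_format.Merge(
      """
      convolutional_box_predictor {
        conv_hyperparams {
          regularizer { l2_regularizer {} }
          initializer { truncated_normal_initializer {} }
        }
      }
      """, config)
  return build(hyperparams_builder.build, config, is_training=True,
               num_classes=90)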
def build_keras(hyperparams_fn, freeze_batchnorm, inplace_batchnorm_update,
num_predictions_per_location_list, box_predictor_config,
is_training, num_classes, add_background_class=True):
"""Builds a Keras-based box predictor based on the configuration.
Builds Keras-based box predictor based on the configuration.
See box_predictor.proto for configurable options. Also, see box_predictor.py
for more details.
Args:
hyperparams_fn: A function that takes a hyperparams_pb2.Hyperparams
proto and returns a `hyperparams_builder.KerasLayerHyperparams`
for Conv or FC hyperparameters.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
configuration.
    is_training: Whether the model is in training mode.
num_classes: Number of classes to predict.
add_background_class: Whether to add an implicit background class.
Returns:
box_predictor: box_predictor.KerasBoxPredictor object.
Raises:
ValueError: On unknown box predictor, or one with no Keras box predictor.
"""
if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
raise ValueError('box_predictor_config not of type '
'box_predictor_pb2.BoxPredictor.')
box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
if box_predictor_oneof == 'convolutional_box_predictor':
config_box_predictor = box_predictor_config.convolutional_box_predictor
conv_hyperparams = hyperparams_fn(
config_box_predictor.conv_hyperparams)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
return build_convolutional_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
num_predictions_per_location_list=num_predictions_per_location_list,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
min_depth=config_box_predictor.min_depth,
max_depth=config_box_predictor.max_depth,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_depthwise=config_box_predictor.use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
config_box_predictor = (
box_predictor_config.weight_shared_convolutional_box_predictor)
conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams)
apply_batch_norm = config_box_predictor.conv_hyperparams.HasField(
'batch_norm')
# During training phase, logits are used to compute the loss. Only apply
# sigmoid at inference to make the inference graph TPU friendly. This is
# required because during TPU inference, model.postprocess is not called.
score_converter_fn = build_score_converter(
config_box_predictor.score_converter, is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
keyword_args = None
return build_weight_shared_convolutional_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
num_predictions_per_location_list=num_predictions_per_location_list,
depth=config_box_predictor.depth,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
add_background_class=add_background_class,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
share_prediction_tower=config_box_predictor.share_prediction_tower,
apply_batch_norm=apply_batch_norm,
use_depthwise=config_box_predictor.use_depthwise,
apply_conv_hyperparams_to_heads=(
config_box_predictor.apply_conv_hyperparams_to_heads),
apply_conv_hyperparams_pointwise=(
config_box_predictor.apply_conv_hyperparams_pointwise),
score_converter_fn=score_converter_fn,
box_encodings_clip_range=box_encodings_clip_range,
keyword_args=keyword_args)
if box_predictor_oneof == 'mask_rcnn_box_predictor':
config_box_predictor = box_predictor_config.mask_rcnn_box_predictor
fc_hyperparams = hyperparams_fn(config_box_predictor.fc_hyperparams)
conv_hyperparams = None
if config_box_predictor.HasField('conv_hyperparams'):
conv_hyperparams = hyperparams_fn(
config_box_predictor.conv_hyperparams)
return build_mask_rcnn_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
share_box_across_classes=(
config_box_predictor.share_box_across_classes),
predict_instance_masks=config_box_predictor.predict_instance_masks,
conv_hyperparams=conv_hyperparams,
mask_height=config_box_predictor.mask_height,
mask_width=config_box_predictor.mask_width,
mask_prediction_num_conv_layers=(
config_box_predictor.mask_prediction_num_conv_layers),
mask_prediction_conv_depth=(
config_box_predictor.mask_prediction_conv_depth),
masks_are_class_agnostic=(
config_box_predictor.masks_are_class_agnostic),
convolve_then_upsample_masks=(
config_box_predictor.convolve_then_upsample_masks))
if box_predictor_oneof == 'rfcn_box_predictor':
config_box_predictor = box_predictor_config.rfcn_box_predictor
conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams)
box_predictor_object = rfcn_keras_box_predictor.RfcnKerasBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
crop_size=[config_box_predictor.crop_height,
config_box_predictor.crop_width],
num_spatial_bins=[config_box_predictor.num_spatial_bins_height,
config_box_predictor.num_spatial_bins_width],
depth=config_box_predictor.depth,
box_code_size=config_box_predictor.box_code_size)
return box_predictor_object
raise ValueError(
'Unknown box predictor for Keras: {}'.format(box_predictor_oneof))
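# --- Illustrative usage sketch (hypothetical, added for clarity) ------------
# A minimal, hedged example of driving the Keras dispatch above for an R-FCN
# box predictor, assuming the enclosing builder is the module's `build_keras`
# entry point and that it accepts the keyword arguments used in its body
# (hyperparams_fn, freeze_batchnorm, box_predictor_config, ...). The helper
# name, the hyperparams values, and the use of
# hyperparams_builder.KerasLayerHyperparams as `hyperparams_fn` are
# assumptions, not part of the original module.
def _example_build_keras_rfcn_predictor():
  """Builds a Keras R-FCN box predictor from a text proto (sketch)."""
  from google.protobuf import text_format
  from object_detection.builders import hyperparams_builder
  from object_detection.protos import box_predictor_pb2
  config = box_predictor_pb2.BoxPredictor()
  text_format.Merge("""
      rfcn_box_predictor {
        conv_hyperparams {
          regularizer { l1_regularizer { weight: 0.0004 } }
          initializer { truncated_normal_initializer { stddev: 0.03 } }
        }
      }
  """, config)
  return build_keras(
      hyperparams_fn=hyperparams_builder.KerasLayerHyperparams,
      freeze_batchnorm=False,
      inplace_batchnorm_update=False,
      num_predictions_per_location_list=[1],
      box_predictor_config=config,
      is_training=True,
      num_classes=90,
      add_background_class=True)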
| 48,110 | 47.450151 | 80 | py |
models | models-master/research/object_detection/builders/dataset_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import dataset_builder
from object_detection.core import standard_fields as fields
from object_detection.dataset_tools import seq_example_util
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
from object_detection.utils import test_case
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import lookup as contrib_lookup
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
def get_iterator_next_for_testing(dataset, is_tf2):
iterator = dataset.make_initializable_iterator()
if not is_tf2:
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator.get_next()
def _get_labelmap_path():
"""Returns an absolute path to label map file."""
parent_path = os.path.dirname(tf.resource_loader.get_data_files_path())
return os.path.join(parent_path, 'data',
'pet_label_map.pbtxt')
class DatasetBuilderTest(test_case.TestCase):
def create_tf_record(self, has_additional_channels=False, num_shards=1,
num_examples_per_shard=1):
def dummy_jpeg_fn():
image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
additional_channels_tensor = np.random.randint(
255, size=(4, 5, 1)).astype(np.uint8)
encoded_jpeg = tf.image.encode_jpeg(image_tensor)
encoded_additional_channels_jpeg = tf.image.encode_jpeg(
additional_channels_tensor)
return encoded_jpeg, encoded_additional_channels_jpeg
encoded_jpeg, encoded_additional_channels_jpeg = self.execute(
dummy_jpeg_fn, [])
tmp_dir = self.get_temp_dir()
flat_mask = (4 * 5) * [1.0]
for i in range(num_shards):
path = os.path.join(tmp_dir, '%05d.tfrecord' % i)
writer = tf.python_io.TFRecordWriter(path)
for j in range(num_examples_per_shard):
if num_shards > 1:
source_id = (str(i) + '_' + str(j)).encode()
else:
source_id = str(j).encode()
features = {
'image/source_id': dataset_util.bytes_feature(source_id),
'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/height': dataset_util.int64_feature(4),
'image/width': dataset_util.int64_feature(5),
'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]),
'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]),
'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]),
'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]),
'image/object/class/label': dataset_util.int64_list_feature([2]),
'image/object/mask': dataset_util.float_list_feature(flat_mask),
}
if has_additional_channels:
additional_channels_key = 'image/additional_channels/encoded'
features[additional_channels_key] = dataset_util.bytes_list_feature(
[encoded_additional_channels_jpeg] * 2)
example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(example.SerializeToString())
writer.close()
return os.path.join(self.get_temp_dir(), '?????.tfrecord')
def _make_random_serialized_jpeg_images(self, num_frames, image_height,
image_width):
def graph_fn():
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list]
return encoded_images_list
encoded_images = self.execute(graph_fn, [])
return encoded_images
def create_tf_record_sequence_example(self):
path = os.path.join(self.get_temp_dir(), 'seq_tfrecord')
writer = tf.python_io.TFRecordWriter(path)
num_frames = 4
image_height = 4
image_width = 5
image_source_ids = [str(i) for i in range(num_frames)]
with self.test_session():
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_source_ids=image_source_ids,
image_format='JPEG',
is_annotated=[[1], [1], [1], [1]],
bboxes=[
[[]], # Frame 0.
[[0., 0., 1., 1.]], # Frame 1.
[[0., 0., 1., 1.],
[0.1, 0.1, 0.2, 0.2]], # Frame 2.
[[]], # Frame 3.
],
label_strings=[
[], # Frame 0.
['Abyssinian'], # Frame 1.
['Abyssinian', 'american_bulldog'], # Frame 2.
[], # Frame 3
]).SerializeToString()
writer.write(sequence_example_serialized)
writer.close()
return path
def test_build_tf_record_input_reader(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def graph_fn():
return get_iterator_next_for_testing(
dataset_builder.build(input_reader_proto, batch_size=1),
self.is_tf2())
output_dict = self.execute(graph_fn, [])
self.assertNotIn(
fields.InputDataFields.groundtruth_instance_masks, output_dict)
self.assertEqual((1, 4, 5, 3),
output_dict[fields.InputDataFields.image].shape)
self.assertAllEqual([[2]],
output_dict[fields.InputDataFields.groundtruth_classes])
self.assertEqual(
(1, 1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllEqual(
[0.0, 0.0, 1.0, 1.0],
output_dict[fields.InputDataFields.groundtruth_boxes][0][0])
def get_mock_reduce_to_frame_fn(self):
def mock_reduce_to_frame_fn(dataset, dataset_map_fn, batch_size, config):
def get_frame(tensor_dict):
out_tensor_dict = {}
out_tensor_dict[fields.InputDataFields.source_id] = (
tensor_dict[fields.InputDataFields.source_id][0])
return out_tensor_dict
return dataset_map_fn(dataset, get_frame, batch_size, config)
return mock_reduce_to_frame_fn
def test_build_tf_record_input_reader_sequence_example_train(self):
tf_record_path = self.create_tf_record_sequence_example()
label_map_path = _get_labelmap_path()
input_type = 'TF_SEQUENCE_EXAMPLE'
input_reader_text_proto = """
shuffle: false
num_readers: 1
input_type: {1}
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path, input_type)
input_reader_proto = input_reader_pb2.InputReader()
input_reader_proto.label_map_path = label_map_path
text_format.Merge(input_reader_text_proto, input_reader_proto)
reduce_to_frame_fn = self.get_mock_reduce_to_frame_fn()
def graph_fn():
return get_iterator_next_for_testing(
dataset_builder.build(input_reader_proto, batch_size=1,
reduce_to_frame_fn=reduce_to_frame_fn),
self.is_tf2())
output_dict = self.execute(graph_fn, [])
self.assertEqual((1,),
output_dict[fields.InputDataFields.source_id].shape)
def test_build_tf_record_input_reader_sequence_example_test(self):
tf_record_path = self.create_tf_record_sequence_example()
input_type = 'TF_SEQUENCE_EXAMPLE'
label_map_path = _get_labelmap_path()
input_reader_text_proto = """
shuffle: false
num_readers: 1
input_type: {1}
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path, input_type)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
input_reader_proto.label_map_path = label_map_path
reduce_to_frame_fn = self.get_mock_reduce_to_frame_fn()
def graph_fn():
return get_iterator_next_for_testing(
dataset_builder.build(input_reader_proto, batch_size=1,
reduce_to_frame_fn=reduce_to_frame_fn),
self.is_tf2())
output_dict = self.execute(graph_fn, [])
self.assertEqual((1,),
output_dict[fields.InputDataFields.source_id].shape)
def test_build_tf_record_input_reader_and_load_instance_masks(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def graph_fn():
return get_iterator_next_for_testing(
dataset_builder.build(input_reader_proto, batch_size=1),
self.is_tf2()
)
output_dict = self.execute(graph_fn, [])
self.assertAllEqual(
(1, 1, 4, 5),
output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)
def test_build_tf_record_input_reader_with_batch_size_two(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def one_hot_class_encoding_fn(tensor_dict):
tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3)
return tensor_dict
def graph_fn():
return dataset_builder.make_initializable_iterator(
dataset_builder.build(
input_reader_proto,
transform_input_data_fn=one_hot_class_encoding_fn,
batch_size=2)).get_next()
output_dict = self.execute(graph_fn, [])
self.assertAllEqual([2, 4, 5, 3],
output_dict[fields.InputDataFields.image].shape)
self.assertAllEqual(
[2, 1, 3],
output_dict[fields.InputDataFields.groundtruth_classes].shape)
self.assertAllEqual(
[2, 1, 4], output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllEqual([[[0.0, 0.0, 1.0, 1.0]], [[0.0, 0.0, 1.0, 1.0]]],
output_dict[fields.InputDataFields.groundtruth_boxes])
def test_build_tf_record_input_reader_with_batch_size_two_and_masks(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def one_hot_class_encoding_fn(tensor_dict):
tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3)
return tensor_dict
def graph_fn():
return dataset_builder.make_initializable_iterator(
dataset_builder.build(
input_reader_proto,
transform_input_data_fn=one_hot_class_encoding_fn,
batch_size=2)).get_next()
output_dict = self.execute(graph_fn, [])
self.assertAllEqual(
[2, 1, 4, 5],
output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)
def test_raises_error_with_no_input_paths(self):
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
"""
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
with self.assertRaises(ValueError):
dataset_builder.build(input_reader_proto, batch_size=1)
def test_sample_all_data(self):
tf_record_path = self.create_tf_record(num_examples_per_shard=2)
input_reader_text_proto = """
shuffle: false
num_readers: 1
sample_1_of_n_examples: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def graph_fn():
dataset = dataset_builder.build(input_reader_proto, batch_size=1)
sample1_ds = dataset.take(1)
sample2_ds = dataset.skip(1)
iter1 = dataset_builder.make_initializable_iterator(sample1_ds)
iter2 = dataset_builder.make_initializable_iterator(sample2_ds)
return iter1.get_next(), iter2.get_next()
output_dict1, output_dict2 = self.execute(graph_fn, [])
self.assertAllEqual([b'0'], output_dict1[fields.InputDataFields.source_id])
self.assertEqual([b'1'], output_dict2[fields.InputDataFields.source_id])
def test_sample_one_of_n_shards(self):
tf_record_path = self.create_tf_record(num_examples_per_shard=4)
input_reader_text_proto = """
shuffle: false
num_readers: 1
sample_1_of_n_examples: 2
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def graph_fn():
dataset = dataset_builder.build(input_reader_proto, batch_size=1)
sample1_ds = dataset.take(1)
sample2_ds = dataset.skip(1)
iter1 = dataset_builder.make_initializable_iterator(sample1_ds)
iter2 = dataset_builder.make_initializable_iterator(sample2_ds)
return iter1.get_next(), iter2.get_next()
output_dict1, output_dict2 = self.execute(graph_fn, [])
self.assertAllEqual([b'0'], output_dict1[fields.InputDataFields.source_id])
self.assertEqual([b'2'], output_dict2[fields.InputDataFields.source_id])
def test_no_input_context(self):
"""Test that all samples are read with no input context given."""
tf_record_path = self.create_tf_record(num_examples_per_shard=16,
num_shards=2)
input_reader_text_proto = """
shuffle: false
num_readers: 1
num_epochs: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
for i in range(4):
# pylint:disable=cell-var-from-loop
def graph_fn():
dataset = dataset_builder.build(input_reader_proto, batch_size=8)
dataset = dataset.skip(i)
return get_iterator_next_for_testing(dataset, self.is_tf2())
batch = self.execute(graph_fn, [])
self.assertEqual(batch['image'].shape, (8, 4, 5, 3))
def graph_fn_last_batch():
dataset = dataset_builder.build(input_reader_proto, batch_size=8)
dataset = dataset.skip(4)
return get_iterator_next_for_testing(dataset, self.is_tf2())
self.assertRaises(tf.errors.OutOfRangeError, self.execute,
compute_fn=graph_fn_last_batch, inputs=[])
def test_with_input_context(self):
"""Test that a subset is read with input context given."""
tf_record_path = self.create_tf_record(num_examples_per_shard=16,
num_shards=2)
input_reader_text_proto = """
shuffle: false
num_readers: 1
num_epochs: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
input_context = tf.distribute.InputContext(
num_input_pipelines=2, input_pipeline_id=0, num_replicas_in_sync=4
)
for i in range(8):
# pylint:disable=cell-var-from-loop
def graph_fn():
dataset = dataset_builder.build(input_reader_proto, batch_size=8,
input_context=input_context)
dataset = dataset.skip(i)
return get_iterator_next_for_testing(dataset, self.is_tf2())
batch = self.execute(graph_fn, [])
self.assertEqual(batch['image'].shape, (2, 4, 5, 3))
def graph_fn_last_batch():
dataset = dataset_builder.build(input_reader_proto, batch_size=8,
input_context=input_context)
dataset = dataset.skip(8)
return get_iterator_next_for_testing(dataset, self.is_tf2())
self.assertRaises(tf.errors.OutOfRangeError, self.execute,
compute_fn=graph_fn_last_batch, inputs=[])
class ReadDatasetTest(test_case.TestCase):
def setUp(self):
self._path_template = os.path.join(self.get_temp_dir(), 'examples_%s.txt')
for i in range(5):
path = self._path_template % i
with tf.gfile.Open(path, 'wb') as f:
f.write('\n'.join([str(i + 1), str((i + 1) * 10)]))
self._shuffle_path_template = os.path.join(self.get_temp_dir(),
'shuffle_%s.txt')
for i in range(2):
path = self._shuffle_path_template % i
with tf.gfile.Open(path, 'wb') as f:
f.write('\n'.join([str(i)] * 5))
super(ReadDatasetTest, self).setUp()
def _get_dataset_next(self, files, config, batch_size, num_batches_skip=0):
def decode_func(value):
return [tf.string_to_number(value, out_type=tf.int32)]
dataset = dataset_builder.read_dataset(tf.data.TextLineDataset, files,
config)
dataset = dataset.map(decode_func)
dataset = dataset.batch(batch_size)
if num_batches_skip > 0:
dataset = dataset.skip(num_batches_skip)
return get_iterator_next_for_testing(dataset, self.is_tf2())
def _assert_item_count(self, data, item, percentage):
self.assertAlmostEqual(data.count(item)/len(data), percentage, places=1)
def test_make_initializable_iterator_with_hashTable(self):
def graph_fn():
keys = [1, 0, -1]
dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]])
try:
# Dynamically try to load the tf v2 lookup, falling back to contrib
lookup = tf.compat.v2.lookup
hash_table_class = tf.compat.v2.lookup.StaticHashTable
except AttributeError:
lookup = contrib_lookup
hash_table_class = contrib_lookup.HashTable
table = hash_table_class(
initializer=lookup.KeyValueTensorInitializer(
keys=keys, values=list(reversed(keys))),
default_value=100)
dataset = dataset.map(table.lookup)
return dataset_builder.make_initializable_iterator(dataset).get_next()
result = self.execute(graph_fn, [])
self.assertAllEqual(result, [-1, 100, 1, 100])
def test_read_dataset_sample_from_datasets_weights_equal_weight(self):
"""Ensure that the files' values are equally-weighted."""
config = input_reader_pb2.InputReader()
config.num_readers = 2
config.shuffle = False
config.sample_from_datasets_weights.extend([0.5, 0.5])
def graph_fn():
return self._get_dataset_next(
[self._path_template % '0', self._path_template % '1'],
config,
batch_size=1000)
data = list(self.execute(graph_fn, []))
self.assertEqual(len(data), 1000)
self._assert_item_count(data, 1, 0.25)
self._assert_item_count(data, 10, 0.25)
self._assert_item_count(data, 2, 0.25)
self._assert_item_count(data, 20, 0.25)
def test_read_dataset_sample_from_datasets_weights_non_normalized(self):
"""Ensure that the values are equally-weighted when not normalized."""
config = input_reader_pb2.InputReader()
config.num_readers = 2
config.shuffle = False
# Values are not normalized to sum to 1. In this case, it's a 50/50 split
# with each dataset having weight of 1.
config.sample_from_datasets_weights.extend([1, 1])
def graph_fn():
return self._get_dataset_next(
[self._path_template % '0', self._path_template % '1'],
config,
batch_size=1000)
data = list(self.execute(graph_fn, []))
self.assertEqual(len(data), 1000)
self._assert_item_count(data, 1, 0.25)
self._assert_item_count(data, 10, 0.25)
self._assert_item_count(data, 2, 0.25)
self._assert_item_count(data, 20, 0.25)
def test_read_dataset_sample_from_datasets_weights_zero_weight(self):
"""Ensure that the files' values are equally-weighted."""
config = input_reader_pb2.InputReader()
config.num_readers = 2
config.shuffle = False
config.sample_from_datasets_weights.extend([1.0, 0.0])
def graph_fn():
return self._get_dataset_next(
[self._path_template % '0', self._path_template % '1'],
config,
batch_size=1000)
data = list(self.execute(graph_fn, []))
self.assertEqual(len(data), 1000)
self._assert_item_count(data, 1, 0.5)
self._assert_item_count(data, 10, 0.5)
self._assert_item_count(data, 2, 0.0)
self._assert_item_count(data, 20, 0.0)
def test_read_dataset_sample_from_datasets_weights_unbalanced(self):
"""Ensure that the files' values are equally-weighted."""
config = input_reader_pb2.InputReader()
config.num_readers = 2
config.shuffle = False
config.sample_from_datasets_weights.extend([0.1, 0.9])
def graph_fn():
return self._get_dataset_next(
[self._path_template % '0', self._path_template % '1'],
config,
batch_size=1000)
data = list(self.execute(graph_fn, []))
self.assertEqual(len(data), 1000)
self._assert_item_count(data, 1, 0.05)
self._assert_item_count(data, 10, 0.05)
self._assert_item_count(data, 2, 0.45)
self._assert_item_count(data, 20, 0.45)
def test_read_dataset(self):
config = input_reader_pb2.InputReader()
config.num_readers = 1
config.shuffle = False
def graph_fn():
return self._get_dataset_next(
[self._path_template % '*'], config, batch_size=20)
data = self.execute(graph_fn, [])
# Note that the execute function extracts single outputs if the return
# value is of size 1.
self.assertCountEqual(
data, [
1, 10, 2, 20, 3, 30, 4, 40, 5, 50, 1, 10, 2, 20, 3, 30, 4, 40, 5,
50
])
def test_reduce_num_reader(self):
config = input_reader_pb2.InputReader()
config.num_readers = 10
config.shuffle = False
def graph_fn():
return self._get_dataset_next(
[self._path_template % '*'], config, batch_size=20)
data = self.execute(graph_fn, [])
# Note that the execute function extracts single outputs if the return
# value is of size 1.
self.assertCountEqual(
data, [
1, 10, 2, 20, 3, 30, 4, 40, 5, 50, 1, 10, 2, 20, 3, 30, 4, 40, 5,
50
])
def test_enable_shuffle(self):
config = input_reader_pb2.InputReader()
config.num_readers = 1
config.shuffle = True
tf.set_random_seed(1) # Set graph level seed.
def graph_fn():
return self._get_dataset_next(
[self._shuffle_path_template % '*'], config, batch_size=10)
expected_non_shuffle_output = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
data = self.execute(graph_fn, [])
self.assertTrue(
np.any(np.not_equal(data, expected_non_shuffle_output)))
  def test_disable_shuffle(self):
config = input_reader_pb2.InputReader()
config.num_readers = 1
config.shuffle = False
def graph_fn():
return self._get_dataset_next(
[self._shuffle_path_template % '*'], config, batch_size=10)
expected_non_shuffle_output1 = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
expected_non_shuffle_output2 = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
# Note that the execute function extracts single outputs if the return
# value is of size 1.
data = self.execute(graph_fn, [])
self.assertTrue(all(data == expected_non_shuffle_output1) or
all(data == expected_non_shuffle_output2))
def test_read_dataset_single_epoch(self):
config = input_reader_pb2.InputReader()
config.num_epochs = 1
config.num_readers = 1
config.shuffle = False
def graph_fn():
return self._get_dataset_next(
[self._path_template % '0'], config, batch_size=30)
data = self.execute(graph_fn, [])
# Note that the execute function extracts single outputs if the return
# value is of size 1.
self.assertAllEqual(data, [1, 10])
# First batch will retrieve as much as it can, second batch will fail.
def graph_fn_second_batch():
return self._get_dataset_next(
[self._path_template % '0'], config, batch_size=30,
num_batches_skip=1)
self.assertRaises(tf.errors.OutOfRangeError, self.execute,
compute_fn=graph_fn_second_batch, inputs=[])
if __name__ == '__main__':
tf.test.main()
| 26,612 | 35.060976 | 80 | py |
models | models-master/research/object_detection/builders/box_predictor_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_predictor_builder."""
import unittest
from unittest import mock # pylint: disable=g-importing-member
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import mask_rcnn_box_predictor
from object_detection.protos import box_predictor_pb2
from object_detection.protos import hyperparams_pb2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.')
class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_conv_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_construct_non_default_conv_box_predictor(self):
box_predictor_text_proto = """
convolutional_box_predictor {
min_depth: 2
max_depth: 16
num_layers_before_predictor: 2
use_dropout: false
dropout_keep_probability: 0.4
kernel_size: 3
box_code_size: 3
apply_sigmoid_to_scores: true
class_prediction_bias_init: 4.0
use_depthwise: true
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10,
add_background_class=False)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._min_depth, 2)
self.assertEqual(box_predictor._max_depth, 16)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.4)
self.assertTrue(class_head._apply_sigmoid_to_scores)
self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
self.assertEqual(class_head._num_class_slots, 10)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
self.assertTrue(class_head._use_depthwise)
def test_construct_default_conv_box_predictor(self):
box_predictor_text_proto = """
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._min_depth, 0)
self.assertEqual(box_predictor._max_depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertTrue(class_head._use_dropout)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8)
self.assertFalse(class_head._apply_sigmoid_to_scores)
self.assertEqual(class_head._num_class_slots, 91)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertFalse(class_head._use_depthwise)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.')
class WeightSharedConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_conv_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
(box_predictor_proto.weight_shared_convolutional_box_predictor
.conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_construct_non_default_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
depth: 2
num_layers_before_predictor: 2
kernel_size: 7
box_code_size: 3
class_prediction_bias_init: 4.0
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
(box_predictor_proto.weight_shared_convolutional_box_predictor.
conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10,
add_background_class=False)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._depth, 2)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
self.assertEqual(box_predictor._apply_batch_norm, False)
def test_construct_non_default_depthwise_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
depth: 2
num_layers_before_predictor: 2
kernel_size: 7
box_code_size: 3
class_prediction_bias_init: 4.0
use_depthwise: true
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
(box_predictor_proto.weight_shared_convolutional_box_predictor.
conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10,
add_background_class=False)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._depth, 2)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertEqual(box_predictor._apply_batch_norm, False)
self.assertEqual(box_predictor._use_depthwise, True)
self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
def test_construct_default_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor._depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._apply_batch_norm, False)
def test_construct_default_conv_box_predictor_with_batch_norm(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
batch_norm {
train: true
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor._depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._apply_batch_norm, True)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.')
class MaskRCNNBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_builder_calls_fc_argscope_fn(self):
fc_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
op: FC
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
hyperparams_proto)
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
mock_argscope_fn.assert_called_with(hyperparams_proto, False)
self.assertEqual(box_predictor._box_prediction_head._fc_hyperparams_fn,
'arg_scope')
self.assertEqual(box_predictor._class_prediction_head._fc_hyperparams_fn,
'arg_scope')
def test_non_default_mask_rcnn_box_predictor(self):
fc_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
op: FC
"""
box_predictor_text_proto = """
mask_rcnn_box_predictor {
use_dropout: true
dropout_keep_probability: 0.8
box_code_size: 3
share_box_across_classes: true
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
def mock_fc_argscope_builder(fc_hyperparams_arg, is_training):
return (fc_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_fc_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
self.assertTrue(box_head._use_dropout)
self.assertTrue(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.8)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 3)
self.assertEqual(box_head._share_box_across_classes, True)
def test_build_default_mask_rcnn_box_predictor(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor = box_predictor_builder.build(
argscope_fn=mock.Mock(return_value='arg_scope'),
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
self.assertFalse(box_head._use_dropout)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 4)
self.assertEqual(len(box_predictor._third_stage_heads.keys()), 0)
def test_build_box_predictor_with_mask_branch(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
hyperparams_pb2.Hyperparams.CONV)
box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
box_predictor_proto.mask_rcnn_box_predictor.mask_height = 16
box_predictor_proto.mask_rcnn_box_predictor.mask_width = 16
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
mock_argscope_fn.assert_has_calls(
[mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
True),
mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
True)], any_order=True)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
third_stage_heads = box_predictor._third_stage_heads
self.assertFalse(box_head._use_dropout)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 4)
self.assertIn(
mask_rcnn_box_predictor.MASK_PREDICTIONS, third_stage_heads)
self.assertEqual(
third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
._mask_prediction_conv_depth, 512)
  def test_build_box_predictor_with_convolve_then_upsample_masks(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
hyperparams_pb2.Hyperparams.CONV)
box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
box_predictor_proto.mask_rcnn_box_predictor.mask_height = 24
box_predictor_proto.mask_rcnn_box_predictor.mask_width = 24
box_predictor_proto.mask_rcnn_box_predictor.convolve_then_upsample_masks = (
True)
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
mock_argscope_fn.assert_has_calls(
[mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
True),
mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
True)], any_order=True)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
third_stage_heads = box_predictor._third_stage_heads
self.assertFalse(box_head._use_dropout)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 4)
self.assertIn(
mask_rcnn_box_predictor.MASK_PREDICTIONS, third_stage_heads)
self.assertEqual(
third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
._mask_prediction_conv_depth, 512)
self.assertTrue(third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
._convolve_then_upsample)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.')
class RfcnBoxPredictorBuilderTest(tf.test.TestCase):
  def test_box_predictor_calls_conv_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_non_default_rfcn_box_predictor(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
box_predictor_text_proto = """
rfcn_box_predictor {
num_spatial_bins_height: 4
num_spatial_bins_width: 4
depth: 4
box_code_size: 3
crop_height: 16
crop_width: 16
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 3)
self.assertEqual(box_predictor._num_spatial_bins, [4, 4])
self.assertEqual(box_predictor._crop_size, [16, 16])
def test_default_rfcn_box_predictor(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 4)
self.assertEqual(box_predictor._num_spatial_bins, [3, 3])
self.assertEqual(box_predictor._crop_size, [12, 12])
if __name__ == '__main__':
tf.test.main()
| 26,417 | 38.607196 | 80 | py |
models | models-master/research/object_detection/builders/dataset_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data.Dataset builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wish to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import tensorflow.compat.v1 as tf
from object_detection.builders import decoder_builder
from object_detection.protos import input_reader_pb2
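# --- Illustrative sketch (hypothetical, added for clarity) -------------------
# The module docstring above suggests wrapping `build` when integrating custom
# input readers with the configuration framework. A minimal, hedged sketch of
# such a wrapper; `_example_custom_input_builder` and `my_transform_fn` are
# illustrative names, not part of this module.
def _example_custom_input_builder(input_reader_config, batch_size,
                                  my_transform_fn=None):
  """Wraps `build`, injecting a user-supplied per-record transform (sketch)."""
  return build(input_reader_config,
               batch_size=batch_size,
               transform_input_data_fn=my_transform_fn)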
def make_initializable_iterator(dataset):
"""Creates an iterator, and initializes tables.
This is useful in cases where make_one_shot_iterator wouldn't work because
the graph contains a hash table that needs to be initialized.
Args:
dataset: A `tf.data.Dataset` object.
Returns:
A `tf.data.Iterator`.
"""
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator
def _read_dataset_internal(file_read_func,
input_files,
num_readers,
config,
filename_shard_fn=None):
"""Reads a dataset, and handles repetition and shuffling.
Args:
    file_read_func: Function to use in tf.data.experimental.parallel_interleave
      to read every individual file into a tf.data.Dataset.
input_files: A list of file paths to read.
num_readers: Number of readers to use.
    config: An input_reader_pb2.InputReader object.
filename_shard_fn: optional, A function used to shard filenames across
replicas. This function takes as input a TF dataset of filenames and is
expected to return its sharded version. It is useful when the dataset is
being loaded on one of possibly many replicas and we want to evenly shard
the files between the replicas.
Returns:
A tf.data.Dataset of (undecoded) tf-records based on config.
Raises:
RuntimeError: If no files are found at the supplied path(s).
"""
filenames = tf.gfile.Glob(input_files)
tf.logging.info('Reading record datasets for input file: %s' % input_files)
tf.logging.info('Number of filenames to read: %s' % len(filenames))
if not filenames:
raise RuntimeError('Did not find any input files matching the glob pattern '
'{}'.format(input_files))
if num_readers > len(filenames):
num_readers = len(filenames)
tf.logging.warning('num_readers has been reduced to %d to match input file '
'shards.' % num_readers)
filename_dataset = tf.data.Dataset.from_tensor_slices(filenames)
if config.shuffle:
filename_dataset = filename_dataset.shuffle(
config.filenames_shuffle_buffer_size)
elif num_readers > 1:
tf.logging.warning('`shuffle` is false, but the input data stream is '
'still slightly shuffled since `num_readers` > 1.')
if filename_shard_fn:
filename_dataset = filename_shard_fn(filename_dataset)
filename_dataset = filename_dataset.repeat(config.num_epochs or None)
records_dataset = filename_dataset.apply(
tf.data.experimental.parallel_interleave(
file_read_func,
cycle_length=num_readers,
block_length=config.read_block_length,
sloppy=config.shuffle))
if config.shuffle:
records_dataset = records_dataset.shuffle(config.shuffle_buffer_size)
return records_dataset
def read_dataset(file_read_func, input_files, config, filename_shard_fn=None):
"""Reads multiple datasets with sampling.
Args:
    file_read_func: Function to use in tf.data.experimental.parallel_interleave
      to read every individual file into a tf.data.Dataset.
input_files: A list of file paths to read.
    config: An input_reader_pb2.InputReader object.
filename_shard_fn: optional, A function used to shard filenames across
replicas. This function takes as input a TF dataset of filenames and is
expected to return its sharded version. It is useful when the dataset is
being loaded on one of possibly many replicas and we want to evenly shard
the files between the replicas.
Returns:
A tf.data.Dataset of (undecoded) tf-records based on config.
Raises:
RuntimeError: If no files are found at the supplied path(s).
"""
if config.sample_from_datasets_weights:
tf.logging.info('Reading weighted datasets: %s' % input_files)
if len(input_files) != len(config.sample_from_datasets_weights):
raise ValueError('Expected the number of input files to be the same as '
'the number of dataset sample weights. But got '
'[input_files, sample_from_datasets_weights]: [' +
                       str(input_files) + ', ' +
str(config.sample_from_datasets_weights) + ']')
tf.logging.info('Sampling from datasets %s with weights %s' %
(input_files, config.sample_from_datasets_weights))
records_datasets = []
dataset_weights = []
for i, input_file in enumerate(input_files):
weight = config.sample_from_datasets_weights[i]
num_readers = math.ceil(config.num_readers *
weight /
sum(config.sample_from_datasets_weights))
tf.logging.info(
'Num readers for dataset [%s]: %d', input_file, num_readers)
if num_readers == 0:
tf.logging.info('Skipping dataset due to zero weights: %s', input_file)
continue
records_dataset = _read_dataset_internal(file_read_func, [input_file],
num_readers, config,
filename_shard_fn)
dataset_weights.append(weight)
records_datasets.append(records_dataset)
return tf.data.experimental.sample_from_datasets(records_datasets,
dataset_weights)
else:
tf.logging.info('Reading unweighted datasets: %s' % input_files)
return _read_dataset_internal(file_read_func, input_files,
config.num_readers, config, filename_shard_fn)
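# --- Illustrative usage sketch (hypothetical, added for clarity) ------------
# A hedged example of weighted sampling across two record files, mirroring the
# configuration exercised in dataset_builder_test.py. The file paths are
# placeholders and `_example_read_weighted_datasets` is an illustrative helper.
def _example_read_weighted_datasets():
  """Interleaves two TFRecord files with a 90/10 sampling ratio (sketch)."""
  config = input_reader_pb2.InputReader()
  config.num_readers = 2
  config.shuffle = False
  config.sample_from_datasets_weights.extend([0.9, 0.1])
  return read_dataset(
      functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),
      ['/path/to/large.tfrecord', '/path/to/small.tfrecord'], config)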
def shard_function_for_context(input_context):
"""Returns a function that shards filenames based on the input context."""
if input_context is None:
return None
def shard_fn(dataset):
return dataset.shard(
input_context.num_input_pipelines, input_context.input_pipeline_id)
return shard_fn
def build(input_reader_config, batch_size=None, transform_input_data_fn=None,
input_context=None, reduce_to_frame_fn=None):
"""Builds a tf.data.Dataset.
  Builds a tf.data.Dataset by applying the `transform_input_data_fn` on all
  records. Batches the resulting dataset when `batch_size` is provided.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
batch_size: Batch size. If batch size is None, no batching is performed.
transform_input_data_fn: Function to apply transformation to all records,
or None if no extra decoding is required.
input_context: optional, A tf.distribute.InputContext object used to
shard filenames and compute per-replica batch_size when this function
is being called per-replica.
reduce_to_frame_fn: Function that extracts frames from tf.SequenceExample
type input data.
Returns:
A tf.data.Dataset based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
decoder = decoder_builder.build(input_reader_config)
if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
config = input_reader_config.tf_record_input_reader
if not config.input_path:
raise ValueError('At least one input path must be specified in '
'`input_reader_config`.')
def dataset_map_fn(dataset, fn_to_map, batch_size=None,
input_reader_config=None):
"""Handles whether or not to use the legacy map function.
Args:
dataset: A tf.Dataset.
fn_to_map: The function to be mapped for that dataset.
batch_size: Batch size. If batch size is None, no batching is performed.
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tf.data.Dataset mapped with fn_to_map.
"""
if hasattr(dataset, 'map_with_legacy_function'):
if batch_size:
num_parallel_calls = batch_size * (
input_reader_config.num_parallel_batches)
else:
num_parallel_calls = input_reader_config.num_parallel_map_calls
dataset = dataset.map_with_legacy_function(
fn_to_map, num_parallel_calls=num_parallel_calls)
else:
dataset = dataset.map(fn_to_map, tf.data.experimental.AUTOTUNE)
return dataset
shard_fn = shard_function_for_context(input_context)
if input_context is not None:
batch_size = input_context.get_per_replica_batch_size(batch_size)
dataset = read_dataset(
functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),
config.input_path[:], input_reader_config, filename_shard_fn=shard_fn)
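    # Keep one in every `sample_1_of_n_examples` records by sharding the
    # record stream and retaining only shard 0.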
if input_reader_config.sample_1_of_n_examples > 1:
dataset = dataset.shard(input_reader_config.sample_1_of_n_examples, 0)
# TODO(rathodv): make batch size a required argument once the old binaries
# are deleted.
dataset = dataset_map_fn(dataset, decoder.decode, batch_size,
input_reader_config)
if reduce_to_frame_fn:
dataset = reduce_to_frame_fn(dataset, dataset_map_fn, batch_size,
input_reader_config)
if transform_input_data_fn is not None:
dataset = dataset_map_fn(dataset, transform_input_data_fn,
batch_size, input_reader_config)
if batch_size:
dataset = dataset.batch(batch_size,
drop_remainder=input_reader_config.drop_remainder)
dataset = dataset.prefetch(input_reader_config.num_prefetch_batches)
return dataset
raise ValueError('Unsupported input_reader_config.')
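# --- Illustrative usage sketch (hypothetical, added for clarity) ------------
# A minimal, hedged example of building a batched dataset from a TFRecord
# input reader config, mirroring dataset_builder_test.py. The glob path is a
# placeholder and `_example_build_tfrecord_dataset` is an illustrative helper.
def _example_build_tfrecord_dataset():
  """Builds a dataset of decoded, batched examples (sketch)."""
  from google.protobuf import text_format
  config = input_reader_pb2.InputReader()
  text_format.Merge("""
      shuffle: false
      num_readers: 1
      tf_record_input_reader {
        input_path: '/path/to/examples-?????.tfrecord'
      }
  """, config)
  return build(config, batch_size=2)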
| 11,251 | 41.460377 | 80 | py |
models | models-master/research/object_detection/builders/optimizer_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to build DetectionModel training optimizers."""
import tensorflow.compat.v1 as tf
from object_detection.utils import learning_schedules
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
from official.modeling.optimization import ema_optimizer
# pylint: enable=g-import-not-at-top
try:
from tensorflow.contrib import opt as tf_opt # pylint: disable=g-import-not-at-top
except: # pylint: disable=bare-except
pass
def build_optimizers_tf_v1(optimizer_config, global_step=None):
"""Create a TF v1 compatible optimizer based on config.
Args:
    optimizer_config: An Optimizer proto message.
global_step: A variable representing the current step.
If None, defaults to tf.train.get_or_create_global_step()
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
optimizer_type = optimizer_config.WhichOneof('optimizer')
optimizer = None
summary_vars = []
if optimizer_type == 'rms_prop_optimizer':
config = optimizer_config.rms_prop_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=config.decay,
momentum=config.momentum_optimizer_value,
epsilon=config.epsilon)
if optimizer_type == 'momentum_optimizer':
config = optimizer_config.momentum_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=config.momentum_optimizer_value)
if optimizer_type == 'adam_optimizer':
config = optimizer_config.adam_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=config.epsilon)
if optimizer is None:
raise ValueError('Optimizer %s not supported.' % optimizer_type)
if optimizer_config.use_moving_average:
optimizer = tf_opt.MovingAverageOptimizer(
optimizer, average_decay=optimizer_config.moving_average_decay)
return optimizer, summary_vars
def build_optimizers_tf_v2(optimizer_config, global_step=None):
"""Create a TF v2 compatible optimizer based on config.
Args:
    optimizer_config: An Optimizer proto message.
global_step: A variable representing the current step.
If None, defaults to tf.train.get_or_create_global_step()
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
optimizer_type = optimizer_config.WhichOneof('optimizer')
optimizer = None
summary_vars = []
if optimizer_type == 'rms_prop_optimizer':
config = optimizer_config.rms_prop_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.keras.optimizers.RMSprop(
learning_rate,
decay=config.decay,
momentum=config.momentum_optimizer_value,
epsilon=config.epsilon)
if optimizer_type == 'momentum_optimizer':
config = optimizer_config.momentum_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.keras.optimizers.SGD(
learning_rate,
momentum=config.momentum_optimizer_value)
if optimizer_type == 'adam_optimizer':
config = optimizer_config.adam_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.keras.optimizers.Adam(learning_rate, epsilon=config.epsilon)
if optimizer is None:
raise ValueError('Optimizer %s not supported.' % optimizer_type)
if optimizer_config.use_moving_average:
optimizer = ema_optimizer.ExponentialMovingAverage(
optimizer=optimizer,
average_decay=optimizer_config.moving_average_decay)
return optimizer, summary_vars
def build(config, global_step=None):
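  """Creates a TF v1 or v2 compatible optimizer based on the execution mode.

  Dispatches to `build_optimizers_tf_v2` when executing eagerly and to
  `build_optimizers_tf_v1` otherwise.

  Args:
    config: An Optimizer proto message.
    global_step: A variable representing the current step.
      If None, defaults to tf.train.get_or_create_global_step()

  Returns:
    An optimizer and a list of variables for summary.
  """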
if tf.executing_eagerly():
return build_optimizers_tf_v2(config, global_step)
else:
return build_optimizers_tf_v1(config, global_step)
def _create_learning_rate(learning_rate_config, global_step=None):
"""Create optimizer learning rate based on config.
Args:
learning_rate_config: A LearningRate proto message.
global_step: A variable representing the current step.
If None, defaults to tf.train.get_or_create_global_step()
Returns:
A learning rate.
Raises:
ValueError: when using an unsupported input data type.
"""
if global_step is None:
global_step = tf.train.get_or_create_global_step()
learning_rate = None
learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
if learning_rate_type == 'constant_learning_rate':
config = learning_rate_config.constant_learning_rate
learning_rate = tf.constant(config.learning_rate, dtype=tf.float32,
name='learning_rate')
if learning_rate_type == 'exponential_decay_learning_rate':
config = learning_rate_config.exponential_decay_learning_rate
learning_rate = learning_schedules.exponential_decay_with_burnin(
global_step,
config.initial_learning_rate,
config.decay_steps,
config.decay_factor,
burnin_learning_rate=config.burnin_learning_rate,
burnin_steps=config.burnin_steps,
min_learning_rate=config.min_learning_rate,
staircase=config.staircase)
if learning_rate_type == 'manual_step_learning_rate':
config = learning_rate_config.manual_step_learning_rate
if not config.schedule:
raise ValueError('Empty learning rate schedule.')
learning_rate_step_boundaries = [x.step for x in config.schedule]
learning_rate_sequence = [config.initial_learning_rate]
learning_rate_sequence += [x.learning_rate for x in config.schedule]
learning_rate = learning_schedules.manual_stepping(
global_step, learning_rate_step_boundaries,
learning_rate_sequence, config.warmup)
if learning_rate_type == 'cosine_decay_learning_rate':
config = learning_rate_config.cosine_decay_learning_rate
learning_rate = learning_schedules.cosine_decay_with_warmup(
global_step,
config.learning_rate_base,
config.total_steps,
config.warmup_learning_rate,
config.warmup_steps,
config.hold_base_rate_steps)
if learning_rate is None:
raise ValueError('Learning_rate %s not supported.' % learning_rate_type)
return learning_rate
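# Illustrative sketch only: building an optimizer from a text proto. The proto
# fields are the ones read by the builders above; the numeric values are
# assumed examples, not taken from any released config.
#
#   from google.protobuf import text_format
#   from object_detection.protos import optimizer_pb2
#
#   optimizer_text_proto = """
#     rms_prop_optimizer {
#       learning_rate {
#         exponential_decay_learning_rate {
#           initial_learning_rate: 0.004
#           decay_steps: 800720
#           decay_factor: 0.95
#         }
#       }
#       momentum_optimizer_value: 0.9
#       decay: 0.9
#       epsilon: 1.0
#     }
#     use_moving_average: false
#   """
#   optimizer_proto = optimizer_pb2.Optimizer()
#   text_format.Merge(optimizer_text_proto, optimizer_proto)
#   optimizer, summary_vars = build(optimizer_proto)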
| 7,782 | 35.369159 | 85 | py |
models | models-master/research/object_detection/builders/image_resizer_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for image resizing operations."""
import functools
import tensorflow.compat.v1 as tf
from object_detection.core import preprocessor
from object_detection.protos import image_resizer_pb2
def _tf_resize_method(resize_method):
"""Maps image resize method from enumeration type to TensorFlow.
Args:
resize_method: The resize_method attribute of keep_aspect_ratio_resizer or
fixed_shape_resizer.
Returns:
method: The corresponding TensorFlow ResizeMethod.
Raises:
ValueError: if `resize_method` is of unknown type.
"""
dict_method = {
image_resizer_pb2.BILINEAR:
tf.image.ResizeMethod.BILINEAR,
image_resizer_pb2.NEAREST_NEIGHBOR:
tf.image.ResizeMethod.NEAREST_NEIGHBOR,
image_resizer_pb2.BICUBIC:
tf.image.ResizeMethod.BICUBIC,
image_resizer_pb2.AREA:
tf.image.ResizeMethod.AREA
}
if resize_method in dict_method:
return dict_method[resize_method]
else:
raise ValueError('Unknown resize_method')
def build(image_resizer_config):
"""Builds callable for image resizing operations.
Args:
image_resizer_config: image_resizer.proto object containing parameters for
an image resizing operation.
Returns:
image_resizer_fn: Callable for image resizing. This callable always takes
a rank-3 image tensor (corresponding to a single image) and returns a
rank-3 image tensor, possibly with new spatial dimensions.
Raises:
ValueError: if `image_resizer_config` is of incorrect type.
ValueError: if `image_resizer_config.image_resizer_oneof` is of expected
type.
ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer
is used.
"""
if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
raise ValueError('image_resizer_config not of type '
'image_resizer_pb2.ImageResizer.')
image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof')
if image_resizer_oneof == 'keep_aspect_ratio_resizer':
keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
if not (keep_aspect_ratio_config.min_dimension <=
keep_aspect_ratio_config.max_dimension):
raise ValueError('min_dimension > max_dimension')
method = _tf_resize_method(keep_aspect_ratio_config.resize_method)
per_channel_pad_value = (0, 0, 0)
if keep_aspect_ratio_config.per_channel_pad_value:
per_channel_pad_value = tuple(keep_aspect_ratio_config.
per_channel_pad_value)
image_resizer_fn = functools.partial(
preprocessor.resize_to_range,
min_dimension=keep_aspect_ratio_config.min_dimension,
max_dimension=keep_aspect_ratio_config.max_dimension,
method=method,
pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension,
per_channel_pad_value=per_channel_pad_value)
if not keep_aspect_ratio_config.convert_to_grayscale:
return image_resizer_fn
elif image_resizer_oneof == 'fixed_shape_resizer':
fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
method = _tf_resize_method(fixed_shape_resizer_config.resize_method)
image_resizer_fn = functools.partial(
preprocessor.resize_image,
new_height=fixed_shape_resizer_config.height,
new_width=fixed_shape_resizer_config.width,
method=method)
if not fixed_shape_resizer_config.convert_to_grayscale:
return image_resizer_fn
elif image_resizer_oneof == 'identity_resizer':
def image_resizer_fn(image, masks=None, **kwargs):
del kwargs
if masks is None:
return [image, tf.shape(image)]
else:
return [image, masks, tf.shape(image)]
return image_resizer_fn
elif image_resizer_oneof == 'conditional_shape_resizer':
conditional_shape_resize_config = (
image_resizer_config.conditional_shape_resizer)
method = _tf_resize_method(conditional_shape_resize_config.resize_method)
if conditional_shape_resize_config.condition == (
image_resizer_pb2.ConditionalShapeResizer.GREATER):
image_resizer_fn = functools.partial(
preprocessor.resize_to_max_dimension,
max_dimension=conditional_shape_resize_config.size_threshold,
method=method)
elif conditional_shape_resize_config.condition == (
image_resizer_pb2.ConditionalShapeResizer.SMALLER):
image_resizer_fn = functools.partial(
preprocessor.resize_to_min_dimension,
min_dimension=conditional_shape_resize_config.size_threshold,
method=method)
else:
raise ValueError(
'Invalid image resizer condition option for '
'ConditionalShapeResizer: \'%s\'.'
% conditional_shape_resize_config.condition)
if not conditional_shape_resize_config.convert_to_grayscale:
return image_resizer_fn
elif image_resizer_oneof == 'pad_to_multiple_resizer':
pad_to_multiple_resizer_config = (
image_resizer_config.pad_to_multiple_resizer)
    if pad_to_multiple_resizer_config.multiple <= 0:
raise ValueError('`multiple` for pad_to_multiple_resizer should be > 0.')
else:
image_resizer_fn = functools.partial(
preprocessor.resize_pad_to_multiple,
multiple=pad_to_multiple_resizer_config.multiple)
if not pad_to_multiple_resizer_config.convert_to_grayscale:
return image_resizer_fn
else:
raise ValueError(
'Invalid image resizer option: \'%s\'.' % image_resizer_oneof)
def grayscale_image_resizer(image, masks=None):
"""Convert to grayscale before applying image_resizer_fn.
Args:
image: A 3D tensor of shape [height, width, 3]
masks: (optional) rank 3 float32 tensor with shape [num_instances, height,
width] containing instance masks.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A 3D tensor of shape [new_height, new_width, 1],
where the image has been resized (with bilinear interpolation) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width].
resized_image_shape: A 1D tensor of shape [3] containing shape of the
resized image.
"""
# image_resizer_fn returns [resized_image, resized_image_shape] if
# mask==None, otherwise it returns
# [resized_image, resized_mask, resized_image_shape]. In either case, we
# only deal with first and last element of the returned list.
retval = image_resizer_fn(image, masks)
resized_image = retval[0]
resized_image_shape = retval[-1]
retval[0] = preprocessor.rgb_to_gray(resized_image)
retval[-1] = tf.concat([resized_image_shape[:-1], [1]], 0)
return retval
return functools.partial(grayscale_image_resizer)
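# Illustrative sketch only: building a keep-aspect-ratio resizer and applying
# it to a single image tensor. The fields follow the keep_aspect_ratio_resizer
# branch above; the dimensions are assumed values. When no masks are passed,
# the returned callable yields [resized_image, resized_image_shape], as noted
# in the comment inside grayscale_image_resizer.
#
#   from google.protobuf import text_format
#
#   resizer_text_proto = """
#     keep_aspect_ratio_resizer {
#       min_dimension: 600
#       max_dimension: 1024
#       pad_to_max_dimension: true
#     }
#   """
#   resizer_config = image_resizer_pb2.ImageResizer()
#   text_format.Merge(resizer_text_proto, resizer_config)
#   image_resizer_fn = build(resizer_config)
#   resized_image, true_image_shape = image_resizer_fn(image)  # image: [H, W, 3]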
| 7,725 | 40.095745 | 80 | py |
models | models-master/research/object_detection/builders/preprocessor_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder for preprocessing steps."""
import tensorflow.compat.v1 as tf
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2
def _get_step_config_from_proto(preprocessor_step_config, step_name):
"""Returns the value of a field named step_name from proto.
Args:
preprocessor_step_config: A preprocessor_pb2.PreprocessingStep object.
step_name: Name of the field to get value from.
Returns:
    result_dict: a sub proto message from preprocessor_step_config which will
      later be converted to a dictionary.
Raises:
ValueError: If field does not exist in proto.
"""
for field, value in preprocessor_step_config.ListFields():
if field.name == step_name:
return value
raise ValueError('Could not get field %s from proto!' % step_name)
def _get_dict_from_proto(config):
"""Helper function to put all proto fields into a dictionary.
  For many preprocessing steps, there's a trivial 1-1 mapping from proto fields
to function arguments. This function automatically populates a dictionary with
the arguments from the proto.
Protos that CANNOT be trivially populated include:
* nested messages.
  * steps that check if an optional field is set (i.e. where None != 0).
  * protos that don't map 1-1 to arguments (i.e. list should be reshaped).
  * fields requiring additional validation (i.e. repeated field has n elements).
Args:
config: A protobuf object that does not violate the conditions above.
Returns:
result_dict: |config| converted into a python dictionary.
"""
result_dict = {}
for field, value in config.ListFields():
result_dict[field.name] = value
return result_dict
# A map from a PreprocessingStep proto config field name to the preprocessing
# function that should be used. The PreprocessingStep proto should be parsable
# with _get_dict_from_proto.
PREPROCESSING_FUNCTION_MAP = {
'normalize_image':
preprocessor.normalize_image,
'random_pixel_value_scale':
preprocessor.random_pixel_value_scale,
'random_image_scale':
preprocessor.random_image_scale,
'random_rgb_to_gray':
preprocessor.random_rgb_to_gray,
'random_adjust_brightness':
preprocessor.random_adjust_brightness,
'random_adjust_contrast':
preprocessor.random_adjust_contrast,
'random_adjust_hue':
preprocessor.random_adjust_hue,
'random_adjust_saturation':
preprocessor.random_adjust_saturation,
'random_distort_color':
preprocessor.random_distort_color,
'random_crop_to_aspect_ratio':
preprocessor.random_crop_to_aspect_ratio,
'random_black_patches':
preprocessor.random_black_patches,
'random_jpeg_quality':
preprocessor.random_jpeg_quality,
'random_downscale_to_target_pixels':
preprocessor.random_downscale_to_target_pixels,
'random_patch_gaussian':
preprocessor.random_patch_gaussian,
'rgb_to_gray':
preprocessor.rgb_to_gray,
'scale_boxes_to_pixel_coordinates':
(preprocessor.scale_boxes_to_pixel_coordinates),
'subtract_channel_mean':
preprocessor.subtract_channel_mean,
'convert_class_logits_to_softmax':
preprocessor.convert_class_logits_to_softmax,
'adjust_gamma':
preprocessor.adjust_gamma,
}
# A map to convert from preprocessor_pb2.ResizeImage.Method enum to
# tf.image.ResizeMethod.
RESIZE_METHOD_MAP = {
preprocessor_pb2.ResizeImage.AREA: tf.image.ResizeMethod.AREA,
preprocessor_pb2.ResizeImage.BICUBIC: tf.image.ResizeMethod.BICUBIC,
preprocessor_pb2.ResizeImage.BILINEAR: tf.image.ResizeMethod.BILINEAR,
preprocessor_pb2.ResizeImage.NEAREST_NEIGHBOR: (
tf.image.ResizeMethod.NEAREST_NEIGHBOR),
}
def get_random_jitter_kwargs(proto):
return {
'ratio':
proto.ratio,
'jitter_mode':
preprocessor_pb2.RandomJitterBoxes.JitterMode.Name(proto.jitter_mode
).lower()
}
def build(preprocessor_step_config):
"""Builds preprocessing step based on the configuration.
Args:
preprocessor_step_config: PreprocessingStep configuration proto.
Returns:
function, argmap: A callable function and an argument map to call function
with.
Raises:
ValueError: On invalid configuration.
"""
step_type = preprocessor_step_config.WhichOneof('preprocessing_step')
if step_type in PREPROCESSING_FUNCTION_MAP:
preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type]
step_config = _get_step_config_from_proto(preprocessor_step_config,
step_type)
function_args = _get_dict_from_proto(step_config)
return (preprocessing_function, function_args)
if step_type == 'random_horizontal_flip':
config = preprocessor_step_config.random_horizontal_flip
return (preprocessor.random_horizontal_flip,
{
'keypoint_flip_permutation': tuple(
config.keypoint_flip_permutation) or None,
'probability': config.probability or None,
})
if step_type == 'random_vertical_flip':
config = preprocessor_step_config.random_vertical_flip
return (preprocessor.random_vertical_flip,
{
'keypoint_flip_permutation': tuple(
config.keypoint_flip_permutation) or None,
'probability': config.probability or None,
})
if step_type == 'random_rotation90':
config = preprocessor_step_config.random_rotation90
return (preprocessor.random_rotation90,
{
'keypoint_rot_permutation': tuple(
config.keypoint_rot_permutation) or None,
'probability': config.probability or None,
})
if step_type == 'random_crop_image':
config = preprocessor_step_config.random_crop_image
return (preprocessor.random_crop_image,
{
'min_object_covered': config.min_object_covered,
'aspect_ratio_range': (config.min_aspect_ratio,
config.max_aspect_ratio),
'area_range': (config.min_area, config.max_area),
'overlap_thresh': config.overlap_thresh,
'clip_boxes': config.clip_boxes,
'random_coef': config.random_coef,
})
if step_type == 'random_pad_image':
config = preprocessor_step_config.random_pad_image
min_image_size = None
if (config.HasField('min_image_height') !=
config.HasField('min_image_width')):
raise ValueError('min_image_height and min_image_width should be either '
'both set or both unset.')
if config.HasField('min_image_height'):
min_image_size = (config.min_image_height, config.min_image_width)
max_image_size = None
if (config.HasField('max_image_height') !=
config.HasField('max_image_width')):
raise ValueError('max_image_height and max_image_width should be either '
'both set or both unset.')
if config.HasField('max_image_height'):
max_image_size = (config.max_image_height, config.max_image_width)
pad_color = config.pad_color or None
if pad_color:
if len(pad_color) != 3:
tf.logging.warn('pad_color should have 3 elements (RGB) if set!')
pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32)
return (preprocessor.random_pad_image,
{
'min_image_size': min_image_size,
'max_image_size': max_image_size,
'pad_color': pad_color,
})
if step_type == 'random_absolute_pad_image':
config = preprocessor_step_config.random_absolute_pad_image
max_height_padding = config.max_height_padding or 1
max_width_padding = config.max_width_padding or 1
pad_color = config.pad_color or None
if pad_color:
if len(pad_color) != 3:
tf.logging.warn('pad_color should have 3 elements (RGB) if set!')
pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32)
return (preprocessor.random_absolute_pad_image,
{
'max_height_padding': max_height_padding,
'max_width_padding': max_width_padding,
'pad_color': pad_color,
})
if step_type == 'random_crop_pad_image':
config = preprocessor_step_config.random_crop_pad_image
min_padded_size_ratio = config.min_padded_size_ratio
if min_padded_size_ratio and len(min_padded_size_ratio) != 2:
raise ValueError('min_padded_size_ratio should have 2 elements if set!')
max_padded_size_ratio = config.max_padded_size_ratio
if max_padded_size_ratio and len(max_padded_size_ratio) != 2:
raise ValueError('max_padded_size_ratio should have 2 elements if set!')
pad_color = config.pad_color or None
if pad_color:
if len(pad_color) != 3:
tf.logging.warn('pad_color should have 3 elements (RGB) if set!')
pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32)
kwargs = {
'min_object_covered': config.min_object_covered,
'aspect_ratio_range': (config.min_aspect_ratio,
config.max_aspect_ratio),
'area_range': (config.min_area, config.max_area),
'overlap_thresh': config.overlap_thresh,
'clip_boxes': config.clip_boxes,
'random_coef': config.random_coef,
'pad_color': pad_color,
}
if min_padded_size_ratio:
kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio)
if max_padded_size_ratio:
kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio)
return (preprocessor.random_crop_pad_image, kwargs)
if step_type == 'random_resize_method':
config = preprocessor_step_config.random_resize_method
return (preprocessor.random_resize_method,
{
'target_size': [config.target_height, config.target_width],
})
if step_type == 'resize_image':
config = preprocessor_step_config.resize_image
method = RESIZE_METHOD_MAP[config.method]
return (preprocessor.resize_image,
{
'new_height': config.new_height,
'new_width': config.new_width,
'method': method
})
if step_type == 'random_self_concat_image':
config = preprocessor_step_config.random_self_concat_image
return (preprocessor.random_self_concat_image, {
'concat_vertical_probability': config.concat_vertical_probability,
'concat_horizontal_probability': config.concat_horizontal_probability
})
if step_type == 'ssd_random_crop':
config = preprocessor_step_config.ssd_random_crop
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
clip_boxes = [op.clip_boxes for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
return (preprocessor.ssd_random_crop,
{
'min_object_covered': min_object_covered,
'aspect_ratio_range': aspect_ratio_range,
'area_range': area_range,
'overlap_thresh': overlap_thresh,
'clip_boxes': clip_boxes,
'random_coef': random_coef,
})
return (preprocessor.ssd_random_crop, {})
if step_type == 'autoaugment_image':
config = preprocessor_step_config.autoaugment_image
return (preprocessor.autoaugment_image, {
'policy_name': config.policy_name,
})
if step_type == 'drop_label_probabilistically':
config = preprocessor_step_config.drop_label_probabilistically
return (preprocessor.drop_label_probabilistically, {
'dropped_label': config.label,
'drop_probability': config.drop_probability,
})
if step_type == 'remap_labels':
config = preprocessor_step_config.remap_labels
return (preprocessor.remap_labels, {
'original_labels': config.original_labels,
'new_label': config.new_label
})
if step_type == 'ssd_random_crop_pad':
config = preprocessor_step_config.ssd_random_crop_pad
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
clip_boxes = [op.clip_boxes for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
min_padded_size_ratio = [tuple(op.min_padded_size_ratio)
for op in config.operations]
max_padded_size_ratio = [tuple(op.max_padded_size_ratio)
for op in config.operations]
pad_color = [(op.pad_color_r, op.pad_color_g, op.pad_color_b)
for op in config.operations]
return (preprocessor.ssd_random_crop_pad,
{
'min_object_covered': min_object_covered,
'aspect_ratio_range': aspect_ratio_range,
'area_range': area_range,
'overlap_thresh': overlap_thresh,
'clip_boxes': clip_boxes,
'random_coef': random_coef,
'min_padded_size_ratio': min_padded_size_ratio,
'max_padded_size_ratio': max_padded_size_ratio,
'pad_color': pad_color,
})
return (preprocessor.ssd_random_crop_pad, {})
if step_type == 'ssd_random_crop_fixed_aspect_ratio':
config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
clip_boxes = [op.clip_boxes for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
return (preprocessor.ssd_random_crop_fixed_aspect_ratio,
{
'min_object_covered': min_object_covered,
'aspect_ratio': config.aspect_ratio,
'area_range': area_range,
'overlap_thresh': overlap_thresh,
'clip_boxes': clip_boxes,
'random_coef': random_coef,
})
return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})
if step_type == 'ssd_random_crop_pad_fixed_aspect_ratio':
config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio
kwargs = {}
aspect_ratio = config.aspect_ratio
if aspect_ratio:
kwargs['aspect_ratio'] = aspect_ratio
min_padded_size_ratio = config.min_padded_size_ratio
if min_padded_size_ratio:
if len(min_padded_size_ratio) != 2:
raise ValueError('min_padded_size_ratio should have 2 elements if set!')
kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio)
max_padded_size_ratio = config.max_padded_size_ratio
if max_padded_size_ratio:
if len(max_padded_size_ratio) != 2:
raise ValueError('max_padded_size_ratio should have 2 elements if set!')
kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio)
if config.operations:
kwargs['min_object_covered'] = [op.min_object_covered
for op in config.operations]
kwargs['aspect_ratio_range'] = [(op.min_aspect_ratio, op.max_aspect_ratio)
for op in config.operations]
kwargs['area_range'] = [(op.min_area, op.max_area)
for op in config.operations]
kwargs['overlap_thresh'] = [op.overlap_thresh for op in config.operations]
kwargs['clip_boxes'] = [op.clip_boxes for op in config.operations]
kwargs['random_coef'] = [op.random_coef for op in config.operations]
return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, kwargs)
if step_type == 'random_square_crop_by_scale':
config = preprocessor_step_config.random_square_crop_by_scale
return preprocessor.random_square_crop_by_scale, {
'scale_min': config.scale_min,
'scale_max': config.scale_max,
'max_border': config.max_border,
'num_scales': config.num_scales
}
if step_type == 'random_scale_crop_and_pad_to_square':
config = preprocessor_step_config.random_scale_crop_and_pad_to_square
return preprocessor.random_scale_crop_and_pad_to_square, {
'scale_min': config.scale_min,
'scale_max': config.scale_max,
'output_size': config.output_size,
}
if step_type == 'random_jitter_boxes':
config = preprocessor_step_config.random_jitter_boxes
kwargs = get_random_jitter_kwargs(config)
return preprocessor.random_jitter_boxes, kwargs
raise ValueError('Unknown preprocessing step.')
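# Illustrative sketch only: the (function, args) pair returned by `build` is
# typically gathered into a list of augmentation options for the input
# pipeline. The probability value below is an assumed example.
#
#   from google.protobuf import text_format
#
#   step_proto = preprocessor_pb2.PreprocessingStep()
#   text_format.Merge("""
#     random_horizontal_flip {
#       probability: 0.5
#     }
#   """, step_proto)
#   function, args = build(step_proto)
#   data_augmentation_options = [(function, args)]
#   # These options can then be applied to an input tensor dict, e.g. via
#   # preprocessor.preprocess(tensor_dict, data_augmentation_options).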
| 18,154 | 39.88964 | 80 | py |
models | models-master/research/object_detection/builders/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/object_detection/builders/graph_rewriter_builder.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for quantized training and evaluation."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import quantize as contrib_quantize
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
def build(graph_rewriter_config, is_training):
"""Returns a function that modifies default graph based on options.
Args:
graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto.
    is_training: whether in training or eval mode.
"""
def graph_rewrite_fn():
"""Function to quantize weights and activation of the default graph."""
if (graph_rewriter_config.quantization.weight_bits != 8 or
graph_rewriter_config.quantization.activation_bits != 8):
raise ValueError('Only 8bit quantization is supported')
# Quantize the graph by inserting quantize ops for weights and activations
if is_training:
contrib_quantize.experimental_create_training_graph(
input_graph=tf.get_default_graph(),
quant_delay=graph_rewriter_config.quantization.delay
)
else:
contrib_quantize.experimental_create_eval_graph(
input_graph=tf.get_default_graph()
)
slim.summarize_collection('quant_vars')
return graph_rewrite_fn
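# Illustrative sketch only: constructing and applying the rewrite function.
# The quantization fields are the ones read above; the delay value is an
# assumed example.
#
#   from google.protobuf import text_format
#   from object_detection.protos import graph_rewriter_pb2
#
#   rewriter_text_proto = """
#     quantization {
#       delay: 500000
#       weight_bits: 8
#       activation_bits: 8
#     }
#   """
#   rewriter_config = graph_rewriter_pb2.GraphRewriter()
#   text_format.Merge(rewriter_text_proto, rewriter_config)
#   graph_rewrite_fn = build(rewriter_config, is_training=True)
#   graph_rewrite_fn()  # Rewrites the default graph in place for quantization.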
| 2,019 | 36.407407 | 80 | py |
models | models-master/research/object_detection/builders/decoder_builder.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataDecoder builder.
Creates DataDecoders from InputReader configs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from object_detection.data_decoders import tf_example_decoder
from object_detection.data_decoders import tf_sequence_example_decoder
from object_detection.protos import input_reader_pb2
def build(input_reader_config):
"""Builds a DataDecoder based only on the open source config proto.
Args:
input_reader_config: An input_reader_pb2.InputReader object.
Returns:
A DataDecoder based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
label_map_proto_file = None
if input_reader_config.HasField('label_map_path'):
label_map_proto_file = input_reader_config.label_map_path
input_type = input_reader_config.input_type
if input_type == input_reader_pb2.InputType.Value('TF_EXAMPLE'):
decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=input_reader_config.load_instance_masks,
load_multiclass_scores=input_reader_config.load_multiclass_scores,
load_context_features=input_reader_config.load_context_features,
instance_mask_type=input_reader_config.mask_type,
label_map_proto_file=label_map_proto_file,
use_display_name=input_reader_config.use_display_name,
num_additional_channels=input_reader_config.num_additional_channels,
num_keypoints=input_reader_config.num_keypoints,
expand_hierarchy_labels=input_reader_config.expand_labels_hierarchy,
load_dense_pose=input_reader_config.load_dense_pose,
load_track_id=input_reader_config.load_track_id,
load_keypoint_depth_features=input_reader_config
.load_keypoint_depth_features)
return decoder
elif input_type == input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE'):
decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
label_map_proto_file=label_map_proto_file,
load_context_features=input_reader_config.load_context_features,
load_context_image_ids=input_reader_config.load_context_image_ids)
return decoder
raise ValueError('Unsupported input_type in config.')
raise ValueError('Unsupported input_reader_config.')
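# Illustrative sketch only: building a decoder and applying it to a single
# serialized tf.Example. The InputReader fields are those consumed above; the
# label map path and the serialized example are assumed placeholders.
#
#   from google.protobuf import text_format
#
#   input_reader_text_proto = """
#     tf_record_input_reader {}
#     load_instance_masks: false
#     label_map_path: 'path/to/label_map.pbtxt'
#   """
#   input_reader_config = input_reader_pb2.InputReader()
#   text_format.Merge(input_reader_text_proto, input_reader_config)
#   decoder = build(input_reader_config)
#   # serialized_example: a scalar string tensor holding one tf.Example.
#   tensor_dict = decoder.decode(serialized_example)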
| 3,304 | 43.066667 | 80 | py |
models | models-master/research/object_detection/builders/preprocessor_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for preprocessor_builder."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import preprocessor_builder
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2
class PreprocessorBuilderTest(tf.test.TestCase):
def assert_dictionary_close(self, dict1, dict2):
"""Helper to check if two dicts with floatst or integers are close."""
self.assertEqual(sorted(dict1.keys()), sorted(dict2.keys()))
for key in dict1:
value = dict1[key]
if isinstance(value, float):
self.assertAlmostEqual(value, dict2[key])
else:
self.assertEqual(value, dict2[key])
def test_build_normalize_image(self):
preprocessor_text_proto = """
normalize_image {
original_minval: 0.0
original_maxval: 255.0
target_minval: -1.0
target_maxval: 1.0
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.normalize_image)
self.assertEqual(args, {
'original_minval': 0.0,
'original_maxval': 255.0,
'target_minval': -1.0,
'target_maxval': 1.0,
})
def test_build_random_horizontal_flip(self):
preprocessor_text_proto = """
random_horizontal_flip {
keypoint_flip_permutation: 1
keypoint_flip_permutation: 0
keypoint_flip_permutation: 2
keypoint_flip_permutation: 3
keypoint_flip_permutation: 5
keypoint_flip_permutation: 4
probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_horizontal_flip)
self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4),
'probability': 0.5})
def test_build_random_vertical_flip(self):
preprocessor_text_proto = """
random_vertical_flip {
keypoint_flip_permutation: 1
keypoint_flip_permutation: 0
keypoint_flip_permutation: 2
keypoint_flip_permutation: 3
keypoint_flip_permutation: 5
keypoint_flip_permutation: 4
probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_vertical_flip)
self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4),
'probability': 0.5})
def test_build_random_rotation90(self):
preprocessor_text_proto = """
random_rotation90 {
keypoint_rot_permutation: 3
keypoint_rot_permutation: 0
keypoint_rot_permutation: 1
keypoint_rot_permutation: 2
probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_rotation90)
self.assertEqual(args, {'keypoint_rot_permutation': (3, 0, 1, 2),
'probability': 0.5})
def test_build_random_pixel_value_scale(self):
preprocessor_text_proto = """
random_pixel_value_scale {
minval: 0.8
maxval: 1.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_pixel_value_scale)
self.assert_dictionary_close(args, {'minval': 0.8, 'maxval': 1.2})
def test_build_random_image_scale(self):
preprocessor_text_proto = """
random_image_scale {
min_scale_ratio: 0.8
max_scale_ratio: 2.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_image_scale)
self.assert_dictionary_close(args, {'min_scale_ratio': 0.8,
'max_scale_ratio': 2.2})
def test_build_random_rgb_to_gray(self):
preprocessor_text_proto = """
random_rgb_to_gray {
probability: 0.8
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_rgb_to_gray)
self.assert_dictionary_close(args, {'probability': 0.8})
def test_build_random_adjust_brightness(self):
preprocessor_text_proto = """
random_adjust_brightness {
max_delta: 0.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_brightness)
self.assert_dictionary_close(args, {'max_delta': 0.2})
def test_build_random_adjust_contrast(self):
preprocessor_text_proto = """
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_contrast)
self.assert_dictionary_close(args, {'min_delta': 0.7, 'max_delta': 1.1})
def test_build_random_adjust_hue(self):
preprocessor_text_proto = """
random_adjust_hue {
max_delta: 0.01
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_hue)
self.assert_dictionary_close(args, {'max_delta': 0.01})
def test_build_random_adjust_saturation(self):
preprocessor_text_proto = """
random_adjust_saturation {
min_delta: 0.75
max_delta: 1.15
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_saturation)
self.assert_dictionary_close(args, {'min_delta': 0.75, 'max_delta': 1.15})
def test_build_random_distort_color(self):
preprocessor_text_proto = """
random_distort_color {
color_ordering: 1
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_distort_color)
self.assertEqual(args, {'color_ordering': 1})
def test_build_random_jitter_boxes(self):
preprocessor_text_proto = """
random_jitter_boxes {
ratio: 0.1
jitter_mode: SHRINK
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_jitter_boxes)
self.assert_dictionary_close(args, {'ratio': 0.1, 'jitter_mode': 'shrink'})
def test_build_random_crop_image(self):
preprocessor_text_proto = """
random_crop_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
})
def test_build_random_pad_image(self):
preprocessor_text_proto = """
random_pad_image {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_pad_image)
self.assertEqual(args, {
'min_image_size': None,
'max_image_size': None,
'pad_color': None,
})
def test_build_random_absolute_pad_image(self):
preprocessor_text_proto = """
random_absolute_pad_image {
max_height_padding: 50
max_width_padding: 100
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_absolute_pad_image)
self.assertEqual(args, {
'max_height_padding': 50,
'max_width_padding': 100,
'pad_color': None,
})
def test_build_random_crop_pad_image(self):
preprocessor_text_proto = """
random_crop_pad_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_pad_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
'pad_color': None,
})
def test_build_random_crop_pad_image_with_optional_parameters(self):
preprocessor_text_proto = """
random_crop_pad_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
min_padded_size_ratio: 0.5
min_padded_size_ratio: 0.75
max_padded_size_ratio: 0.5
max_padded_size_ratio: 0.75
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_pad_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
'min_padded_size_ratio': (0.5, 0.75),
'max_padded_size_ratio': (0.5, 0.75),
'pad_color': None,
})
def test_build_random_crop_to_aspect_ratio(self):
preprocessor_text_proto = """
random_crop_to_aspect_ratio {
aspect_ratio: 0.85
overlap_thresh: 0.35
clip_boxes: False
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_to_aspect_ratio)
self.assert_dictionary_close(args, {'aspect_ratio': 0.85,
'overlap_thresh': 0.35,
'clip_boxes': False})
def test_build_random_black_patches(self):
preprocessor_text_proto = """
random_black_patches {
max_black_patches: 20
probability: 0.95
size_to_image_ratio: 0.12
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_black_patches)
self.assert_dictionary_close(args, {'max_black_patches': 20,
'probability': 0.95,
'size_to_image_ratio': 0.12})
def test_build_random_jpeg_quality(self):
preprocessor_text_proto = """
random_jpeg_quality {
random_coef: 0.5
min_jpeg_quality: 40
max_jpeg_quality: 90
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_jpeg_quality)
self.assert_dictionary_close(args, {'random_coef': 0.5,
'min_jpeg_quality': 40,
'max_jpeg_quality': 90})
def test_build_random_downscale_to_target_pixels(self):
preprocessor_text_proto = """
random_downscale_to_target_pixels {
random_coef: 0.5
min_target_pixels: 200
max_target_pixels: 900
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_downscale_to_target_pixels)
self.assert_dictionary_close(args, {
'random_coef': 0.5,
'min_target_pixels': 200,
'max_target_pixels': 900
})
def test_build_random_patch_gaussian(self):
preprocessor_text_proto = """
random_patch_gaussian {
random_coef: 0.5
min_patch_size: 10
max_patch_size: 300
min_gaussian_stddev: 0.2
max_gaussian_stddev: 1.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_patch_gaussian)
self.assert_dictionary_close(args, {
'random_coef': 0.5,
'min_patch_size': 10,
'max_patch_size': 300,
'min_gaussian_stddev': 0.2,
'max_gaussian_stddev': 1.5
})
def test_auto_augment_image(self):
preprocessor_text_proto = """
autoaugment_image {
policy_name: 'v0'
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.autoaugment_image)
self.assert_dictionary_close(args, {'policy_name': 'v0'})
def test_drop_label_probabilistically(self):
preprocessor_text_proto = """
drop_label_probabilistically{
label: 2
drop_probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.drop_label_probabilistically)
self.assert_dictionary_close(args, {
'dropped_label': 2,
'drop_probability': 0.5
})
def test_remap_labels(self):
preprocessor_text_proto = """
remap_labels{
original_labels: 1
original_labels: 2
new_label: 3
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.remap_labels)
self.assert_dictionary_close(args, {
'original_labels': [1, 2],
'new_label': 3
})
def test_build_random_resize_method(self):
preprocessor_text_proto = """
random_resize_method {
target_height: 75
target_width: 100
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_resize_method)
self.assert_dictionary_close(args, {'target_size': [75, 100]})
def test_build_scale_boxes_to_pixel_coordinates(self):
preprocessor_text_proto = """
scale_boxes_to_pixel_coordinates {}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.scale_boxes_to_pixel_coordinates)
self.assertEqual(args, {})
def test_build_resize_image(self):
preprocessor_text_proto = """
resize_image {
new_height: 75
new_width: 100
method: BICUBIC
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.resize_image)
self.assertEqual(args, {'new_height': 75,
'new_width': 100,
'method': tf.image.ResizeMethod.BICUBIC})
def test_build_rgb_to_gray(self):
preprocessor_text_proto = """
rgb_to_gray {}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.rgb_to_gray)
self.assertEqual(args, {})
def test_build_subtract_channel_mean(self):
preprocessor_text_proto = """
subtract_channel_mean {
means: [1.0, 2.0, 3.0]
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.subtract_channel_mean)
self.assertEqual(args, {'means': [1.0, 2.0, 3.0]})
def test_random_self_concat_image(self):
preprocessor_text_proto = """
random_self_concat_image {
concat_vertical_probability: 0.5
concat_horizontal_probability: 0.25
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_self_concat_image)
self.assertEqual(args, {'concat_vertical_probability': 0.5,
'concat_horizontal_probability': 0.25})
def test_build_ssd_random_crop(self):
preprocessor_text_proto = """
ssd_random_crop {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375]})
def test_build_ssd_random_crop_empty_operations(self):
preprocessor_text_proto = """
ssd_random_crop {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop)
self.assertEqual(args, {})
def test_build_ssd_random_crop_pad(self):
preprocessor_text_proto = """
ssd_random_crop_pad {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
pad_color_r: 0.5
pad_color_g: 0.5
pad_color_b: 0.5
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
pad_color_r: 0.5
pad_color_g: 0.5
pad_color_b: 0.5
}
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop_pad)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375],
'min_padded_size_ratio': [(1.0, 1.0), (1.0, 1.0)],
'max_padded_size_ratio': [(2.0, 2.0), (2.0, 2.0)],
'pad_color': [(0.5, 0.5, 0.5), (0.5, 0.5, 0.5)]})
def test_build_ssd_random_crop_fixed_aspect_ratio(self):
preprocessor_text_proto = """
ssd_random_crop_fixed_aspect_ratio {
operations {
min_object_covered: 0.0
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
aspect_ratio: 0.875
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop_fixed_aspect_ratio)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio': 0.875,
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375]})
def test_build_ssd_random_crop_pad_fixed_aspect_ratio(self):
preprocessor_text_proto = """
ssd_random_crop_pad_fixed_aspect_ratio {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
aspect_ratio: 0.875
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function,
preprocessor.ssd_random_crop_pad_fixed_aspect_ratio)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio': 0.875,
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375],
'min_padded_size_ratio': (1.0, 1.0),
'max_padded_size_ratio': (2.0, 2.0)})
  def test_build_convert_class_logits_to_softmax(self):
preprocessor_text_proto = """
convert_class_logits_to_softmax {
temperature: 2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.convert_class_logits_to_softmax)
self.assertEqual(args, {'temperature': 2})
def test_random_crop_by_scale(self):
preprocessor_text_proto = """
random_square_crop_by_scale {
scale_min: 0.25
scale_max: 2.0
num_scales: 8
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_square_crop_by_scale)
self.assertEqual(args, {
'scale_min': 0.25,
'scale_max': 2.0,
'num_scales': 8,
'max_border': 128
})
def test_adjust_gamma(self):
preprocessor_text_proto = """
adjust_gamma {
gamma: 2.2
gain: 2.0
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.adjust_gamma)
self.assert_dictionary_close(args, {'gamma': 2.2, 'gain': 2.0})
if __name__ == '__main__':
tf.test.main()
| 28,286 | 35.546512 | 80 | py |
models | models-master/research/object_detection/builders/model_builder_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_builder under TensorFlow 2.X."""
import os
import unittest
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import model_builder
from object_detection.builders import model_builder_test
from object_detection.core import losses
from object_detection.meta_architectures import deepmac_meta_arch
from object_detection.models import center_net_hourglass_feature_extractor
from object_detection.models.keras_models import hourglass_network
from object_detection.protos import center_net_pb2
from object_detection.protos import model_pb2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ModelBuilderTF2Test(
model_builder_test.ModelBuilderTest, parameterized.TestCase):
def default_ssd_feature_extractor(self):
return 'ssd_resnet50_v1_fpn_keras'
def default_faster_rcnn_feature_extractor(self):
return 'faster_rcnn_resnet101_keras'
def ssd_feature_extractors(self):
return model_builder.SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP
def get_override_base_feature_extractor_hyperparams(self, extractor_type):
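    # No TF2 Keras feature extractor needs the base feature extractor
    # hyperparams overridden, so membership in an empty container (always
    # False) is intentional here.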
return extractor_type in {}
def faster_rcnn_feature_extractors(self):
return model_builder.FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP
def get_fake_label_map_file_path(self):
keypoint_spec_text = """
item {
name: "/m/01g317"
id: 1
display_name: "person"
keypoints {
id: 0
label: 'nose'
}
keypoints {
id: 1
label: 'left_shoulder'
}
keypoints {
id: 2
label: 'right_shoulder'
}
keypoints {
id: 3
label: 'hip'
}
}
"""
keypoint_label_map_path = os.path.join(
self.get_temp_dir(), 'keypoint_label_map')
with tf.gfile.Open(keypoint_label_map_path, 'wb') as f:
f.write(keypoint_spec_text)
return keypoint_label_map_path
def get_fake_keypoint_proto(self, customize_head_params=False):
task_proto_txt = """
task_name: "human_pose"
task_loss_weight: 0.9
keypoint_regression_loss_weight: 1.0
keypoint_heatmap_loss_weight: 0.1
keypoint_offset_loss_weight: 0.5
heatmap_bias_init: 2.14
keypoint_class_name: "/m/01g317"
loss {
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 3.0
beta: 4.0
}
}
localization_loss {
l1_localization_loss {
}
}
}
keypoint_label_to_std {
key: "nose"
value: 0.3
}
keypoint_label_to_std {
key: "hip"
value: 0.0
}
keypoint_candidate_score_threshold: 0.3
num_candidates_per_keypoint: 12
peak_max_pool_kernel_size: 5
unmatched_keypoint_score: 0.05
box_scale: 1.7
candidate_search_scale: 0.2
candidate_ranking_mode: "score_distance_ratio"
offset_peak_radius: 3
per_keypoint_offset: true
predict_depth: true
per_keypoint_depth: true
keypoint_depth_loss_weight: 0.3
score_distance_multiplier: 11.0
std_dev_multiplier: 2.8
rescoring_threshold: 0.5
gaussian_denom_ratio: 0.3
argmax_postprocessing: True
"""
if customize_head_params:
task_proto_txt += """
heatmap_head_params {
num_filters: 64
num_filters: 32
kernel_sizes: 5
kernel_sizes: 3
}
offset_head_params {
num_filters: 128
num_filters: 64
kernel_sizes: 5
kernel_sizes: 3
}
"""
config = text_format.Merge(task_proto_txt,
center_net_pb2.CenterNet.KeypointEstimation())
return config
def get_fake_object_center_proto(self, customize_head_params=False):
proto_txt = """
object_center_loss_weight: 0.5
heatmap_bias_init: 3.14
min_box_overlap_iou: 0.2
max_box_predictions: 15
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 3.0
beta: 4.0
}
}
peak_max_pool_kernel_size: 5
"""
if customize_head_params:
proto_txt += """
center_head_params {
num_filters: 64
num_filters: 32
kernel_sizes: 5
kernel_sizes: 3
}
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.ObjectCenterParams())
def get_fake_object_center_from_keypoints_proto(self):
proto_txt = """
object_center_loss_weight: 0.5
heatmap_bias_init: 3.14
min_box_overlap_iou: 0.2
max_box_predictions: 15
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 3.0
beta: 4.0
}
}
keypoint_weights_for_center: 1.0
keypoint_weights_for_center: 0.0
keypoint_weights_for_center: 1.0
keypoint_weights_for_center: 0.0
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.ObjectCenterParams())
def get_fake_object_detection_proto(self, customize_head_params=False):
proto_txt = """
task_loss_weight: 0.5
offset_loss_weight: 0.1
scale_loss_weight: 0.2
localization_loss {
l1_localization_loss {
}
}
"""
if customize_head_params:
proto_txt += """
scale_head_params {
num_filters: 128
num_filters: 64
kernel_sizes: 5
kernel_sizes: 3
}
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.ObjectDetection())
def get_fake_mask_proto(self, customize_head_params=False):
proto_txt = """
task_loss_weight: 0.7
classification_loss {
weighted_softmax {}
}
mask_height: 8
mask_width: 8
score_threshold: 0.7
heatmap_bias_init: -2.0
"""
if customize_head_params:
proto_txt += """
mask_head_params {
num_filters: 128
num_filters: 64
kernel_sizes: 5
kernel_sizes: 3
}
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.MaskEstimation())
def get_fake_densepose_proto(self):
proto_txt = """
task_loss_weight: 0.5
class_id: 0
loss {
classification_loss {
weighted_softmax {}
}
localization_loss {
l1_localization_loss {
}
}
}
num_parts: 24
part_loss_weight: 1.0
coordinate_loss_weight: 2.0
upsample_to_input_res: true
heatmap_bias_init: -2.0
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.DensePoseEstimation())
@parameterized.parameters(
{'customize_head_params': True},
{'customize_head_params': False}
)
def test_create_center_net_model(self, customize_head_params):
"""Test building a CenterNet model from proto txt."""
proto_txt = """
center_net {
num_classes: 10
feature_extractor {
type: "hourglass_52"
channel_stds: [4, 5, 6]
bgr_ordering: true
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
}
"""
# Set up the configuration proto.
config = text_format.Merge(proto_txt, model_pb2.DetectionModel())
config.center_net.object_center_params.CopyFrom(
self.get_fake_object_center_proto(
customize_head_params=customize_head_params))
config.center_net.object_detection_task.CopyFrom(
self.get_fake_object_detection_proto(
customize_head_params=customize_head_params))
config.center_net.keypoint_estimation_task.append(
self.get_fake_keypoint_proto(
customize_head_params=customize_head_params))
config.center_net.keypoint_label_map_path = (
self.get_fake_label_map_file_path())
config.center_net.mask_estimation_task.CopyFrom(
self.get_fake_mask_proto(
customize_head_params=customize_head_params))
config.center_net.densepose_estimation_task.CopyFrom(
self.get_fake_densepose_proto())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
# Check object center related parameters.
self.assertEqual(model._num_classes, 10)
self.assertIsInstance(model._center_params.classification_loss,
losses.PenaltyReducedLogisticFocalLoss)
self.assertEqual(model._center_params.classification_loss._alpha, 3.0)
self.assertEqual(model._center_params.classification_loss._beta, 4.0)
self.assertAlmostEqual(model._center_params.min_box_overlap_iou, 0.2)
self.assertAlmostEqual(
model._center_params.heatmap_bias_init, 3.14, places=4)
self.assertEqual(model._center_params.max_box_predictions, 15)
if customize_head_params:
self.assertEqual(model._center_params.center_head_num_filters, [64, 32])
self.assertEqual(model._center_params.center_head_kernel_sizes, [5, 3])
else:
self.assertEqual(model._center_params.center_head_num_filters, [256])
self.assertEqual(model._center_params.center_head_kernel_sizes, [3])
self.assertEqual(model._center_params.peak_max_pool_kernel_size, 5)
# Check object detection related parameters.
self.assertAlmostEqual(model._od_params.offset_loss_weight, 0.1)
self.assertAlmostEqual(model._od_params.scale_loss_weight, 0.2)
self.assertAlmostEqual(model._od_params.task_loss_weight, 0.5)
self.assertIsInstance(model._od_params.localization_loss,
losses.L1LocalizationLoss)
self.assertEqual(model._od_params.offset_head_num_filters, [256])
self.assertEqual(model._od_params.offset_head_kernel_sizes, [3])
if customize_head_params:
self.assertEqual(model._od_params.scale_head_num_filters, [128, 64])
self.assertEqual(model._od_params.scale_head_kernel_sizes, [5, 3])
else:
self.assertEqual(model._od_params.scale_head_num_filters, [256])
self.assertEqual(model._od_params.scale_head_kernel_sizes, [3])
# Check keypoint estimation related parameters.
kp_params = model._kp_params_dict['human_pose']
self.assertAlmostEqual(kp_params.task_loss_weight, 0.9)
self.assertAlmostEqual(kp_params.keypoint_regression_loss_weight, 1.0)
self.assertAlmostEqual(kp_params.keypoint_offset_loss_weight, 0.5)
self.assertAlmostEqual(kp_params.heatmap_bias_init, 2.14, places=4)
self.assertEqual(kp_params.classification_loss._alpha, 3.0)
self.assertEqual(kp_params.keypoint_indices, [0, 1, 2, 3])
self.assertEqual(kp_params.keypoint_labels,
['nose', 'left_shoulder', 'right_shoulder', 'hip'])
self.assertAllClose(kp_params.keypoint_std_dev, [0.3, 1.0, 1.0, 0.0])
self.assertEqual(kp_params.classification_loss._beta, 4.0)
self.assertIsInstance(kp_params.localization_loss,
losses.L1LocalizationLoss)
self.assertAlmostEqual(kp_params.keypoint_candidate_score_threshold, 0.3)
self.assertEqual(kp_params.num_candidates_per_keypoint, 12)
self.assertEqual(kp_params.peak_max_pool_kernel_size, 5)
self.assertAlmostEqual(kp_params.unmatched_keypoint_score, 0.05)
self.assertAlmostEqual(kp_params.box_scale, 1.7)
self.assertAlmostEqual(kp_params.candidate_search_scale, 0.2)
self.assertEqual(kp_params.candidate_ranking_mode, 'score_distance_ratio')
self.assertEqual(kp_params.offset_peak_radius, 3)
self.assertEqual(kp_params.per_keypoint_offset, True)
self.assertEqual(kp_params.predict_depth, True)
self.assertEqual(kp_params.per_keypoint_depth, True)
self.assertAlmostEqual(kp_params.keypoint_depth_loss_weight, 0.3)
self.assertAlmostEqual(kp_params.score_distance_multiplier, 11.0)
self.assertAlmostEqual(kp_params.std_dev_multiplier, 2.8)
self.assertAlmostEqual(kp_params.rescoring_threshold, 0.5)
if customize_head_params:
# Set by the config.
self.assertEqual(kp_params.heatmap_head_num_filters, [64, 32])
self.assertEqual(kp_params.heatmap_head_kernel_sizes, [5, 3])
self.assertEqual(kp_params.offset_head_num_filters, [128, 64])
self.assertEqual(kp_params.offset_head_kernel_sizes, [5, 3])
else:
# Default values:
self.assertEqual(kp_params.heatmap_head_num_filters, [256])
self.assertEqual(kp_params.heatmap_head_kernel_sizes, [3])
self.assertEqual(kp_params.offset_head_num_filters, [256])
self.assertEqual(kp_params.offset_head_kernel_sizes, [3])
self.assertAlmostEqual(kp_params.gaussian_denom_ratio, 0.3)
self.assertEqual(kp_params.argmax_postprocessing, True)
# Check mask related parameters.
self.assertAlmostEqual(model._mask_params.task_loss_weight, 0.7)
self.assertIsInstance(model._mask_params.classification_loss,
losses.WeightedSoftmaxClassificationLoss)
self.assertEqual(model._mask_params.mask_height, 8)
self.assertEqual(model._mask_params.mask_width, 8)
self.assertAlmostEqual(model._mask_params.score_threshold, 0.7)
self.assertAlmostEqual(
model._mask_params.heatmap_bias_init, -2.0, places=4)
if customize_head_params:
self.assertEqual(model._mask_params.mask_head_num_filters, [128, 64])
self.assertEqual(model._mask_params.mask_head_kernel_sizes, [5, 3])
else:
self.assertEqual(model._mask_params.mask_head_num_filters, [256])
self.assertEqual(model._mask_params.mask_head_kernel_sizes, [3])
# Check DensePose related parameters.
self.assertEqual(model._densepose_params.class_id, 0)
self.assertIsInstance(model._densepose_params.classification_loss,
losses.WeightedSoftmaxClassificationLoss)
self.assertIsInstance(model._densepose_params.localization_loss,
losses.L1LocalizationLoss)
self.assertAlmostEqual(model._densepose_params.part_loss_weight, 1.0)
self.assertAlmostEqual(model._densepose_params.coordinate_loss_weight, 2.0)
self.assertEqual(model._densepose_params.num_parts, 24)
self.assertAlmostEqual(model._densepose_params.task_loss_weight, 0.5)
self.assertTrue(model._densepose_params.upsample_to_input_res)
self.assertEqual(model._densepose_params.upsample_method, 'bilinear')
self.assertAlmostEqual(
model._densepose_params.heatmap_bias_init, -2.0, places=4)
# Check feature extractor parameters.
self.assertIsInstance(
model._feature_extractor, center_net_hourglass_feature_extractor
.CenterNetHourglassFeatureExtractor)
self.assertAllClose(model._feature_extractor._channel_means, [0, 0, 0])
self.assertAllClose(model._feature_extractor._channel_stds, [4, 5, 6])
self.assertTrue(model._feature_extractor._bgr_ordering)
backbone = model._feature_extractor._network
self.assertIsInstance(backbone, hourglass_network.HourglassNetwork)
    self.assertEqual(backbone.num_hourglasses, 1)
def test_create_center_net_model_from_keypoints(self):
"""Test building a CenterNet model from proto txt."""
proto_txt = """
center_net {
num_classes: 10
feature_extractor {
type: "hourglass_52"
channel_stds: [4, 5, 6]
bgr_ordering: true
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
}
"""
# Set up the configuration proto.
config = text_format.Parse(proto_txt, model_pb2.DetectionModel())
# Only add object center and keypoint estimation configs here.
config.center_net.object_center_params.CopyFrom(
self.get_fake_object_center_from_keypoints_proto())
config.center_net.keypoint_estimation_task.append(
self.get_fake_keypoint_proto())
config.center_net.keypoint_label_map_path = (
self.get_fake_label_map_file_path())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
# Check object center related parameters.
self.assertEqual(model._num_classes, 10)
self.assertEqual(model._center_params.keypoint_weights_for_center,
[1.0, 0.0, 1.0, 0.0])
# Check keypoint estimation related parameters.
kp_params = model._kp_params_dict['human_pose']
self.assertAlmostEqual(kp_params.task_loss_weight, 0.9)
self.assertEqual(kp_params.keypoint_indices, [0, 1, 2, 3])
self.assertEqual(kp_params.keypoint_labels,
['nose', 'left_shoulder', 'right_shoulder', 'hip'])
def test_create_center_net_model_mobilenet(self):
"""Test building a CenterNet model using bilinear interpolation."""
proto_txt = """
center_net {
num_classes: 10
feature_extractor {
type: "mobilenet_v2_fpn"
depth_multiplier: 2.0
use_separable_conv: true
upsampling_interpolation: "bilinear"
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
}
"""
# Set up the configuration proto.
config = text_format.Parse(proto_txt, model_pb2.DetectionModel())
# Only add object center and keypoint estimation configs here.
config.center_net.object_center_params.CopyFrom(
self.get_fake_object_center_from_keypoints_proto())
config.center_net.keypoint_estimation_task.append(
self.get_fake_keypoint_proto())
config.center_net.keypoint_label_map_path = (
self.get_fake_label_map_file_path())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
feature_extractor = model._feature_extractor
# Verify the upsampling layers in the FPN use 'bilinear' interpolation.
fpn = feature_extractor.get_layer('model_1')
num_up_sampling2d_layers = 0
for layer in fpn.layers:
if 'up_sampling2d' in layer.name:
num_up_sampling2d_layers += 1
self.assertEqual('bilinear', layer.interpolation)
# Verify that there are up_sampling2d layers.
self.assertGreater(num_up_sampling2d_layers, 0)
    # Verify that the FPN convolutions use separable convs.
for layer in fpn.layers:
# Convolution layers with kernel size not equal to (1, 1) should be
# separable 2D convolutions.
if 'conv' in layer.name and layer.kernel_size != (1, 1):
self.assertIsInstance(layer, tf.keras.layers.SeparableConv2D)
    # Verify that the backbone indeed doubles the number of channels according
    # to the depth multiplier.
backbone = feature_extractor.get_layer('model')
first_conv = backbone.get_layer('Conv1')
# Note that the first layer typically has 32 filters, but this model has
# a depth multiplier of 2.
self.assertEqual(64, first_conv.filters)
def test_create_center_net_deepmac(self):
"""Test building a CenterNet DeepMAC model."""
proto_txt = """
center_net {
num_classes: 90
feature_extractor {
type: "hourglass_52"
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
object_detection_task {
task_loss_weight: 1.0
offset_loss_weight: 1.0
scale_loss_weight: 0.1
localization_loss {
l1_localization_loss {
}
}
}
object_center_params {
object_center_loss_weight: 1.0
min_box_overlap_iou: 0.7
max_box_predictions: 100
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 2.0
beta: 4.0
}
}
}
deepmac_mask_estimation {
classification_loss {
weighted_sigmoid {}
}
}
}
"""
# Set up the configuration proto.
config = text_format.Parse(proto_txt, model_pb2.DetectionModel())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
self.assertIsInstance(model, deepmac_meta_arch.DeepMACMetaArch)
if __name__ == '__main__':
tf.test.main()
| 21,312 | 35.494863 | 80 | py |
models | models-master/research/object_detection/builders/decoder_builder_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for decoder_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import decoder_builder
from object_detection.core import standard_fields as fields
from object_detection.dataset_tools import seq_example_util
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
from object_detection.utils import test_case
def _get_labelmap_path():
"""Returns an absolute path to label map file."""
parent_path = os.path.dirname(tf.resource_loader.get_data_files_path())
return os.path.join(parent_path, 'data',
'pet_label_map.pbtxt')
class DecoderBuilderTest(test_case.TestCase):
def _make_serialized_tf_example(self, has_additional_channels=False):
image_tensor_np = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
additional_channels_tensor_np = np.random.randint(
255, size=(4, 5, 1)).astype(np.uint8)
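    # A single flattened 4x5 instance mask (all ones) matching the test image.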
flat_mask = (4 * 5) * [1.0]
def graph_fn(image_tensor):
encoded_jpeg = tf.image.encode_jpeg(image_tensor)
return encoded_jpeg
encoded_jpeg = self.execute_cpu(graph_fn, [image_tensor_np])
encoded_additional_channels_jpeg = self.execute_cpu(
graph_fn, [additional_channels_tensor_np])
features = {
'image/source_id': dataset_util.bytes_feature('0'.encode()),
'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/height': dataset_util.int64_feature(4),
'image/width': dataset_util.int64_feature(5),
'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]),
'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]),
'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]),
'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]),
'image/object/class/label': dataset_util.int64_list_feature([2]),
'image/object/mask': dataset_util.float_list_feature(flat_mask),
'image/object/keypoint/x': dataset_util.float_list_feature([1.0, 1.0]),
'image/object/keypoint/y': dataset_util.float_list_feature([1.0, 1.0])
}
if has_additional_channels:
additional_channels_key = 'image/additional_channels/encoded'
features[additional_channels_key] = dataset_util.bytes_list_feature(
[encoded_additional_channels_jpeg] * 2)
example = tf.train.Example(features=tf.train.Features(feature=features))
return example.SerializeToString()
def _make_random_serialized_jpeg_images(self, num_frames, image_height,
image_width):
def graph_fn():
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
encoded_images = [tf.io.encode_jpeg(image) for image in images_list]
return encoded_images
return self.execute_cpu(graph_fn, [])
def _make_serialized_tf_sequence_example(self):
num_frames = 4
image_height = 20
image_width = 30
image_source_ids = [str(i) for i in range(num_frames)]
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_source_ids=image_source_ids,
image_format='JPEG',
is_annotated=[[1], [1], [1], [1]],
bboxes=[
[[]], # Frame 0.
[[0., 0., 1., 1.]], # Frame 1.
[[0., 0., 1., 1.],
[0.1, 0.1, 0.2, 0.2]], # Frame 2.
[[]], # Frame 3.
],
label_strings=[
[], # Frame 0.
['Abyssinian'], # Frame 1.
['Abyssinian', 'american_bulldog'], # Frame 2.
[], # Frame 3
]).SerializeToString()
return sequence_example_serialized
def test_build_tf_record_input_reader(self):
input_reader_text_proto = 'tf_record_input_reader {}'
input_reader_proto = input_reader_pb2.InputReader()
text_format.Parse(input_reader_text_proto, input_reader_proto)
decoder = decoder_builder.build(input_reader_proto)
    serialized_example = self._make_serialized_tf_example()
    def graph_fn():
      tensor_dict = decoder.decode(serialized_example)
return (tensor_dict[fields.InputDataFields.image],
tensor_dict[fields.InputDataFields.groundtruth_classes],
tensor_dict[fields.InputDataFields.groundtruth_boxes])
(image, groundtruth_classes,
groundtruth_boxes) = self.execute_cpu(graph_fn, [])
self.assertEqual((4, 5, 3), image.shape)
self.assertAllEqual([2], groundtruth_classes)
self.assertEqual((1, 4), groundtruth_boxes.shape)
self.assertAllEqual([0.0, 0.0, 1.0, 1.0], groundtruth_boxes[0])
def test_build_tf_record_input_reader_sequence_example(self):
label_map_path = _get_labelmap_path()
input_reader_text_proto = """
input_type: TF_SEQUENCE_EXAMPLE
tf_record_input_reader {}
"""
input_reader_proto = input_reader_pb2.InputReader()
input_reader_proto.label_map_path = label_map_path
text_format.Parse(input_reader_text_proto, input_reader_proto)
serialized_seq_example = self._make_serialized_tf_sequence_example()
def graph_fn():
decoder = decoder_builder.build(input_reader_proto)
tensor_dict = decoder.decode(serialized_seq_example)
return (tensor_dict[fields.InputDataFields.image],
tensor_dict[fields.InputDataFields.groundtruth_classes],
tensor_dict[fields.InputDataFields.groundtruth_boxes],
tensor_dict[fields.InputDataFields.num_groundtruth_boxes])
(actual_image, actual_groundtruth_classes, actual_groundtruth_boxes,
actual_num_groundtruth_boxes) = self.execute_cpu(graph_fn, [])
expected_groundtruth_classes = [[-1, -1], [1, -1], [1, 2], [-1, -1]]
expected_groundtruth_boxes = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]
expected_num_groundtruth_boxes = [0, 1, 2, 0]
    # Sequence example images stay JPEG-encoded here, so the decoded tensor is
    # one serialized string per frame.
self.assertEqual((4,), actual_image.shape)
self.assertAllEqual(expected_groundtruth_classes,
actual_groundtruth_classes)
self.assertAllClose(expected_groundtruth_boxes,
actual_groundtruth_boxes)
self.assertAllClose(
expected_num_groundtruth_boxes, actual_num_groundtruth_boxes)
def test_build_tf_record_input_reader_and_load_instance_masks(self):
input_reader_text_proto = """
load_instance_masks: true
tf_record_input_reader {}
"""
input_reader_proto = input_reader_pb2.InputReader()
text_format.Parse(input_reader_text_proto, input_reader_proto)
decoder = decoder_builder.build(input_reader_proto)
    serialized_example = self._make_serialized_tf_example()
    def graph_fn():
      tensor_dict = decoder.decode(serialized_example)
return tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
masks = self.execute_cpu(graph_fn, [])
self.assertAllEqual((1, 4, 5), masks.shape)
def test_build_tf_record_input_reader_and_load_keypoint_depth(self):
input_reader_text_proto = """
load_keypoint_depth_features: true
num_keypoints: 2
tf_record_input_reader {}
"""
input_reader_proto = input_reader_pb2.InputReader()
text_format.Parse(input_reader_text_proto, input_reader_proto)
decoder = decoder_builder.build(input_reader_proto)
serialized_example = self._make_serialized_tf_example()
def graph_fn():
tensor_dict = decoder.decode(serialized_example)
return (tensor_dict[fields.InputDataFields.groundtruth_keypoint_depths],
tensor_dict[
fields.InputDataFields.groundtruth_keypoint_depth_weights])
(kpts_depths, kpts_depth_weights) = self.execute_cpu(graph_fn, [])
self.assertAllEqual((1, 2), kpts_depths.shape)
self.assertAllEqual((1, 2), kpts_depth_weights.shape)
if __name__ == '__main__':
tf.test.main()
| 9,406 | 42.35023 | 80 | py |
models | models-master/research/object_detection/builders/model_builder_tf1_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_builder under TensorFlow 1.X."""
import unittest
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from object_detection.builders import model_builder
from object_detection.builders import model_builder_test
from object_detection.meta_architectures import context_rcnn_meta_arch
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.protos import losses_pb2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class ModelBuilderTF1Test(model_builder_test.ModelBuilderTest):
def default_ssd_feature_extractor(self):
return 'ssd_resnet50_v1_fpn'
def default_faster_rcnn_feature_extractor(self):
return 'faster_rcnn_resnet101'
def ssd_feature_extractors(self):
return model_builder.SSD_FEATURE_EXTRACTOR_CLASS_MAP
def get_override_base_feature_extractor_hyperparams(self, extractor_type):
return extractor_type in {'ssd_inception_v2', 'ssd_inception_v3'}
def faster_rcnn_feature_extractors(self):
return model_builder.FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP
@parameterized.parameters(True, False)
def test_create_context_rcnn_from_config_with_params(self, is_training):
model_proto = self.create_default_faster_rcnn_model_proto()
model_proto.faster_rcnn.context_config.attention_bottleneck_dimension = 10
model_proto.faster_rcnn.context_config.attention_temperature = 0.5
model = model_builder.build(model_proto, is_training=is_training)
self.assertIsInstance(model, context_rcnn_meta_arch.ContextRCNNMetaArch)
if __name__ == '__main__':
tf.test.main()
| 2,340 | 39.362069 | 80 | py |
models | models-master/research/object_detection/builders/hyperparams_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests object_detection.core.hyperparams_builder."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.core import freezable_batch_norm
from object_detection.protos import hyperparams_pb2
from object_detection.utils import tf_version
def _get_scope_key(op):
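  # tf_slim's arg_scope keys registered ops by their `_key_op` attribute; fall
  # back to the op's string representation when that attribute is absent.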
return getattr(op, '_key_op', str(op))
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only tests.')
class HyperparamsBuilderTest(tf.test.TestCase):
def test_default_arg_scope_has_conv2d_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.conv2d), scope)
def test_default_arg_scope_has_separable_conv2d_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.separable_conv2d), scope)
def test_default_arg_scope_has_conv2d_transpose_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.conv2d_transpose), scope)
def test_explicit_fc_op_arg_scope_has_fully_connected_op(self):
conv_hyperparams_text_proto = """
op: FC
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.fully_connected), scope)
def test_separable_conv2d_and_conv2d_and_transpose_have_same_parameters(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
kwargs_1, kwargs_2, kwargs_3 = scope.values()
self.assertDictEqual(kwargs_1, kwargs_2)
self.assertDictEqual(kwargs_1, kwargs_3)
def test_return_l1_regularized_weights(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = list(scope.values())[0]
regularizer = conv_scope_arguments['weights_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
self.assertAllClose(np.abs(weights).sum() * 0.5, result)
def test_return_l2_regularizer_weights(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.42
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
regularizer = conv_scope_arguments['weights_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)
def test_return_non_default_batch_norm_params_with_train_during_train(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertTrue(batch_norm_params['is_training'])
def test_return_batch_norm_params_with_notrain_during_eval(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=False)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertFalse(batch_norm_params['is_training'])
def test_return_batch_norm_params_with_notrain_when_train_is_false(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: false
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertFalse(batch_norm_params['is_training'])
def test_do_not_use_batch_norm_if_default(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], None)
def test_use_none_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: NONE
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], None)
def test_use_relu_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu)
def test_use_relu_6_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu6)
def test_use_swish_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: SWISH
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.swish)
def _assert_variance_in_range(self, initializer, shape, variance,
tol=1e-2):
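    # Materializes a variable with `initializer` in a fresh graph and checks
    # that the empirical variance of the drawn weights is close to `variance`.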
with tf.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
var = tf.get_variable(
name='test',
shape=shape,
dtype=tf.float32,
initializer=initializer)
sess.run(tf.global_variables_initializer())
values = sess.run(var)
self.assertAllClose(np.var(values), variance, tol, tol)
def test_variance_in_range_with_variance_scaling_initializer_fan_in(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
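    # FAN_IN mode: expected variance is factor / fan_in = 2 / 100.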
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_variance_scaling_initializer_fan_out(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_OUT
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
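    # FAN_OUT mode: expected variance is factor / fan_out = 2 / 40.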
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 40.)
def test_variance_in_range_with_variance_scaling_initializer_fan_avg(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_AVG
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
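    # FAN_AVG mode: expected variance is 2 * factor / (fan_in + fan_out).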
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=4. / (100. + 40.))
def test_variance_in_range_with_variance_scaling_initializer_uniform(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: true
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
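    # Uniform variant of FAN_IN: same expected variance, factor / fan_in.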
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_truncated_normal_initializer(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
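    # Truncated normal with stddev 0.8: variance shrinks to roughly
    # 0.8**2 * 0.77 ~= 0.49 because samples beyond two stddevs are redrawn.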
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.49, tol=1e-1)
def test_variance_in_range_with_random_normal_initializer(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
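    # Untruncated random normal with stddev 0.8: variance is 0.8**2 = 0.64.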
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.64, tol=1e-1)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only tests.')
class KerasHyperparamsBuilderTest(tf.test.TestCase):
def _assert_variance_in_range(self, initializer, shape, variance,
tol=1e-2):
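    # Eager-mode counterpart of the TF1 helper above: draw the values directly
    # and compare their empirical variance against `variance`.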
var = tf.Variable(initializer(shape=shape, dtype=tf.float32))
self.assertAllClose(np.var(var.numpy()), variance, tol, tol)
def test_return_l1_regularized_weights_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer = keras_config.params()['kernel_regularizer']
weights = np.array([1., -1, 4., 2.])
result = regularizer(tf.constant(weights)).numpy()
self.assertAllClose(np.abs(weights).sum() * 0.5, result)
def test_return_l2_regularized_weights_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.42
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer = keras_config.params()['kernel_regularizer']
weights = np.array([1., -1, 4., 2.])
result = regularizer(tf.constant(weights)).numpy()
self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)
def test_return_l1_regularizer_weight_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer_weight = keras_config.get_regularizer_weight()
self.assertIsInstance(regularizer_weight, float)
self.assertAlmostEqual(regularizer_weight, 0.5)
def test_return_l2_regularizer_weight_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer_weight = keras_config.get_regularizer_weight()
self.assertIsInstance(regularizer_weight, float)
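    # The builder halves the configured L2 weight when constructing the Keras
    # regularizer (Keras l2 applies w * sum(x**2), slim applied w/2), so the
    # reported weight is 0.25 rather than the configured 0.5.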
self.assertAlmostEqual(regularizer_weight, 0.25)
def test_return_undefined_regularizer_weight_keras(self):
conv_hyperparams_text_proto = """
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer_weight = keras_config.get_regularizer_weight()
self.assertIsNone(regularizer_weight)
def test_return_non_default_batch_norm_params_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertAlmostEqual(batch_norm_params['momentum'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
batch_norm_layer = keras_config.build_batch_norm()
self.assertIsInstance(batch_norm_layer,
freezable_batch_norm.FreezableBatchNorm)
def test_return_non_default_batch_norm_params_keras_override(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params(momentum=0.4)
self.assertAlmostEqual(batch_norm_params['momentum'], 0.4)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
def test_do_not_use_batch_norm_if_default_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertFalse(keras_config.use_batch_norm())
self.assertEqual(keras_config.batch_norm_params(), {})
# The batch norm builder should build an identity Lambda layer
identity_layer = keras_config.build_batch_norm()
self.assertIsInstance(identity_layer,
tf.keras.layers.Lambda)
def test_do_not_use_bias_if_batch_norm_center_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: true
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertTrue(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
hyperparams = keras_config.params()
self.assertFalse(hyperparams['use_bias'])
def test_force_use_bias_if_batch_norm_center_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: true
scale: true
epsilon: 0.03
train: true
}
force_use_bias: true
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertTrue(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
hyperparams = keras_config.params()
self.assertTrue(hyperparams['use_bias'])
def test_use_none_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: NONE
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertIsNone(
keras_config.params(include_activation=True)['activation'])
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.identity)
def test_use_relu_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.relu)
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.nn.relu)
def test_use_relu_6_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.relu6)
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.nn.relu6)
def test_use_swish_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: SWISH
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.swish)
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.nn.swish)
def test_override_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
new_params = keras_config.params(activation=tf.nn.relu)
self.assertEqual(new_params['activation'], tf.nn.relu)
def test_variance_in_range_with_variance_scaling_initializer_fan_in_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_variance_scaling_initializer_fan_out_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_OUT
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 40.)
def test_variance_in_range_with_variance_scaling_initializer_fan_avg_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_AVG
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=4. / (100. + 40.))
def test_variance_in_range_with_variance_scaling_initializer_uniform_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: true
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_truncated_normal_initializer_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.49, tol=1e-1)
def test_variance_in_range_with_random_normal_initializer_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.64, tol=1e-1)
def test_keras_initializer_by_name(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
keras_initializer_by_name: "glorot_uniform"
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer_arg = keras_config.params()['kernel_initializer']
conv_layer = tf.keras.layers.Conv2D(
filters=16, kernel_size=3, **keras_config.params())
self.assertEqual(initializer_arg, 'glorot_uniform')
self.assertIsInstance(conv_layer.kernel_initializer,
type(tf.keras.initializers.get('glorot_uniform')))
if __name__ == '__main__':
tf.test.main()
| 35,961 | 33.087204 | 80 | py |
models | models-master/research/object_detection/builders/post_processing_builder.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for post processing operations."""
import functools
import tensorflow.compat.v1 as tf
from object_detection.builders import calibration_builder
from object_detection.core import post_processing
from object_detection.protos import post_processing_pb2
def build(post_processing_config):
"""Builds callables for post-processing operations.
Builds callables for non-max suppression, score conversion, and (optionally)
calibration based on the configuration.
Non-max suppression callable takes `boxes`, `scores`, and optionally
`clip_window`, `parallel_iterations`, `masks`, and `scope` as inputs. It
returns `nms_boxes`, `nms_scores`, `nms_classes`, `nms_masks` and
`num_detections`. See
post_processing.batch_multiclass_non_max_suppression for the type and shape
of these tensors.
Score converter callable should be called with `input` tensor. The callable
returns the output from one of 3 tf operations based on the configuration -
tf.identity, tf.sigmoid or tf.nn.softmax. If a calibration config is provided,
score_converter also applies calibration transformations, as defined in
calibration_builder.py. See tensorflow documentation for argument and return
value descriptions.
Args:
post_processing_config: post_processing.proto object containing the
parameters for the post-processing operations.
Returns:
non_max_suppressor_fn: Callable for non-max suppression.
score_converter_fn: Callable for score conversion.
Raises:
ValueError: if the post_processing_config is of incorrect type.
"""
if not isinstance(post_processing_config, post_processing_pb2.PostProcessing):
raise ValueError('post_processing_config not of type '
                     'post_processing_pb2.PostProcessing.')
non_max_suppressor_fn = _build_non_max_suppressor(
post_processing_config.batch_non_max_suppression)
score_converter_fn = _build_score_converter(
post_processing_config.score_converter,
post_processing_config.logit_scale)
if post_processing_config.HasField('calibration_config'):
score_converter_fn = _build_calibrated_score_converter(
score_converter_fn,
post_processing_config.calibration_config)
return non_max_suppressor_fn, score_converter_fn
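# --- Illustrative usage sketch (editorial addition, not part of the original
# module). Shows one way to drive build() from a text-format proto; the
# threshold values below are arbitrary examples, not recommended settings.
def _example_build_post_processing():
  from google.protobuf import text_format  # local import for this sketch
  config = post_processing_pb2.PostProcessing()
  text_format.Parse("""
      batch_non_max_suppression {
        score_threshold: 0.3
        iou_threshold: 0.6
        max_detections_per_class: 100
        max_total_detections: 100
      }
      score_converter: SIGMOID
  """, config)
  # build() returns two callables: batched NMS and the score conversion
  # function (sigmoid here, optionally wrapped with calibration).
  nms_fn, score_converter_fn = build(config)
  return nms_fn, score_converter_fn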
def _build_non_max_suppressor(nms_config):
"""Builds non-max suppresson based on the nms config.
Args:
nms_config: post_processing_pb2.PostProcessing.BatchNonMaxSuppression proto.
Returns:
non_max_suppressor_fn: Callable non-max suppressor.
Raises:
ValueError: On incorrect iou_threshold or on incompatible values of
max_total_detections and max_detections_per_class or on negative
soft_nms_sigma.
"""
if nms_config.iou_threshold < 0 or nms_config.iou_threshold > 1.0:
raise ValueError('iou_threshold not in [0, 1.0].')
if nms_config.max_detections_per_class > nms_config.max_total_detections:
raise ValueError('max_detections_per_class should be no greater than '
'max_total_detections.')
if nms_config.soft_nms_sigma < 0.0:
raise ValueError('soft_nms_sigma should be non-negative.')
if nms_config.use_combined_nms and nms_config.use_class_agnostic_nms:
raise ValueError('combined_nms does not support class_agnostic_nms.')
non_max_suppressor_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=nms_config.score_threshold,
iou_thresh=nms_config.iou_threshold,
max_size_per_class=nms_config.max_detections_per_class,
max_total_size=nms_config.max_total_detections,
use_static_shapes=nms_config.use_static_shapes,
use_class_agnostic_nms=nms_config.use_class_agnostic_nms,
max_classes_per_detection=nms_config.max_classes_per_detection,
soft_nms_sigma=nms_config.soft_nms_sigma,
use_partitioned_nms=nms_config.use_partitioned_nms,
use_combined_nms=nms_config.use_combined_nms,
change_coordinate_frame=nms_config.change_coordinate_frame,
use_hard_nms=nms_config.use_hard_nms,
use_cpu_nms=nms_config.use_cpu_nms)
return non_max_suppressor_fn
def _score_converter_fn_with_logit_scale(tf_score_converter_fn, logit_scale):
"""Create a function to scale logits then apply a Tensorflow function."""
def score_converter_fn(logits):
scaled_logits = tf.multiply(logits, 1.0 / logit_scale, name='scale_logits')
return tf_score_converter_fn(scaled_logits, name='convert_scores')
score_converter_fn.__name__ = '%s_with_logit_scale' % (
tf_score_converter_fn.__name__)
return score_converter_fn
def _build_score_converter(score_converter_config, logit_scale):
"""Builds score converter based on the config.
Builds one of [tf.identity, tf.sigmoid, tf.softmax] score converters based on
the config.
Args:
score_converter_config: post_processing_pb2.PostProcessing.score_converter.
logit_scale: temperature to use for SOFTMAX score_converter.
Returns:
Callable score converter op.
Raises:
ValueError: On unknown score converter.
"""
if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY:
return _score_converter_fn_with_logit_scale(tf.identity, logit_scale)
if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID:
return _score_converter_fn_with_logit_scale(tf.sigmoid, logit_scale)
if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX:
return _score_converter_fn_with_logit_scale(tf.nn.softmax, logit_scale)
raise ValueError('Unknown score converter.')
def _build_calibrated_score_converter(score_converter_fn, calibration_config):
"""Wraps a score_converter_fn, adding a calibration step.
Builds a score converter function with a calibration transformation according
to calibration_builder.py. The score conversion function may be applied before
or after the calibration transformation, depending on the calibration method.
If the method is temperature scaling, the score conversion is
after the calibration transformation. Otherwise, the score conversion is
before the calibration transformation. Calibration applies positive monotonic
transformations to inputs (i.e. score ordering is strictly preserved or
adjacent scores are mapped to the same score). When calibration is
class-agnostic, the highest-scoring class remains unchanged, unless two
  adjacent scores are mapped to the same value and one class is arbitrarily
selected to break the tie. In per-class calibration, it's possible (though
rare in practice) that the highest-scoring class will change, since positive
monotonicity is only required to hold within each class.
Args:
score_converter_fn: callable that takes logit scores as input.
calibration_config: post_processing_pb2.PostProcessing.calibration_config.
Returns:
    Callable calibrated score converter op.
"""
calibration_fn = calibration_builder.build(calibration_config)
def calibrated_score_converter_fn(logits):
if (calibration_config.WhichOneof('calibrator') ==
'temperature_scaling_calibration'):
calibrated_logits = calibration_fn(logits)
return score_converter_fn(calibrated_logits)
else:
converted_logits = score_converter_fn(logits)
return calibration_fn(converted_logits)
calibrated_score_converter_fn.__name__ = (
'calibrate_with_%s' % calibration_config.WhichOneof('calibrator'))
return calibrated_score_converter_fn
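# --- Illustrative sketch (editorial addition): a config that also attaches
# temperature-scaling calibration. With this calibrator the logits are divided
# by `scaler` before the sigmoid is applied; the value 2.0 is an arbitrary
# example.
def _example_calibrated_score_converter():
  from google.protobuf import text_format  # local import for this sketch
  config = post_processing_pb2.PostProcessing()
  text_format.Parse("""
      batch_non_max_suppression {
        score_threshold: 0.0
        iou_threshold: 0.5
        max_detections_per_class: 10
        max_total_detections: 10
      }
      score_converter: SIGMOID
      calibration_config {
        temperature_scaling_calibration { scaler: 2.0 }
      }
  """, config)
  _, calibrated_score_fn = build(config)
  return calibrated_score_fn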
| 8,140 | 43.244565 | 80 | py |
models | models-master/research/object_detection/builders/losses_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build localization and classification losses from config."""
import functools
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import losses
from object_detection.protos import losses_pb2
from object_detection.utils import ops
def build(loss_config):
"""Build losses based on the config.
  Builds classification and localization losses, and optionally a hard example
  miner, based on the config.
Args:
loss_config: A losses_pb2.Loss object.
Returns:
classification_loss: Classification loss object.
localization_loss: Localization loss object.
classification_weight: Classification loss weight.
localization_weight: Localization loss weight.
hard_example_miner: Hard example miner object.
random_example_sampler: BalancedPositiveNegativeSampler object.
Raises:
ValueError: If hard_example_miner is used with sigmoid_focal_loss.
ValueError: If random_example_sampler is getting non-positive value as
desired positive example fraction.
"""
classification_loss = _build_classification_loss(
loss_config.classification_loss)
localization_loss = _build_localization_loss(
loss_config.localization_loss)
classification_weight = loss_config.classification_weight
localization_weight = loss_config.localization_weight
hard_example_miner = None
if loss_config.HasField('hard_example_miner'):
if (loss_config.classification_loss.WhichOneof('classification_loss') ==
'weighted_sigmoid_focal'):
raise ValueError('HardExampleMiner should not be used with sigmoid focal '
'loss')
hard_example_miner = build_hard_example_miner(
loss_config.hard_example_miner,
classification_weight,
localization_weight)
random_example_sampler = None
if loss_config.HasField('random_example_sampler'):
if loss_config.random_example_sampler.positive_sample_fraction <= 0:
      raise ValueError('RandomExampleSampler should not use a non-positive '
                       'value as positive sample fraction.')
random_example_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=loss_config.random_example_sampler.
positive_sample_fraction)
if loss_config.expected_loss_weights == loss_config.NONE:
expected_loss_weights_fn = None
elif loss_config.expected_loss_weights == loss_config.EXPECTED_SAMPLING:
expected_loss_weights_fn = functools.partial(
ops.expected_classification_loss_by_expected_sampling,
min_num_negative_samples=loss_config.min_num_negative_samples,
desired_negative_sampling_ratio=loss_config
.desired_negative_sampling_ratio)
elif (loss_config.expected_loss_weights == loss_config
.REWEIGHTING_UNMATCHED_ANCHORS):
expected_loss_weights_fn = functools.partial(
ops.expected_classification_loss_by_reweighting_unmatched_anchors,
min_num_negative_samples=loss_config.min_num_negative_samples,
desired_negative_sampling_ratio=loss_config
.desired_negative_sampling_ratio)
else:
    raise ValueError('Not a valid value for expected_loss_weights.')
return (classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, random_example_sampler,
expected_loss_weights_fn)
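# --- Illustrative usage sketch (editorial addition): building the loss objects
# from a text-format losses_pb2.Loss config. The choice of sigmoid + smooth L1
# and the unit weights are arbitrary examples.
def _example_build_losses():
  from google.protobuf import text_format  # local import for this sketch
  loss_config = losses_pb2.Loss()
  text_format.Parse("""
      classification_loss { weighted_sigmoid {} }
      localization_loss { weighted_smooth_l1 {} }
      classification_weight: 1.0
      localization_weight: 1.0
  """, loss_config)
  (classification_loss, localization_loss, classification_weight,
   localization_weight, hard_example_miner, random_example_sampler,
   expected_loss_weights_fn) = build(loss_config)
  # With this config the miner, sampler and expected-loss-weights function are
  # all None.
  return classification_loss, localization_loss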
def build_hard_example_miner(config,
classification_weight,
localization_weight):
"""Builds hard example miner based on the config.
Args:
config: A losses_pb2.HardExampleMiner object.
classification_weight: Classification loss weight.
localization_weight: Localization loss weight.
Returns:
Hard example miner.
"""
loss_type = None
if config.loss_type == losses_pb2.HardExampleMiner.BOTH:
loss_type = 'both'
if config.loss_type == losses_pb2.HardExampleMiner.CLASSIFICATION:
loss_type = 'cls'
if config.loss_type == losses_pb2.HardExampleMiner.LOCALIZATION:
loss_type = 'loc'
max_negatives_per_positive = None
num_hard_examples = None
if config.max_negatives_per_positive > 0:
max_negatives_per_positive = config.max_negatives_per_positive
if config.num_hard_examples > 0:
num_hard_examples = config.num_hard_examples
hard_example_miner = losses.HardExampleMiner(
num_hard_examples=num_hard_examples,
iou_threshold=config.iou_threshold,
loss_type=loss_type,
cls_loss_weight=classification_weight,
loc_loss_weight=localization_weight,
max_negatives_per_positive=max_negatives_per_positive,
min_negatives_per_image=config.min_negatives_per_image)
return hard_example_miner
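# --- Illustrative sketch (editorial addition): a HardExampleMiner config that
# keeps 64 hard examples per image and caps negatives at 3 per positive. The
# numbers are examples only.
def _example_build_hard_example_miner():
  from google.protobuf import text_format  # local import for this sketch
  miner_config = losses_pb2.HardExampleMiner()
  text_format.Parse("""
      num_hard_examples: 64
      iou_threshold: 0.7
      loss_type: CLASSIFICATION
      max_negatives_per_positive: 3
  """, miner_config)
  return build_hard_example_miner(miner_config,
                                  classification_weight=1.0,
                                  localization_weight=1.0)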
def build_faster_rcnn_classification_loss(loss_config):
"""Builds a classification loss for Faster RCNN based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.ClassificationLoss):
raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
loss_type = loss_config.WhichOneof('classification_loss')
if loss_type == 'weighted_sigmoid':
return losses.WeightedSigmoidClassificationLoss()
if loss_type == 'weighted_softmax':
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
if loss_type == 'weighted_logits_softmax':
config = loss_config.weighted_logits_softmax
return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
logit_scale=config.logit_scale)
if loss_type == 'weighted_sigmoid_focal':
config = loss_config.weighted_sigmoid_focal
alpha = None
if config.HasField('alpha'):
alpha = config.alpha
return losses.SigmoidFocalClassificationLoss(
gamma=config.gamma,
alpha=alpha)
# By default, Faster RCNN second stage classifier uses Softmax loss
# with anchor-wise outputs.
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
def _build_localization_loss(loss_config):
"""Builds a localization loss based on the loss config.
Args:
loss_config: A losses_pb2.LocalizationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.LocalizationLoss):
raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.')
loss_type = loss_config.WhichOneof('localization_loss')
if loss_type == 'weighted_l2':
return losses.WeightedL2LocalizationLoss()
if loss_type == 'weighted_smooth_l1':
return losses.WeightedSmoothL1LocalizationLoss(
loss_config.weighted_smooth_l1.delta)
if loss_type == 'weighted_iou':
return losses.WeightedIOULocalizationLoss()
if loss_type == 'l1_localization_loss':
return losses.L1LocalizationLoss()
if loss_type == 'weighted_giou':
return losses.WeightedGIOULocalizationLoss()
raise ValueError('Empty loss config.')
def _build_classification_loss(loss_config):
"""Builds a classification loss based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.ClassificationLoss):
raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
loss_type = loss_config.WhichOneof('classification_loss')
if loss_type == 'weighted_sigmoid':
return losses.WeightedSigmoidClassificationLoss()
elif loss_type == 'weighted_sigmoid_focal':
config = loss_config.weighted_sigmoid_focal
alpha = None
if config.HasField('alpha'):
alpha = config.alpha
return losses.SigmoidFocalClassificationLoss(
gamma=config.gamma,
alpha=alpha)
elif loss_type == 'weighted_softmax':
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
elif loss_type == 'weighted_logits_softmax':
config = loss_config.weighted_logits_softmax
return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
logit_scale=config.logit_scale)
elif loss_type == 'bootstrapped_sigmoid':
config = loss_config.bootstrapped_sigmoid
return losses.BootstrappedSigmoidClassificationLoss(
alpha=config.alpha,
bootstrap_type=('hard' if config.hard_bootstrap else 'soft'))
elif loss_type == 'penalty_reduced_logistic_focal_loss':
config = loss_config.penalty_reduced_logistic_focal_loss
return losses.PenaltyReducedLogisticFocalLoss(
alpha=config.alpha, beta=config.beta)
elif loss_type == 'weighted_dice_classification_loss':
config = loss_config.weighted_dice_classification_loss
return losses.WeightedDiceClassificationLoss(
squared_normalization=config.squared_normalization,
is_prediction_probability=config.is_prediction_probability)
else:
raise ValueError('Empty loss config.')
| 9,885 | 35.479705 | 80 | py |
models | models-master/research/object_detection/builders/hyperparams_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function to construct tf-slim arg_scope for convolution, fc ops."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import freezable_batch_norm
from object_detection.protos import hyperparams_pb2
from object_detection.utils import context_manager
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
from object_detection.core import freezable_sync_batch_norm
# pylint: enable=g-import-not-at-top
class KerasLayerHyperparams(object):
"""
A hyperparameter configuration object for Keras layers used in
Object Detection models.
"""
def __init__(self, hyperparams_config):
"""Builds keras hyperparameter config for layers based on the proto config.
It automatically converts from Slim layer hyperparameter configs to
Keras layer hyperparameters. Namely, it:
- Builds Keras initializers/regularizers instead of Slim ones
- sets weights_regularizer/initializer to kernel_regularizer/initializer
- converts batchnorm decay to momentum
- converts Slim l2 regularizer weights to the equivalent Keras l2 weights
Contains a hyperparameter configuration for ops that specifies kernel
initializer, kernel regularizer, activation. Also contains parameters for
batch norm operators based on the configuration.
Note that if the batch_norm parameters are not specified in the config
(i.e. left to default) then batch norm is excluded from the config.
Args:
hyperparams_config: hyperparams.proto object containing
hyperparameters.
Raises:
ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
"""
if not isinstance(hyperparams_config,
hyperparams_pb2.Hyperparams):
raise ValueError('hyperparams_config not of type '
'hyperparams_pb.Hyperparams.')
self._batch_norm_params = None
self._use_sync_batch_norm = False
if hyperparams_config.HasField('batch_norm'):
self._batch_norm_params = _build_keras_batch_norm_params(
hyperparams_config.batch_norm)
elif hyperparams_config.HasField('sync_batch_norm'):
self._use_sync_batch_norm = True
self._batch_norm_params = _build_keras_batch_norm_params(
hyperparams_config.sync_batch_norm)
self._force_use_bias = hyperparams_config.force_use_bias
self._activation_fn = _build_activation_fn(hyperparams_config.activation)
# TODO(kaftan): Unclear if these kwargs apply to separable & depthwise conv
# (Those might use depthwise_* instead of kernel_*)
# We should probably switch to using build_conv2d_layer and
# build_depthwise_conv2d_layer methods instead.
self._op_params = {
'kernel_regularizer': _build_keras_regularizer(
hyperparams_config.regularizer),
'kernel_initializer': _build_initializer(
hyperparams_config.initializer, build_for_keras=True),
'activation': _build_activation_fn(hyperparams_config.activation)
}
def use_batch_norm(self):
return self._batch_norm_params is not None
def use_sync_batch_norm(self):
return self._use_sync_batch_norm
def force_use_bias(self):
return self._force_use_bias
def use_bias(self):
return (self._force_use_bias or not
(self.use_batch_norm() and self.batch_norm_params()['center']))
def batch_norm_params(self, **overrides):
"""Returns a dict containing batchnorm layer construction hyperparameters.
Optionally overrides values in the batchnorm hyperparam dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
**overrides: keyword arguments to override in the hyperparams dictionary
Returns: dict containing the layer construction keyword arguments, with
values overridden by the `overrides` keyword arguments.
"""
if self._batch_norm_params is None:
new_batch_norm_params = dict()
else:
new_batch_norm_params = self._batch_norm_params.copy()
new_batch_norm_params.update(overrides)
return new_batch_norm_params
def build_batch_norm(self, training=None, **overrides):
"""Returns a Batch Normalization layer with the appropriate hyperparams.
If the hyperparams are configured to not use batch normalization,
this will return a Keras Lambda layer that only applies tf.Identity,
without doing any normalization.
Optionally overrides values in the batch_norm hyperparam dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
training: if True, the normalization layer will normalize using the batch
statistics. If False, the normalization layer will be frozen and will
act as if it is being used for inference. If None, the layer
will look up the Keras learning phase at `call` time to decide what to
do.
**overrides: batch normalization construction args to override from the
batch_norm hyperparams dictionary.
Returns: Either a FreezableBatchNorm layer (if use_batch_norm() is True),
or a Keras Lambda layer that applies the identity (if use_batch_norm()
is False)
"""
if self.use_batch_norm():
if self._use_sync_batch_norm:
return freezable_sync_batch_norm.FreezableSyncBatchNorm(
training=training, **self.batch_norm_params(**overrides))
else:
return freezable_batch_norm.FreezableBatchNorm(
training=training, **self.batch_norm_params(**overrides))
else:
return tf.keras.layers.Lambda(tf.identity)
def build_activation_layer(self, name='activation'):
"""Returns a Keras layer that applies the desired activation function.
Args:
name: The name to assign the Keras layer.
Returns: A Keras lambda layer that applies the activation function
specified in the hyperparam config, or applies the identity if the
activation function is None.
"""
if self._activation_fn:
return tf.keras.layers.Lambda(self._activation_fn, name=name)
else:
return tf.keras.layers.Lambda(tf.identity, name=name)
def get_regularizer_weight(self):
"""Returns the l1 or l2 regularizer weight.
Returns: A float value corresponding to the l1 or l2 regularization weight,
    or None if neither l1 nor l2 regularization is defined.
"""
regularizer = self._op_params['kernel_regularizer']
if hasattr(regularizer, 'l1'):
return float(regularizer.l1)
elif hasattr(regularizer, 'l2'):
return float(regularizer.l2)
else:
return None
def params(self, include_activation=False, **overrides):
"""Returns a dict containing the layer construction hyperparameters to use.
Optionally overrides values in the returned dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
include_activation: If False, activation in the returned dictionary will
be set to `None`, and the activation must be applied via a separate
layer created by `build_activation_layer`. If True, `activation` in the
output param dictionary will be set to the activation function
specified in the hyperparams config.
**overrides: keyword arguments to override in the hyperparams dictionary.
Returns: dict containing the layer construction keyword arguments, with
values overridden by the `overrides` keyword arguments.
"""
new_params = self._op_params.copy()
new_params['activation'] = None
if include_activation:
new_params['activation'] = self._activation_fn
new_params['use_bias'] = self.use_bias()
new_params.update(**overrides)
return new_params
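# --- Illustrative usage sketch (editorial addition): constructing a
# KerasLayerHyperparams from a text-format proto and splatting params() into a
# Keras layer. The regularizer weight, initializer and activation below are
# arbitrary examples.
def _example_keras_layer_hyperparams():
  from google.protobuf import text_format  # local import for this sketch
  hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Parse("""
      regularizer { l2_regularizer { weight: 0.0004 } }
      initializer { keras_initializer_by_name: "glorot_uniform" }
      activation: RELU_6
  """, hyperparams_proto)
  keras_config = KerasLayerHyperparams(hyperparams_proto)
  # params() yields kernel_regularizer, kernel_initializer, use_bias and
  # activation (None unless include_activation=True is passed).
  return tf.keras.layers.Conv2D(filters=32, kernel_size=3,
                                **keras_config.params())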
def build(hyperparams_config, is_training):
"""Builds tf-slim arg_scope for convolution ops based on the config.
Returns an arg_scope to use for convolution ops containing weights
initializer, weights regularizer, activation function, batch norm function
and batch norm parameters based on the configuration.
Note that if no normalization parameters are specified in the config,
(i.e. left to default) then both batch norm and group norm are excluded
from the arg_scope.
The batch norm parameters are set for updates based on `is_training` argument
and conv_hyperparams_config.batch_norm.train parameter. During training, they
are updated only if batch_norm.train parameter is true. However, during eval,
no updates are made to the batch norm variables. In both cases, their current
values are used during forward pass.
Args:
hyperparams_config: hyperparams.proto object containing
hyperparameters.
is_training: Whether the network is in training mode.
Returns:
arg_scope_fn: A function to construct tf-slim arg_scope containing
hyperparameters for ops.
Raises:
ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
"""
if not isinstance(hyperparams_config,
hyperparams_pb2.Hyperparams):
raise ValueError('hyperparams_config not of type '
'hyperparams_pb.Hyperparams.')
if hyperparams_config.force_use_bias:
raise ValueError('Hyperparams force_use_bias only supported by '
'KerasLayerHyperparams.')
if hyperparams_config.HasField('sync_batch_norm'):
raise ValueError('Hyperparams sync_batch_norm only supported by '
'KerasLayerHyperparams.')
normalizer_fn = None
batch_norm_params = None
if hyperparams_config.HasField('batch_norm'):
normalizer_fn = slim.batch_norm
batch_norm_params = _build_batch_norm_params(
hyperparams_config.batch_norm, is_training)
if hyperparams_config.HasField('group_norm'):
normalizer_fn = slim.group_norm
affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose]
if hyperparams_config.HasField('op') and (
hyperparams_config.op == hyperparams_pb2.Hyperparams.FC):
affected_ops = [slim.fully_connected]
def scope_fn():
with (slim.arg_scope([slim.batch_norm], **batch_norm_params)
if batch_norm_params is not None else
context_manager.IdentityContextManager()):
with slim.arg_scope(
affected_ops,
weights_regularizer=_build_slim_regularizer(
hyperparams_config.regularizer),
weights_initializer=_build_initializer(
hyperparams_config.initializer),
activation_fn=_build_activation_fn(hyperparams_config.activation),
normalizer_fn=normalizer_fn) as sc:
return sc
return scope_fn
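# --- Illustrative usage sketch (editorial addition): the returned scope_fn is
# meant to be entered as a tf-slim arg_scope around the ops it configures. The
# config values are arbitrary examples.
def _example_slim_arg_scope_usage():
  from google.protobuf import text_format  # local import for this sketch
  hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Parse("""
      regularizer { l2_regularizer { weight: 0.0004 } }
      initializer { truncated_normal_initializer { stddev: 0.03 } }
      activation: RELU_6
  """, hyperparams_proto)
  scope_fn = build(hyperparams_proto, is_training=True)
  with slim.arg_scope(scope_fn()):
    # slim.conv2d picks up the configured initializer, regularizer and
    # activation from the surrounding arg_scope.
    return slim.conv2d(tf.zeros([1, 8, 8, 3]), 16, [3, 3])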
def _build_activation_fn(activation_fn):
"""Builds a callable activation from config.
Args:
activation_fn: hyperparams_pb2.Hyperparams.activation
Returns:
Callable activation function.
Raises:
ValueError: On unknown activation function.
"""
if activation_fn == hyperparams_pb2.Hyperparams.NONE:
return None
if activation_fn == hyperparams_pb2.Hyperparams.RELU:
return tf.nn.relu
if activation_fn == hyperparams_pb2.Hyperparams.RELU_6:
return tf.nn.relu6
if activation_fn == hyperparams_pb2.Hyperparams.SWISH:
return tf.nn.swish
raise ValueError('Unknown activation function: {}'.format(activation_fn))
def _build_slim_regularizer(regularizer):
"""Builds a tf-slim regularizer from config.
Args:
regularizer: hyperparams_pb2.Hyperparams.regularizer proto.
Returns:
tf-slim regularizer.
Raises:
ValueError: On unknown regularizer.
"""
regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
if regularizer_oneof == 'l1_regularizer':
return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
if regularizer_oneof == 'l2_regularizer':
return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
if regularizer_oneof is None:
return None
raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
def _build_keras_regularizer(regularizer):
"""Builds a keras regularizer from config.
Args:
regularizer: hyperparams_pb2.Hyperparams.regularizer proto.
Returns:
Keras regularizer.
Raises:
ValueError: On unknown regularizer.
"""
regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
if regularizer_oneof == 'l1_regularizer':
return tf.keras.regularizers.l1(float(regularizer.l1_regularizer.weight))
if regularizer_oneof == 'l2_regularizer':
# The Keras L2 regularizer weight differs from the Slim L2 regularizer
# weight by a factor of 2
return tf.keras.regularizers.l2(
float(regularizer.l2_regularizer.weight * 0.5))
if regularizer_oneof is None:
return None
raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
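# --- Illustrative numeric sketch (editorial addition) of the factor-of-2 note
# above: slim.l2_regularizer(w) computes w * sum(x**2) / 2 via tf.nn.l2_loss,
# while tf.keras.regularizers.l2(w) computes w * sum(x**2), so halving the
# Keras weight keeps the two penalties numerically equal. The weight 0.004 is
# an arbitrary example.
def _example_l2_weight_equivalence():
  weight = 0.004
  kernel = tf.constant([1.0, 2.0, 3.0])
  slim_penalty = slim.l2_regularizer(scale=weight)(kernel)
  keras_penalty = tf.keras.regularizers.l2(weight * 0.5)(kernel)
  return slim_penalty, keras_penalty  # equal in value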
def _build_initializer(initializer, build_for_keras=False):
"""Build a tf initializer from config.
Args:
    initializer: hyperparams_pb2.Hyperparams.initializer proto.
build_for_keras: Whether the initializers should be built for Keras
operators. If false builds for Slim.
Returns:
tf initializer or string corresponding to the tf keras initializer name.
Raises:
ValueError: On unknown initializer.
"""
initializer_oneof = initializer.WhichOneof('initializer_oneof')
if initializer_oneof == 'truncated_normal_initializer':
return tf.truncated_normal_initializer(
mean=initializer.truncated_normal_initializer.mean,
stddev=initializer.truncated_normal_initializer.stddev)
if initializer_oneof == 'random_normal_initializer':
return tf.random_normal_initializer(
mean=initializer.random_normal_initializer.mean,
stddev=initializer.random_normal_initializer.stddev)
if initializer_oneof == 'variance_scaling_initializer':
enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer.
DESCRIPTOR.enum_types_by_name['Mode'])
mode = enum_descriptor.values_by_number[initializer.
variance_scaling_initializer.
mode].name
if build_for_keras:
if initializer.variance_scaling_initializer.uniform:
return tf.variance_scaling_initializer(
scale=initializer.variance_scaling_initializer.factor,
mode=mode.lower(),
distribution='uniform')
else:
# In TF 1.9 release and earlier, the truncated_normal distribution was
# not supported correctly. So, in these earlier versions of tensorflow,
# the ValueError will be raised, and we manually truncate the
# distribution scale.
#
# It is insufficient to just set distribution to `normal` from the
# start, because the `normal` distribution in newer Tensorflow versions
# creates a truncated distribution, whereas it created untruncated
# distributions in older versions.
try:
return tf.variance_scaling_initializer(
scale=initializer.variance_scaling_initializer.factor,
mode=mode.lower(),
distribution='truncated_normal')
except ValueError:
truncate_constant = 0.87962566103423978
truncated_scale = initializer.variance_scaling_initializer.factor / (
truncate_constant * truncate_constant
)
return tf.variance_scaling_initializer(
scale=truncated_scale,
mode=mode.lower(),
distribution='normal')
else:
return slim.variance_scaling_initializer(
factor=initializer.variance_scaling_initializer.factor,
mode=mode,
uniform=initializer.variance_scaling_initializer.uniform)
if initializer_oneof == 'keras_initializer_by_name':
if build_for_keras:
return initializer.keras_initializer_by_name
else:
raise ValueError(
'Unsupported non-Keras usage of keras_initializer_by_name: {}'.format(
initializer.keras_initializer_by_name))
if initializer_oneof is None:
return None
raise ValueError('Unknown initializer function: {}'.format(
initializer_oneof))
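# --- Illustrative sketch (editorial addition, assuming the top-level
# Initializer message from hyperparams.proto): for the Keras path the
# VarianceScalingInitializer mode enum is lowercased, so FAN_IN becomes
# mode='fan_in'. The factor value is an arbitrary example.
def _example_variance_scaling_initializer():
  from google.protobuf import text_format  # local import for this sketch
  initializer_proto = hyperparams_pb2.Initializer()
  text_format.Parse("""
      variance_scaling_initializer { factor: 2.0 mode: FAN_IN uniform: false }
  """, initializer_proto)
  # Returns a tf.variance_scaling_initializer with scale=2.0, mode='fan_in'
  # and a truncated normal distribution (with the fallback described above on
  # very old TF releases).
  return _build_initializer(initializer_proto, build_for_keras=True)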
def _build_batch_norm_params(batch_norm, is_training):
"""Build a dictionary of batch_norm params from config.
Args:
batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
    is_training: Whether the model is in training mode.
Returns:
A dictionary containing batch_norm parameters.
"""
batch_norm_params = {
'decay': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_norm.epsilon,
# Remove is_training parameter from here and deprecate it in the proto
# once we refactor Faster RCNN models to set is_training through an outer
# arg_scope in the meta architecture.
'is_training': is_training and batch_norm.train,
}
return batch_norm_params
def _build_keras_batch_norm_params(batch_norm):
"""Build a dictionary of Keras BatchNormalization params from config.
Args:
batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
Returns:
A dictionary containing Keras BatchNormalization parameters.
"""
# Note: Although decay is defined to be 1 - momentum in batch_norm,
# decay in the slim batch_norm layers was erroneously defined and is
# actually the same as momentum in the Keras batch_norm layers.
# For context, see: github.com/keras-team/keras/issues/6839
batch_norm_params = {
'momentum': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_norm.epsilon,
}
return batch_norm_params
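# --- Illustrative sketch (editorial addition, assuming the top-level BatchNorm
# message from hyperparams.proto): the Slim-style `decay` field is carried over
# verbatim as the Keras `momentum` argument, per the note above. The values are
# arbitrary examples.
def _example_keras_batch_norm_params():
  from google.protobuf import text_format  # local import for this sketch
  batch_norm_proto = hyperparams_pb2.BatchNorm()
  text_format.Parse("""
      decay: 0.997
      center: true
      scale: true
      epsilon: 0.001
  """, batch_norm_proto)
  # Yields momentum=0.997 (up to float precision), center/scale True and
  # epsilon=0.001.
  return _build_keras_batch_norm_params(batch_norm_proto)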
| 18,424 | 37.871308 | 80 | py |
models | models-master/research/object_detection/builders/target_assigner_builder_test.py | """Tests for object_detection.builders.target_assigner_builder."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import target_assigner_builder
from object_detection.core import target_assigner
from object_detection.protos import target_assigner_pb2
class TargetAssignerBuilderTest(tf.test.TestCase):
def test_build_a_target_assigner(self):
target_assigner_text_proto = """
matcher {
argmax_matcher {matched_threshold: 0.5}
}
similarity_calculator {
iou_similarity {}
}
box_coder {
faster_rcnn_box_coder {}
}
"""
target_assigner_proto = target_assigner_pb2.TargetAssigner()
text_format.Merge(target_assigner_text_proto, target_assigner_proto)
target_assigner_instance = target_assigner_builder.build(
target_assigner_proto)
self.assertIsInstance(target_assigner_instance,
target_assigner.TargetAssigner)
if __name__ == '__main__':
tf.test.main()
| 1,763 | 33.588235 | 104 | py |
models | models-master/research/object_detection/legacy/trainer_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.trainer."""
import unittest
import tensorflow.compat.v1 as tf
import tf_slim as slim
from google.protobuf import text_format
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.legacy import trainer
from object_detection.protos import train_pb2
from object_detection.utils import tf_version
NUMBER_OF_CLASSES = 2
def get_input_function():
"""A function to get test inputs. Returns an image with one box."""
image = tf.random_uniform([32, 32, 3], dtype=tf.float32)
key = tf.constant('image_000000')
class_label = tf.random_uniform(
[1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32)
box_label = tf.random_uniform(
[1, 4], minval=0.4, maxval=0.6, dtype=tf.float32)
multiclass_scores = tf.random_uniform(
[1, NUMBER_OF_CLASSES], minval=0.4, maxval=0.6, dtype=tf.float32)
return {
fields.InputDataFields.image: image,
fields.InputDataFields.key: key,
fields.InputDataFields.groundtruth_classes: class_label,
fields.InputDataFields.groundtruth_boxes: box_label,
fields.InputDataFields.multiclass_scores: multiclass_scores
}
class FakeDetectionModel(model.DetectionModel):
"""A simple (and poor) DetectionModel for use in test."""
def __init__(self):
super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES)
self._classification_loss = losses.WeightedSigmoidClassificationLoss()
self._localization_loss = losses.WeightedSmoothL1LocalizationLoss()
def preprocess(self, inputs):
"""Input preprocessing, resizes images to 28x28.
Args:
inputs: a [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
"""
true_image_shapes = [inputs.shape[:-1].as_list()
for _ in range(inputs.shape[-1])]
return tf.image.resize_images(inputs, [28, 28]), true_image_shapes
def predict(self, preprocessed_inputs, true_image_shapes):
"""Prediction tensors from inputs tensor.
Args:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.
"""
flattened_inputs = slim.flatten(preprocessed_inputs)
class_prediction = slim.fully_connected(flattened_inputs, self._num_classes)
box_prediction = slim.fully_connected(flattened_inputs, 4)
return {
'class_predictions_with_background': tf.reshape(
class_prediction, [-1, 1, self._num_classes]),
'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
}
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Convert predicted output tensors to final detections. Unused.
Args:
prediction_dict: a dictionary holding prediction tensors.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
**params: Additional keyword arguments for specific implementations of
DetectionModel.
Returns:
detections: a dictionary with empty fields.
"""
return {
'detection_boxes': None,
'detection_scores': None,
'detection_classes': None,
'num_detections': None
}
def loss(self, prediction_dict, true_image_shapes):
"""Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding predicted tensors
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
a dictionary mapping strings (loss names) to scalar tensors representing
loss values.
"""
batch_reg_targets = tf.stack(
self.groundtruth_lists(fields.BoxListFields.boxes))
batch_cls_targets = tf.stack(
self.groundtruth_lists(fields.BoxListFields.classes))
weights = tf.constant(
1.0, dtype=tf.float32,
shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1])
location_losses = self._localization_loss(
prediction_dict['box_encodings'], batch_reg_targets,
weights=weights)
cls_losses = self._classification_loss(
prediction_dict['class_predictions_with_background'], batch_cls_targets,
weights=weights)
loss_dict = {
'localization_loss': tf.reduce_sum(location_losses),
'classification_loss': tf.reduce_sum(cls_losses),
}
return loss_dict
def regularization_losses(self):
"""Returns a list of regularization losses for this model.
Returns a list of regularization losses for this model that the estimator
needs to use during training/optimization.
Returns:
A list of regularization loss tensors.
"""
pass
def restore_map(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of variables to load from a foreign checkpoint.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
Returns:
A dict mapping variable names to variables.
"""
return {var.op.name: var for var in tf.global_variables()}
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def updates(self):
"""Returns a list of update operators for this model.
Returns a list of update operators for this model that must be executed at
each training step. The estimator's train op needs to have a control
dependency on these updates.
Returns:
A list of update operators.
"""
pass
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class TrainerTest(tf.test.TestCase):
def test_configure_trainer_and_train_two_steps(self):
train_config_text_proto = """
optimizer {
adam_optimizer {
learning_rate {
constant_learning_rate {
learning_rate: 0.01
}
}
}
}
data_augmentation_options {
random_adjust_brightness {
max_delta: 0.2
}
}
data_augmentation_options {
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
}
num_steps: 2
"""
train_config = train_pb2.TrainConfig()
text_format.Merge(train_config_text_proto, train_config)
train_dir = self.get_temp_dir()
trainer.train(
create_tensor_dict_fn=get_input_function,
create_model_fn=FakeDetectionModel,
train_config=train_config,
master='',
task=0,
num_clones=1,
worker_replicas=1,
clone_on_cpu=True,
ps_tasks=0,
worker_job_name='worker',
is_chief=True,
train_dir=train_dir)
def test_configure_trainer_with_multiclass_scores_and_train_two_steps(self):
train_config_text_proto = """
optimizer {
adam_optimizer {
learning_rate {
constant_learning_rate {
learning_rate: 0.01
}
}
}
}
data_augmentation_options {
random_adjust_brightness {
max_delta: 0.2
}
}
data_augmentation_options {
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
}
num_steps: 2
use_multiclass_scores: true
"""
train_config = train_pb2.TrainConfig()
text_format.Merge(train_config_text_proto, train_config)
train_dir = self.get_temp_dir()
trainer.train(create_tensor_dict_fn=get_input_function,
create_model_fn=FakeDetectionModel,
train_config=train_config,
master='',
task=0,
num_clones=1,
worker_replicas=1,
clone_on_cpu=True,
ps_tasks=0,
worker_job_name='worker',
is_chief=True,
train_dir=train_dir)
if __name__ == '__main__':
tf.test.main()
| 9,871 | 32.351351 | 80 | py |
models | models-master/research/object_detection/legacy/evaluator.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection model evaluator.
This file provides a generic evaluation method that can be used to evaluate a
DetectionModel.
"""
import logging
import tensorflow.compat.v1 as tf
from object_detection import eval_util
from object_detection.core import prefetcher
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import object_detection_evaluation
# A dictionary of metric names to classes that implement the metric. The classes
# in the dictionary must implement
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
'pascal_voc_detection_metrics':
object_detection_evaluation.PascalDetectionEvaluator,
'weighted_pascal_voc_detection_metrics':
object_detection_evaluation.WeightedPascalDetectionEvaluator,
'pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.PascalInstanceSegmentationEvaluator,
'weighted_pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
'oid_V2_detection_metrics':
object_detection_evaluation.OpenImagesDetectionEvaluator,
# DEPRECATED: please use oid_V2_detection_metrics instead
'open_images_V2_detection_metrics':
object_detection_evaluation.OpenImagesDetectionEvaluator,
'coco_detection_metrics':
coco_evaluation.CocoDetectionEvaluator,
'coco_mask_metrics':
coco_evaluation.CocoMaskEvaluator,
'oid_challenge_detection_metrics':
object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
# DEPRECATED: please use oid_challenge_detection_metrics instead
'oid_challenge_object_detection_metrics':
object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
'oid_challenge_segmentation_metrics':
object_detection_evaluation
.OpenImagesInstanceSegmentationChallengeEvaluator,
}
EVAL_DEFAULT_METRIC = 'pascal_voc_detection_metrics'
def _extract_predictions_and_losses(model,
create_input_dict_fn,
ignore_groundtruth=False):
"""Constructs tensorflow detection graph and returns output tensors.
Args:
model: model to perform predictions with.
create_input_dict_fn: function to create input tensor dictionaries.
ignore_groundtruth: whether groundtruth should be ignored.
Returns:
prediction_groundtruth_dict: A dictionary with postprocessed tensors (keyed
by standard_fields.DetectionResultsFields) and optional groundtruth
tensors (keyed by standard_fields.InputDataFields).
losses_dict: A dictionary containing detection losses. This is empty when
ignore_groundtruth is true.
"""
input_dict = create_input_dict_fn()
prefetch_queue = prefetcher.prefetch(input_dict, capacity=500)
input_dict = prefetch_queue.dequeue()
original_image = tf.expand_dims(input_dict[fields.InputDataFields.image], 0)
preprocessed_image, true_image_shapes = model.preprocess(
tf.cast(original_image, dtype=tf.float32))
prediction_dict = model.predict(preprocessed_image, true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
groundtruth = None
losses_dict = {}
if not ignore_groundtruth:
groundtruth = {
fields.InputDataFields.groundtruth_boxes:
input_dict[fields.InputDataFields.groundtruth_boxes],
fields.InputDataFields.groundtruth_classes:
input_dict[fields.InputDataFields.groundtruth_classes],
fields.InputDataFields.groundtruth_area:
input_dict[fields.InputDataFields.groundtruth_area],
fields.InputDataFields.groundtruth_is_crowd:
input_dict[fields.InputDataFields.groundtruth_is_crowd],
fields.InputDataFields.groundtruth_difficult:
input_dict[fields.InputDataFields.groundtruth_difficult]
}
if fields.InputDataFields.groundtruth_group_of in input_dict:
groundtruth[fields.InputDataFields.groundtruth_group_of] = (
input_dict[fields.InputDataFields.groundtruth_group_of])
groundtruth_masks_list = None
if fields.DetectionResultFields.detection_masks in detections:
groundtruth[fields.InputDataFields.groundtruth_instance_masks] = (
input_dict[fields.InputDataFields.groundtruth_instance_masks])
groundtruth_masks_list = [
input_dict[fields.InputDataFields.groundtruth_instance_masks]]
groundtruth_keypoints_list = None
if fields.DetectionResultFields.detection_keypoints in detections:
groundtruth[fields.InputDataFields.groundtruth_keypoints] = (
input_dict[fields.InputDataFields.groundtruth_keypoints])
groundtruth_keypoints_list = [
input_dict[fields.InputDataFields.groundtruth_keypoints]]
label_id_offset = 1
model.provide_groundtruth(
[input_dict[fields.InputDataFields.groundtruth_boxes]],
[tf.one_hot(input_dict[fields.InputDataFields.groundtruth_classes]
- label_id_offset, depth=model.num_classes)],
groundtruth_masks_list=groundtruth_masks_list,
groundtruth_keypoints_list=groundtruth_keypoints_list)
losses_dict.update(model.loss(prediction_dict, true_image_shapes))
result_dict = eval_util.result_dict_for_single_example(
original_image,
input_dict[fields.InputDataFields.source_id],
detections,
groundtruth,
class_agnostic=(
fields.DetectionResultFields.detection_classes not in detections),
scale_to_absolute=True)
return result_dict, losses_dict
def get_evaluators(eval_config, categories):
"""Returns the evaluator class according to eval_config, valid for categories.
Args:
eval_config: evaluation configurations.
categories: a list of categories to evaluate.
Returns:
An list of instances of DetectionEvaluator.
Raises:
ValueError: if metric is not in the metric class dictionary.
"""
eval_metric_fn_keys = eval_config.metrics_set
if not eval_metric_fn_keys:
eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
evaluators_list = []
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
if eval_metric_fn_key == 'oid_challenge_object_detection_metrics':
logging.warning(
'oid_challenge_object_detection_metrics is deprecated; '
'use oid_challenge_detection_metrics instead'
)
    if eval_metric_fn_key == 'open_images_V2_detection_metrics':
logging.warning(
'open_images_V2_detection_metrics is deprecated; '
'use oid_V2_detection_metrics instead'
)
evaluators_list.append(
EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories))
return evaluators_list
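# --- Illustrative usage sketch (editorial addition, assuming eval_pb2's
# EvalConfig with its repeated metrics_set field): selecting evaluators from an
# eval config. The metric name comes from EVAL_METRICS_CLASS_DICT above; the
# two-class category list is made up for the example.
def _example_get_evaluators():
  from object_detection.protos import eval_pb2  # local import for this sketch
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.append('coco_detection_metrics')
  categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
  # Returns [coco_evaluation.CocoDetectionEvaluator(categories=categories)].
  return get_evaluators(eval_config, categories)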
def evaluate(create_input_dict_fn, create_model_fn, eval_config, categories,
checkpoint_dir, eval_dir, graph_hook_fn=None, evaluator_list=None):
"""Evaluation function for detection models.
Args:
create_input_dict_fn: a function to create a tensor input dictionary.
create_model_fn: a function that creates a DetectionModel.
eval_config: a eval_pb2.EvalConfig protobuf.
categories: a list of category dictionaries. Each dict in the list should
have an integer 'id' field and string 'name' field.
checkpoint_dir: directory to load the checkpoints to evaluate from.
eval_dir: directory to write evaluation metrics summary to.
graph_hook_fn: Optional function that is called after the training graph is
completely built. This is helpful to perform additional changes to the
training graph such as optimizing batchnorm. The function should modify
the default graph.
evaluator_list: Optional list of instances of DetectionEvaluator. If not
given, this list of metrics is created according to the eval_config.
Returns:
metrics: A dictionary containing metric names and values from the latest
run.
"""
model = create_model_fn()
if eval_config.ignore_groundtruth and not eval_config.export_path:
logging.fatal('If ignore_groundtruth=True then an export_path is '
'required. Aborting!!!')
tensor_dict, losses_dict = _extract_predictions_and_losses(
model=model,
create_input_dict_fn=create_input_dict_fn,
ignore_groundtruth=eval_config.ignore_groundtruth)
def _process_batch(tensor_dict, sess, batch_index, counters,
losses_dict=None):
"""Evaluates tensors in tensor_dict, losses_dict and visualizes examples.
This function calls sess.run on tensor_dict, evaluating the original_image
tensor only on the first K examples and visualizing detections overlaid
on this original_image.
Args:
tensor_dict: a dictionary of tensors
sess: tensorflow session
batch_index: the index of the batch amongst all batches in the run.
counters: a dictionary holding 'success' and 'skipped' fields which can
be updated to keep track of number of successful and failed runs,
respectively. If these fields are not updated, then the success/skipped
counter values shown at the end of evaluation will be incorrect.
      losses_dict: Optional dictionary of scalar loss tensors.
Returns:
result_dict: a dictionary of numpy arrays
result_losses_dict: a dictionary of scalar losses. This is empty if input
losses_dict is None.
"""
try:
if not losses_dict:
losses_dict = {}
result_dict, result_losses_dict = sess.run([tensor_dict, losses_dict])
counters['success'] += 1
except tf.errors.InvalidArgumentError:
logging.info('Skipping image')
counters['skipped'] += 1
return {}, {}
global_step = tf.train.global_step(sess, tf.train.get_global_step())
if batch_index < eval_config.num_visualizations:
tag = 'image-{}'.format(batch_index)
eval_util.visualize_detection_results(
result_dict,
tag,
global_step,
categories=categories,
summary_dir=eval_dir,
export_dir=eval_config.visualization_export_dir,
show_groundtruth=eval_config.visualize_groundtruth_boxes,
groundtruth_box_visualization_color=eval_config.
groundtruth_box_visualization_color,
min_score_thresh=eval_config.min_score_threshold,
max_num_predictions=eval_config.max_num_boxes_to_visualize,
skip_scores=eval_config.skip_scores,
skip_labels=eval_config.skip_labels,
keep_image_id_for_visualization_export=eval_config.
keep_image_id_for_visualization_export)
return result_dict, result_losses_dict
if graph_hook_fn: graph_hook_fn()
variables_to_restore = tf.global_variables()
global_step = tf.train.get_or_create_global_step()
variables_to_restore.append(global_step)
if eval_config.use_moving_averages:
variable_averages = tf.train.ExponentialMovingAverage(0.0)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
def _restore_latest_checkpoint(sess):
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
saver.restore(sess, latest_checkpoint)
if not evaluator_list:
evaluator_list = get_evaluators(eval_config, categories)
metrics = eval_util.repeated_checkpoint_run(
tensor_dict=tensor_dict,
summary_dir=eval_dir,
evaluators=evaluator_list,
batch_processor=_process_batch,
checkpoint_dirs=[checkpoint_dir],
variables_to_restore=None,
restore_fn=_restore_latest_checkpoint,
num_batches=eval_config.num_examples,
eval_interval_secs=eval_config.eval_interval_secs,
max_number_of_evaluations=(1 if eval_config.ignore_groundtruth else
eval_config.max_evals
if eval_config.max_evals else None),
master=eval_config.eval_master,
save_graph=eval_config.save_graph,
save_graph_dir=(eval_dir if eval_config.save_graph else ''),
losses_dict=losses_dict,
eval_export_path=eval_config.export_path)
return metrics
| 12,995 | 42.32 | 80 | py |
models | models-master/research/object_detection/legacy/eval.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Evaluation executable for detection models.
This executable is used to evaluate DetectionModels. There are two ways of
configuring the eval job.
1) A single pipeline_pb2.TrainEvalPipelineConfig file may be specified via
--pipeline_config_path. In this mode, the --eval_training_data flag may be
given to force the pipeline to evaluate on training data instead.
Example usage:
./eval \
--logtostderr \
--checkpoint_dir=path/to/checkpoint_dir \
--eval_dir=path/to/eval_dir \
--pipeline_config_path=pipeline_config.pbtxt
2) Three configuration files may be provided: a model_pb2.DetectionModel
configuration file to define what type of DetectionModel is being evaluated, an
input_reader_pb2.InputReader file to specify what data the model is evaluating
and an eval_pb2.EvalConfig file to configure evaluation parameters.
Example usage:
./eval \
--logtostderr \
--checkpoint_dir=path/to/checkpoint_dir \
--eval_dir=path/to/eval_dir \
--eval_config_path=eval_config.pbtxt \
--model_config_path=model_config.pbtxt \
--input_config_path=eval_input_config.pbtxt
"""
import functools
import os
import tensorflow.compat.v1 as tf
from tensorflow.python.util.deprecation import deprecated
from object_detection.builders import dataset_builder
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.legacy import evaluator
from object_detection.utils import config_util
from object_detection.utils import label_map_util
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
flags.DEFINE_boolean('eval_training_data', False,
'If training data should be evaluated for this job.')
flags.DEFINE_string(
'checkpoint_dir', '',
'Directory containing checkpoints to evaluate, typically '
'set to `train_dir` used in the training job.')
flags.DEFINE_string('eval_dir', '', 'Directory to write eval summaries to.')
flags.DEFINE_string(
'pipeline_config_path', '',
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file. If provided, other configs are ignored')
flags.DEFINE_string('eval_config_path', '',
'Path to an eval_pb2.EvalConfig config file.')
flags.DEFINE_string('input_config_path', '',
'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
'Path to a model_pb2.DetectionModel config file.')
flags.DEFINE_boolean(
'run_once', False, 'Option to only run a single pass of '
'evaluation. Overrides the `max_evals` parameter in the '
'provided config.')
FLAGS = flags.FLAGS
@deprecated(None, 'Use object_detection/model_main.py.')
def main(unused_argv):
assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
assert FLAGS.eval_dir, '`eval_dir` is missing.'
tf.gfile.MakeDirs(FLAGS.eval_dir)
if FLAGS.pipeline_config_path:
configs = config_util.get_configs_from_pipeline_file(
FLAGS.pipeline_config_path)
tf.gfile.Copy(
FLAGS.pipeline_config_path,
os.path.join(FLAGS.eval_dir, 'pipeline.config'),
overwrite=True)
else:
configs = config_util.get_configs_from_multiple_files(
model_config_path=FLAGS.model_config_path,
eval_config_path=FLAGS.eval_config_path,
eval_input_config_path=FLAGS.input_config_path)
for name, config in [('model.config', FLAGS.model_config_path),
('eval.config', FLAGS.eval_config_path),
('input.config', FLAGS.input_config_path)]:
tf.gfile.Copy(config, os.path.join(FLAGS.eval_dir, name), overwrite=True)
model_config = configs['model']
eval_config = configs['eval_config']
input_config = configs['eval_input_config']
if FLAGS.eval_training_data:
input_config = configs['train_input_config']
model_fn = functools.partial(
model_builder.build, model_config=model_config, is_training=False)
def get_next(config):
return dataset_builder.make_initializable_iterator(
dataset_builder.build(config)).get_next()
create_input_dict_fn = functools.partial(get_next, input_config)
categories = label_map_util.create_categories_from_labelmap(
input_config.label_map_path)
if FLAGS.run_once:
eval_config.max_evals = 1
graph_rewriter_fn = None
if 'graph_rewriter_config' in configs:
graph_rewriter_fn = graph_rewriter_builder.build(
configs['graph_rewriter_config'], is_training=False)
evaluator.evaluate(
create_input_dict_fn,
model_fn,
eval_config,
categories,
FLAGS.checkpoint_dir,
FLAGS.eval_dir,
graph_hook_fn=graph_rewriter_fn)
if __name__ == '__main__':
tf.app.run()
| 5,462 | 37.202797 | 80 | py |
models | models-master/research/object_detection/legacy/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/object_detection/legacy/train.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training executable for detection models.
This executable is used to train DetectionModels. There are two ways of
configuring the training job:
1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file
can be specified by --pipeline_config_path.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--pipeline_config_path=pipeline_config.pbtxt
2) Three configuration files can be provided: a model_pb2.DetectionModel
configuration file to define what type of DetectionModel is being trained, an
input_reader_pb2.InputReader file to specify what training data will be used and
a train_pb2.TrainConfig file to configure training parameters.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--model_config_path=model_config.pbtxt \
--train_config_path=train_config.pbtxt \
--input_config_path=train_input_config.pbtxt
"""
import functools
import json
import os
import tensorflow.compat.v1 as tf
from tensorflow.python.util.deprecation import deprecated
from object_detection.builders import dataset_builder
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.legacy import trainer
from object_detection.utils import config_util
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_integer('task', 0, 'task id')
flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.')
flags.DEFINE_boolean('clone_on_cpu', False,
'Force clones to be deployed on CPU. Note that even if '
'set to False (allowing ops to run on gpu), some ops may '
'still be run on the CPU if they have no GPU kernel.')
flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer '
'replicas.')
flags.DEFINE_integer('ps_tasks', 0,
'Number of parameter server tasks. If None, does not use '
'a parameter server.')
flags.DEFINE_string('train_dir', '',
'Directory to save the checkpoints and training summaries.')
flags.DEFINE_string('pipeline_config_path', '',
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file. If provided, other configs are ignored')
flags.DEFINE_string('train_config_path', '',
'Path to a train_pb2.TrainConfig config file.')
flags.DEFINE_string('input_config_path', '',
'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
'Path to a model_pb2.DetectionModel config file.')
FLAGS = flags.FLAGS
@deprecated(None, 'Use object_detection/model_main.py.')
def main(_):
assert FLAGS.train_dir, '`train_dir` is missing.'
if FLAGS.task == 0: tf.gfile.MakeDirs(FLAGS.train_dir)
if FLAGS.pipeline_config_path:
configs = config_util.get_configs_from_pipeline_file(
FLAGS.pipeline_config_path)
if FLAGS.task == 0:
tf.gfile.Copy(FLAGS.pipeline_config_path,
os.path.join(FLAGS.train_dir, 'pipeline.config'),
overwrite=True)
else:
configs = config_util.get_configs_from_multiple_files(
model_config_path=FLAGS.model_config_path,
train_config_path=FLAGS.train_config_path,
train_input_config_path=FLAGS.input_config_path)
if FLAGS.task == 0:
for name, config in [('model.config', FLAGS.model_config_path),
('train.config', FLAGS.train_config_path),
('input.config', FLAGS.input_config_path)]:
tf.gfile.Copy(config, os.path.join(FLAGS.train_dir, name),
overwrite=True)
model_config = configs['model']
train_config = configs['train_config']
input_config = configs['train_input_config']
model_fn = functools.partial(
model_builder.build,
model_config=model_config,
is_training=True)
def get_next(config):
return dataset_builder.make_initializable_iterator(
dataset_builder.build(config)).get_next()
create_input_dict_fn = functools.partial(get_next, input_config)
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
cluster_data = env.get('cluster', None)
cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
task_data = env.get('task', None) or {'type': 'master', 'index': 0}
task_info = type('TaskSpec', (object,), task_data)
# Parameters for a single worker.
ps_tasks = 0
worker_replicas = 1
worker_job_name = 'lonely_worker'
task = 0
is_chief = True
master = ''
if cluster_data and 'worker' in cluster_data:
    # Number of total worker replicas includes "worker"s and the "master".
worker_replicas = len(cluster_data['worker']) + 1
if cluster_data and 'ps' in cluster_data:
ps_tasks = len(cluster_data['ps'])
if worker_replicas > 1 and ps_tasks < 1:
raise ValueError('At least 1 ps task is needed for distributed training.')
if worker_replicas >= 1 and ps_tasks > 0:
# Set up distributed training.
server = tf.train.Server(tf.train.ClusterSpec(cluster), protocol='grpc',
job_name=task_info.type,
task_index=task_info.index)
if task_info.type == 'ps':
server.join()
return
worker_job_name = '%s/task:%d' % (task_info.type, task_info.index)
task = task_info.index
is_chief = (task_info.type == 'master')
master = server.target
graph_rewriter_fn = None
if 'graph_rewriter_config' in configs:
graph_rewriter_fn = graph_rewriter_builder.build(
configs['graph_rewriter_config'], is_training=True)
trainer.train(
create_input_dict_fn,
model_fn,
train_config,
master,
task,
FLAGS.num_clones,
worker_replicas,
FLAGS.clone_on_cpu,
ps_tasks,
worker_job_name,
is_chief,
FLAGS.train_dir,
graph_hook_fn=graph_rewriter_fn)
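# Illustrative sketch (assumption, not part of the upstream file): the parsed
# form of the TF_CONFIG environment variable that main() reads above to set up
# distributed training. Host:port strings are placeholders; note that the
# 'master' job is counted as one of the worker replicas.
_EXAMPLE_TF_CONFIG = {
    'cluster': {
        'master': ['host0:2222'],
        'worker': ['host1:2222', 'host2:2222'],
        'ps': ['host3:2222'],
    },
    'task': {'type': 'worker', 'index': 0},
}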
if __name__ == '__main__':
tf.app.run()
| 6,874 | 35.764706 | 80 | py |
models | models-master/research/object_detection/legacy/trainer.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection model trainer.
This file provides a generic training method that can be used to train a
DetectionModel.
"""
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.builders import optimizer_builder
from object_detection.builders import preprocessor_builder
from object_detection.core import batcher
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.utils import ops as util_ops
from object_detection.utils import variables_helper
from deployment import model_deploy
def create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
batch_queue_capacity, num_batch_queue_threads,
prefetch_queue_capacity, data_augmentation_options):
"""Sets up reader, prefetcher and returns input queue.
Args:
batch_size_per_clone: batch size to use per clone.
create_tensor_dict_fn: function to create tensor dictionary.
batch_queue_capacity: maximum number of elements to store within a queue.
num_batch_queue_threads: number of threads to use for batching.
prefetch_queue_capacity: maximum capacity of the queue used to prefetch
assembled batches.
data_augmentation_options: a list of tuples, where each tuple contains a
data augmentation function and a dictionary containing arguments and their
values (see preprocessor.py).
Returns:
input queue: a batcher.BatchQueue object holding enqueued tensor_dicts
(which hold images, boxes and targets). To get a batch of tensor_dicts,
call input_queue.Dequeue().
"""
tensor_dict = create_tensor_dict_fn()
tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
tensor_dict[fields.InputDataFields.image], 0)
images = tensor_dict[fields.InputDataFields.image]
float_images = tf.cast(images, dtype=tf.float32)
tensor_dict[fields.InputDataFields.image] = float_images
include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
in tensor_dict)
include_keypoints = (fields.InputDataFields.groundtruth_keypoints
in tensor_dict)
include_multiclass_scores = (fields.InputDataFields.multiclass_scores
in tensor_dict)
if data_augmentation_options:
tensor_dict = preprocessor.preprocess(
tensor_dict, data_augmentation_options,
func_arg_map=preprocessor.get_default_func_arg_map(
include_label_weights=True,
include_multiclass_scores=include_multiclass_scores,
include_instance_masks=include_instance_masks,
include_keypoints=include_keypoints))
input_queue = batcher.BatchQueue(
tensor_dict,
batch_size=batch_size_per_clone,
batch_queue_capacity=batch_queue_capacity,
num_batch_queue_threads=num_batch_queue_threads,
prefetch_queue_capacity=prefetch_queue_capacity)
return input_queue
def get_inputs(input_queue,
num_classes,
merge_multiple_label_boxes=False,
use_multiclass_scores=False):
"""Dequeues batch and constructs inputs to object detection model.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
num_classes: Number of classes.
merge_multiple_label_boxes: Whether to merge boxes with multiple labels
or not. Defaults to false. Merged boxes are represented with a single
box and a k-hot encoding of the multiple labels associated with the
boxes.
use_multiclass_scores: Whether to use multiclass scores instead of
groundtruth_classes.
Returns:
images: a list of 3-D float tensor of images.
image_keys: a list of string keys for the images.
locations_list: a list of tensors of shape [num_boxes, 4]
containing the corners of the groundtruth boxes.
classes_list: a list of padded one-hot (or K-hot) float32 tensors containing
target classes.
masks_list: a list of 3-D float tensors of shape [num_boxes, image_height,
image_width] containing instance masks for objects if present in the
input_queue. Else returns None.
keypoints_list: a list of 3-D float tensors of shape [num_boxes,
num_keypoints, 2] containing keypoints for objects if present in the
input queue. Else returns None.
weights_lists: a list of 1-D float32 tensors of shape [num_boxes]
containing groundtruth weight for each box.
"""
read_data_list = input_queue.dequeue()
label_id_offset = 1
def extract_images_and_targets(read_data):
"""Extract images and targets from the input dict."""
image = read_data[fields.InputDataFields.image]
key = ''
if fields.InputDataFields.source_id in read_data:
key = read_data[fields.InputDataFields.source_id]
location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes],
tf.int32)
classes_gt -= label_id_offset
if merge_multiple_label_boxes and use_multiclass_scores:
raise ValueError(
          'Using both merge_multiple_label_boxes and use_multiclass_scores is '
'not supported'
)
if merge_multiple_label_boxes:
location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
location_gt, classes_gt, num_classes)
classes_gt = tf.cast(classes_gt, tf.float32)
elif use_multiclass_scores:
classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores],
tf.float32)
else:
classes_gt = util_ops.padded_one_hot_encoding(
indices=classes_gt, depth=num_classes, left_pad=0)
masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints)
if (merge_multiple_label_boxes and (
masks_gt is not None or keypoints_gt is not None)):
raise NotImplementedError('Multi-label support is only for boxes.')
weights_gt = read_data.get(
fields.InputDataFields.groundtruth_weights)
return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt,
weights_gt)
return zip(*map(extract_images_and_targets, read_data_list))
def _create_losses(input_queue, create_model_fn, train_config):
"""Creates loss function for a DetectionModel.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
create_model_fn: A function to create the DetectionModel.
train_config: a train_pb2.TrainConfig protobuf.
"""
detection_model = create_model_fn()
(images, _, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_masks_list, groundtruth_keypoints_list,
groundtruth_weights_list) = get_inputs(
input_queue,
detection_model.num_classes,
train_config.merge_multiple_label_boxes,
train_config.use_multiclass_scores)
preprocessed_images = []
true_image_shapes = []
for image in images:
resized_image, true_image_shape = detection_model.preprocess(image)
preprocessed_images.append(resized_image)
true_image_shapes.append(true_image_shape)
images = tf.concat(preprocessed_images, 0)
true_image_shapes = tf.concat(true_image_shapes, 0)
if any(mask is None for mask in groundtruth_masks_list):
groundtruth_masks_list = None
if any(keypoints is None for keypoints in groundtruth_keypoints_list):
groundtruth_keypoints_list = None
detection_model.provide_groundtruth(
groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list,
groundtruth_keypoints_list,
groundtruth_weights_list=groundtruth_weights_list)
prediction_dict = detection_model.predict(images, true_image_shapes)
losses_dict = detection_model.loss(prediction_dict, true_image_shapes)
for loss_tensor in losses_dict.values():
tf.losses.add_loss(loss_tensor)
def train(create_tensor_dict_fn,
create_model_fn,
train_config,
master,
task,
num_clones,
worker_replicas,
clone_on_cpu,
ps_tasks,
worker_job_name,
is_chief,
train_dir,
graph_hook_fn=None):
"""Training function for detection models.
Args:
create_tensor_dict_fn: a function to create a tensor input dictionary.
create_model_fn: a function that creates a DetectionModel and generates
losses.
train_config: a train_pb2.TrainConfig protobuf.
master: BNS name of the TensorFlow master to use.
task: The task id of this training instance.
num_clones: The number of clones to run per machine.
    worker_replicas: The number of worker replicas to train with.
clone_on_cpu: True if clones should be forced to run on CPU.
ps_tasks: Number of parameter server tasks.
worker_job_name: Name of the worker job.
is_chief: Whether this replica is the chief replica.
train_dir: Directory to write checkpoints and training summaries to.
graph_hook_fn: Optional function that is called after the inference graph is
built (before optimization). This is helpful to perform additional changes
to the training graph such as adding FakeQuant ops. The function should
modify the default graph.
Raises:
ValueError: If both num_clones > 1 and train_config.sync_replicas is true.
"""
detection_model = create_model_fn()
data_augmentation_options = [
preprocessor_builder.build(step)
for step in train_config.data_augmentation_options]
with tf.Graph().as_default():
# Build a configuration specifying multi-GPU and multi-replicas.
deploy_config = model_deploy.DeploymentConfig(
num_clones=num_clones,
clone_on_cpu=clone_on_cpu,
replica_id=task,
num_replicas=worker_replicas,
num_ps_tasks=ps_tasks,
worker_job_name=worker_job_name)
# Place the global step on the device storing the variables.
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
if num_clones != 1 and train_config.sync_replicas:
      raise ValueError('In Synchronous SGD mode num_clones must '
                       'be 1. Found num_clones: {}'.format(num_clones))
batch_size = train_config.batch_size // num_clones
if train_config.sync_replicas:
batch_size //= train_config.replicas_to_aggregate
with tf.device(deploy_config.inputs_device()):
input_queue = create_input_queue(
batch_size, create_tensor_dict_fn,
train_config.batch_queue_capacity,
train_config.num_batch_queue_threads,
train_config.prefetch_queue_capacity, data_augmentation_options)
# Gather initial summaries.
# TODO(rathodv): See if summaries can be added/extracted from global tf
# collections so that they don't have to be passed around.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
global_summaries = set([])
model_fn = functools.partial(_create_losses,
create_model_fn=create_model_fn,
train_config=train_config)
clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue])
first_clone_scope = clones[0].scope
if graph_hook_fn:
with tf.device(deploy_config.variables_device()):
graph_hook_fn()
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
with tf.device(deploy_config.optimizer_device()):
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var, family='LearningRate')
sync_optimizer = None
if train_config.sync_replicas:
training_optimizer = tf.train.SyncReplicasOptimizer(
training_optimizer,
replicas_to_aggregate=train_config.replicas_to_aggregate,
total_num_replicas=worker_replicas)
sync_optimizer = training_optimizer
with tf.device(deploy_config.optimizer_device()):
regularization_losses = (None if train_config.add_regularization_loss
else [])
total_loss, grads_and_vars = model_deploy.optimize_clones(
clones, training_optimizer,
regularization_losses=regularization_losses)
total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.')
# Optionally multiply bias gradients by train_config.bias_grad_multiplier.
if train_config.bias_grad_multiplier:
biases_regex_list = ['.*/biases']
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars,
biases_regex_list,
multiplier=train_config.bias_grad_multiplier)
# Optionally freeze some layers by setting their gradients to be zero.
if train_config.freeze_variables:
grads_and_vars = variables_helper.freeze_gradients_matching_regex(
grads_and_vars, train_config.freeze_variables)
# Optionally clip gradients
if train_config.gradient_clipping_by_norm > 0:
with tf.name_scope('clip_grads'):
grads_and_vars = slim.learning.clip_gradient_norms(
grads_and_vars, train_config.gradient_clipping_by_norm)
# Create gradient updates.
grad_updates = training_optimizer.apply_gradients(grads_and_vars,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops, name='update_barrier')
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
# Add summaries.
for model_var in slim.get_model_variables():
global_summaries.add(tf.summary.histogram('ModelVars/' +
model_var.op.name, model_var))
for loss_tensor in tf.losses.get_losses():
global_summaries.add(tf.summary.scalar('Losses/' + loss_tensor.op.name,
loss_tensor))
global_summaries.add(
tf.summary.scalar('Losses/TotalLoss', tf.losses.get_total_loss()))
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
summaries |= global_summaries
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
# Soft placement allows placing on CPU ops without GPU implementation.
session_config = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
# Save checkpoints regularly.
keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours
saver = tf.train.Saver(
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
# Create ops required to initialize the model from a given checkpoint.
init_fn = None
if train_config.fine_tune_checkpoint:
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, fine_tune_checkpoint_type is set based on
# from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
var_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
train_config.load_all_detection_checkpoint_vars))
available_var_map = (variables_helper.
get_variables_available_in_checkpoint(
var_map, train_config.fine_tune_checkpoint,
include_global_step=False))
init_saver = tf.train.Saver(available_var_map)
def initializer_fn(sess):
init_saver.restore(sess, train_config.fine_tune_checkpoint)
init_fn = initializer_fn
slim.learning.train(
train_tensor,
logdir=train_dir,
master=master,
is_chief=is_chief,
session_config=session_config,
startup_delay_steps=train_config.startup_delay_steps,
init_fn=init_fn,
summary_op=summary_op,
number_of_steps=(
train_config.num_steps if train_config.num_steps else None),
save_summaries_secs=120,
sync_optimizer=sync_optimizer,
saver=saver)
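# A minimal usage sketch (illustrative only, not part of the upstream file):
# the common single-machine invocation of `train` above, mirroring the
# defaults legacy/train.py computes when no TF_CONFIG cluster is configured.
# The input/model functions are assumed to come from the dataset and model
# builders, and the train directory is a placeholder.
def _example_local_training(create_tensor_dict_fn, create_model_fn,
                            train_config):
  """Illustrative helper: trains on one worker with no parameter servers."""
  train(create_tensor_dict_fn,
        create_model_fn,
        train_config,
        master='',
        task=0,
        num_clones=1,
        worker_replicas=1,
        clone_on_cpu=False,
        ps_tasks=0,
        worker_job_name='lonely_worker',
        is_chief=True,
        train_dir='path/to/train_dir')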
| 17,685 | 41.514423 | 80 | py |
models | models-master/research/object_detection/data_decoders/tf_sequence_example_decoder.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence example decoder for object detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
import tensorflow.compat.v1 as tf
from tf_slim import tfexample_decoder as slim_example_decoder
from object_detection.core import data_decoder
from object_detection.core import standard_fields as fields
from object_detection.utils import label_map_util
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import lookup as contrib_lookup
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
class _ClassTensorHandler(slim_example_decoder.Tensor):
"""An ItemHandler to fetch class ids from class text."""
def __init__(self,
tensor_key,
label_map_proto_file,
shape_keys=None,
shape=None,
default_value=''):
"""Initializes the LookupTensor handler.
Simply calls a vocabulary (most often, a label mapping) lookup.
Args:
tensor_key: the name of the `TFExample` feature to read the tensor from.
label_map_proto_file: File path to a text format LabelMapProto message
mapping class text to id.
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if both `shape_keys` and `shape` are specified.
"""
name_to_id = label_map_util.get_label_map_dict(
label_map_proto_file, use_display_name=False)
# We use a default_value of -1, but we expect all labels to be contained
# in the label map.
try:
# Dynamically try to load the tf v2 lookup, falling back to contrib
lookup = tf.compat.v2.lookup
hash_table_class = tf.compat.v2.lookup.StaticHashTable
except AttributeError:
lookup = contrib_lookup
hash_table_class = contrib_lookup.HashTable
name_to_id_table = hash_table_class(
initializer=lookup.KeyValueTensorInitializer(
keys=tf.constant(list(name_to_id.keys())),
values=tf.constant(list(name_to_id.values()), dtype=tf.int64)),
default_value=-1)
self._name_to_id_table = name_to_id_table
super(_ClassTensorHandler, self).__init__(tensor_key, shape_keys, shape,
default_value)
def tensors_to_item(self, keys_to_tensors):
unmapped_tensor = super(_ClassTensorHandler,
self).tensors_to_item(keys_to_tensors)
return self._name_to_id_table.lookup(unmapped_tensor)
class TfSequenceExampleDecoder(data_decoder.DataDecoder):
"""Tensorflow Sequence Example proto decoder for Object Detection.
Sequence examples contain sequences of images which share common
features. The structure of TfSequenceExamples can be seen in
dataset_tools/seq_example_util.py
For the TFODAPI, the following fields are required:
Shared features:
'image/format'
'image/height'
'image/width'
Features with an entry for each image, where bounding box features can
be empty lists if the image does not contain any objects:
'image/encoded'
'image/source_id'
'region/bbox/xmin'
'region/bbox/xmax'
'region/bbox/ymin'
'region/bbox/ymax'
'region/label/string'
Optionally, the sequence example can include context_features for use in
Context R-CNN (see https://arxiv.org/abs/1912.03538):
'image/context_features'
'image/context_feature_length'
'image/context_features_image_id_list'
"""
def __init__(self,
label_map_proto_file,
load_context_features=False,
load_context_image_ids=False,
use_display_name=False,
fully_annotated=False):
"""Constructs `TfSequenceExampleDecoder` object.
Args:
label_map_proto_file: a file path to a
object_detection.protos.StringIntLabelMap proto. The
label map will be used to map IDs of 'region/label/string'.
It is assumed that 'region/label/string' will be in the data.
load_context_features: Whether to load information from context_features,
to provide additional context to a detection model for training and/or
inference
load_context_image_ids: Whether to load the corresponding image ids for
the context_features in order to visualize attention.
use_display_name: whether or not to use the `display_name` for label
mapping (instead of `name`). Only used if label_map_proto_file is
provided.
fully_annotated: If True, will assume that every frame (whether it has
boxes or not), has been fully annotated. If False, a
'region/is_annotated' field must be provided in the dataset which
indicates which frames have annotations. Default False.
"""
# Specifies how the tf.SequenceExamples are decoded.
self._context_keys_to_features = {
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/height': tf.FixedLenFeature((), tf.int64),
'image/width': tf.FixedLenFeature((), tf.int64),
}
self._sequence_keys_to_feature_lists = {
'image/encoded': tf.FixedLenSequenceFeature([], dtype=tf.string),
'image/source_id': tf.FixedLenSequenceFeature([], dtype=tf.string),
'region/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'region/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'region/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'region/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'region/label/string': tf.VarLenFeature(dtype=tf.string),
'region/label/confidence': tf.VarLenFeature(dtype=tf.float32),
}
self._items_to_handlers = {
# Context.
fields.InputDataFields.image_height:
slim_example_decoder.Tensor('image/height'),
fields.InputDataFields.image_width:
slim_example_decoder.Tensor('image/width'),
# Sequence.
fields.InputDataFields.num_groundtruth_boxes:
slim_example_decoder.NumBoxesSequence('region/bbox/xmin'),
fields.InputDataFields.groundtruth_boxes:
slim_example_decoder.BoundingBoxSequence(
prefix='region/bbox/', default_value=0.0),
fields.InputDataFields.groundtruth_weights:
slim_example_decoder.Tensor('region/label/confidence'),
}
# If the dataset is sparsely annotated, parse sequence features which
# indicate which frames have been labeled.
if not fully_annotated:
self._sequence_keys_to_feature_lists['region/is_annotated'] = (
tf.FixedLenSequenceFeature([], dtype=tf.int64))
self._items_to_handlers[fields.InputDataFields.is_annotated] = (
slim_example_decoder.Tensor('region/is_annotated'))
self._items_to_handlers[fields.InputDataFields.image] = (
slim_example_decoder.Tensor('image/encoded'))
self._items_to_handlers[fields.InputDataFields.source_id] = (
slim_example_decoder.Tensor('image/source_id'))
label_handler = _ClassTensorHandler(
'region/label/string', label_map_proto_file, default_value='')
self._items_to_handlers[
fields.InputDataFields.groundtruth_classes] = label_handler
if load_context_features:
self._context_keys_to_features['image/context_features'] = (
tf.VarLenFeature(dtype=tf.float32))
self._items_to_handlers[fields.InputDataFields.context_features] = (
slim_example_decoder.ItemHandlerCallback(
['image/context_features', 'image/context_feature_length'],
self._reshape_context_features))
self._context_keys_to_features['image/context_feature_length'] = (
tf.FixedLenFeature((), tf.int64))
self._items_to_handlers[fields.InputDataFields.context_feature_length] = (
slim_example_decoder.Tensor('image/context_feature_length'))
if load_context_image_ids:
self._context_keys_to_features['image/context_features_image_id_list'] = (
tf.VarLenFeature(dtype=tf.string))
self._items_to_handlers[
fields.InputDataFields.context_features_image_id_list] = (
slim_example_decoder.Tensor(
'image/context_features_image_id_list',
default_value=''))
self._fully_annotated = fully_annotated
def decode(self, tf_seq_example_string_tensor):
"""Decodes serialized `tf.SequenceExample`s and returns a tensor dictionary.
Args:
tf_seq_example_string_tensor: a string tensor holding a serialized
`tf.SequenceExample`.
Returns:
A list of dictionaries with (at least) the following tensors:
fields.InputDataFields.source_id: a [num_frames] string tensor with a
unique ID for each frame.
fields.InputDataFields.num_groundtruth_boxes: a [num_frames] int32 tensor
specifying the number of boxes in each frame.
fields.InputDataFields.groundtruth_boxes: a [num_frames, num_boxes, 4]
        float32 tensor with bounding boxes for each frame. Note that num_boxes
        is the maximum number of boxes seen in any individual frame. Any frames
        with fewer boxes are padded with 0.0.
fields.InputDataFields.groundtruth_classes: a [num_frames, num_boxes]
int32 tensor with class indices for each box in each frame.
fields.InputDataFields.groundtruth_weights: a [num_frames, num_boxes]
float32 tensor with weights of the groundtruth boxes.
fields.InputDataFields.is_annotated: a [num_frames] bool tensor specifying
whether the image was annotated or not. If False, the corresponding
entries in the groundtruth tensor will be ignored.
fields.InputDataFields.context_features - 1D float32 tensor of shape
[context_feature_length * num_context_features]
fields.InputDataFields.context_feature_length - int32 tensor specifying
the length of each feature in context_features
fields.InputDataFields.image: a [num_frames] string tensor with
the encoded images.
      fields.InputDataFields.context_features_image_id_list: a 1D vector
of shape [num_context_features] containing string tensors.
"""
serialized_example = tf.reshape(tf_seq_example_string_tensor, shape=[])
decoder = slim_example_decoder.TFSequenceExampleDecoder(
self._context_keys_to_features, self._sequence_keys_to_feature_lists,
self._items_to_handlers)
keys = decoder.list_items()
tensors = decoder.decode(serialized_example, items=keys)
tensor_dict = dict(list(zip(keys, tensors)))
tensor_dict[fields.InputDataFields.groundtruth_boxes].set_shape(
[None, None, 4])
tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = tf.cast(
tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
dtype=tf.int32)
tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.cast(
tensor_dict[fields.InputDataFields.groundtruth_classes], dtype=tf.int32)
tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.cast(
tf.stack([
tensor_dict[fields.InputDataFields.image_height],
tensor_dict[fields.InputDataFields.image_width]
]),
dtype=tf.int32)
tensor_dict.pop(fields.InputDataFields.image_height)
tensor_dict.pop(fields.InputDataFields.image_width)
def default_groundtruth_weights():
"""Produces weights of 1.0 for each valid box, and 0.0 otherwise."""
num_boxes_per_frame = tensor_dict[
fields.InputDataFields.num_groundtruth_boxes]
max_num_boxes = tf.reduce_max(num_boxes_per_frame)
num_boxes_per_frame_tiled = tf.tile(
tf.expand_dims(num_boxes_per_frame, axis=-1),
multiples=tf.stack([1, max_num_boxes]))
range_tiled = tf.tile(
tf.expand_dims(tf.range(max_num_boxes), axis=0),
multiples=tf.stack([tf.shape(num_boxes_per_frame)[0], 1]))
return tf.cast(
tf.greater(num_boxes_per_frame_tiled, range_tiled), tf.float32)
tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond(
tf.greater(
tf.size(tensor_dict[fields.InputDataFields.groundtruth_weights]),
0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights],
default_groundtruth_weights)
if self._fully_annotated:
tensor_dict[fields.InputDataFields.is_annotated] = tf.ones_like(
tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
dtype=tf.bool)
else:
tensor_dict[fields.InputDataFields.is_annotated] = tf.cast(
tensor_dict[fields.InputDataFields.is_annotated], dtype=tf.bool)
return tensor_dict
def _reshape_context_features(self, keys_to_tensors):
"""Reshape context features.
The instance context_features are reshaped to
[num_context_features, context_feature_length]
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 2-D float tensor of shape [num_context_features, context_feature_length]
"""
context_feature_length = keys_to_tensors['image/context_feature_length']
to_shape = tf.cast(tf.stack([-1, context_feature_length]), tf.int32)
context_features = keys_to_tensors['image/context_features']
if isinstance(context_features, tf.SparseTensor):
context_features = tf.sparse_tensor_to_dense(context_features)
context_features = tf.reshape(context_features, to_shape)
return context_features
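# A minimal usage sketch (illustrative only, not part of the upstream file):
# wiring the decoder above over a TFRecord file of serialized
# tf.SequenceExamples. The TFRecord and label map paths are placeholders.
def _example_decode_sequence_dataset(tfrecord_path, label_map_path):
  """Illustrative helper: returns a dataset of decoded tensor dictionaries."""
  decoder = TfSequenceExampleDecoder(
      label_map_proto_file=label_map_path,
      load_context_features=True)
  dataset = tf.data.TFRecordDataset(tfrecord_path)
  # Each element is a dict keyed by fields.InputDataFields, as documented in
  # TfSequenceExampleDecoder.decode.
  return dataset.map(decoder.decode)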
| 14,582 | 43.057402 | 80 | py |
models | models-master/research/object_detection/data_decoders/tf_example_decoder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import functools
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from tf_slim import tfexample_decoder as slim_example_decoder
from object_detection.core import data_decoder
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2
from object_detection.utils import label_map_util
from object_detection.utils import shape_utils
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import lookup as contrib_lookup
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
_LABEL_OFFSET = 1
# The field name of hosting keypoint text feature. Only used within this file
# to help forming the keypoint related features.
_KEYPOINT_TEXT_FIELD = 'image/object/keypoint/text'
class Visibility(enum.Enum):
"""Visibility definitions.
This follows the MS Coco convention (http://cocodataset.org/#format-data).
"""
# Keypoint is not labeled.
UNLABELED = 0
# Keypoint is labeled but falls outside the object segment (e.g. occluded).
NOT_VISIBLE = 1
# Keypoint is labeled and visible.
VISIBLE = 2
class _ClassTensorHandler(slim_example_decoder.Tensor):
"""An ItemHandler to fetch class ids from class text."""
def __init__(self,
tensor_key,
label_map_proto_file,
shape_keys=None,
shape=None,
default_value=''):
"""Initializes the LookupTensor handler.
Simply calls a vocabulary (most often, a label mapping) lookup.
Args:
tensor_key: the name of the `TFExample` feature to read the tensor from.
label_map_proto_file: File path to a text format LabelMapProto message
mapping class text to id.
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if both `shape_keys` and `shape` are specified.
"""
name_to_id = label_map_util.get_label_map_dict(
label_map_proto_file, use_display_name=False)
# We use a default_value of -1, but we expect all labels to be contained
# in the label map.
try:
# Dynamically try to load the tf v2 lookup, falling back to contrib
lookup = tf.compat.v2.lookup
hash_table_class = tf.compat.v2.lookup.StaticHashTable
except AttributeError:
lookup = contrib_lookup
hash_table_class = contrib_lookup.HashTable
name_to_id_table = hash_table_class(
initializer=lookup.KeyValueTensorInitializer(
keys=tf.constant(list(name_to_id.keys())),
values=tf.constant(list(name_to_id.values()), dtype=tf.int64)),
default_value=-1)
display_name_to_id = label_map_util.get_label_map_dict(
label_map_proto_file, use_display_name=True)
# We use a default_value of -1, but we expect all labels to be contained
# in the label map.
display_name_to_id_table = hash_table_class(
initializer=lookup.KeyValueTensorInitializer(
keys=tf.constant(list(display_name_to_id.keys())),
values=tf.constant(
list(display_name_to_id.values()), dtype=tf.int64)),
default_value=-1)
self._name_to_id_table = name_to_id_table
self._display_name_to_id_table = display_name_to_id_table
super(_ClassTensorHandler, self).__init__(tensor_key, shape_keys, shape,
default_value)
def tensors_to_item(self, keys_to_tensors):
unmapped_tensor = super(_ClassTensorHandler,
self).tensors_to_item(keys_to_tensors)
return tf.maximum(self._name_to_id_table.lookup(unmapped_tensor),
self._display_name_to_id_table.lookup(unmapped_tensor))
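# Illustrative sketch (not part of the upstream file): a standalone, minimal
# version of the class-text -> id lookup that _ClassTensorHandler builds. The
# tiny label map {'cat': 1, 'dog': 2} is a made-up example.
def _example_class_text_lookup():
  """Illustrative helper: maps class text to ids; unknown text maps to -1."""
  lookup = tf.compat.v2.lookup
  table = lookup.StaticHashTable(
      lookup.KeyValueTensorInitializer(
          keys=tf.constant(['cat', 'dog']),
          values=tf.constant([1, 2], dtype=tf.int64)),
      default_value=-1)
  # E.g. ['dog', 'bird'] -> [2, -1].
  return table.lookup(tf.constant(['dog', 'bird']))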
class TfExampleDecoder(data_decoder.DataDecoder):
"""Tensorflow Example proto decoder."""
def __init__(self,
load_instance_masks=False,
instance_mask_type=input_reader_pb2.NUMERICAL_MASKS,
label_map_proto_file=None,
use_display_name=False,
dct_method='',
num_keypoints=0,
num_additional_channels=0,
load_multiclass_scores=False,
load_context_features=False,
expand_hierarchy_labels=False,
load_dense_pose=False,
load_track_id=False,
load_keypoint_depth_features=False,
use_keypoint_label_map=False):
"""Constructor sets keys_to_features and items_to_handlers.
Args:
load_instance_masks: whether or not to load and handle instance masks.
instance_mask_type: type of instance masks. Options are provided in
input_reader.proto. This is only used if `load_instance_masks` is True.
label_map_proto_file: a file path to a
object_detection.protos.StringIntLabelMap proto. If provided, then the
mapped IDs of 'image/object/class/text' will take precedence over the
existing 'image/object/class/label' ID. Also, if provided, it is
assumed that 'image/object/class/text' will be in the data.
use_display_name: whether or not to use the `display_name` for label
mapping (instead of `name`). Only used if label_map_proto_file is
provided.
dct_method: An optional string. Defaults to None. It only takes
effect when image format is jpeg, used to specify a hint about the
algorithm used for jpeg decompression. Currently valid values
        are ['INTEGER_FAST', 'INTEGER_ACCURATE']. The hint may be ignored if,
        for example, the jpeg library does not have that specific option.
num_keypoints: the number of keypoints per object.
num_additional_channels: how many additional channels to use.
load_multiclass_scores: Whether to load multiclass scores associated with
boxes.
load_context_features: Whether to load information from context_features,
to provide additional context to a detection model for training and/or
inference.
expand_hierarchy_labels: Expands the object and image labels taking into
account the provided hierarchy in the label_map_proto_file. For positive
        classes, the labels are extended to ancestors. For negative classes,
the labels are expanded to descendants.
load_dense_pose: Whether to load DensePose annotations.
load_track_id: Whether to load tracking annotations.
load_keypoint_depth_features: Whether to load the keypoint depth features
including keypoint relative depths and weights. If this field is set to
True but no keypoint depth features are in the input tf.Example, then
default values will be populated.
use_keypoint_label_map: If set to True, the 'image/object/keypoint/text'
field will be used to map the keypoint coordinates (using the label
map defined in label_map_proto_file) instead of assuming the ordering
in the tf.Example feature. This is useful when training with multiple
datasets while each of them contains different subset of keypoint
annotations.
Raises:
ValueError: If `instance_mask_type` option is not one of
input_reader_pb2.DEFAULT, input_reader_pb2.NUMERICAL, or
input_reader_pb2.PNG_MASKS.
ValueError: If `expand_labels_hierarchy` is True, but the
`label_map_proto_file` is not provided.
"""
# TODO(rathodv): delete unused `use_display_name` argument once we change
# other decoders to handle label maps similarly.
del use_display_name
self.keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/filename':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/key/sha256':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/source_id':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/height':
tf.FixedLenFeature((), tf.int64, default_value=1),
'image/width':
tf.FixedLenFeature((), tf.int64, default_value=1),
# Image-level labels.
'image/class/text':
tf.VarLenFeature(tf.string),
'image/class/label':
tf.VarLenFeature(tf.int64),
'image/neg_category_ids':
tf.VarLenFeature(tf.int64),
'image/not_exhaustive_category_ids':
tf.VarLenFeature(tf.int64),
'image/class/confidence':
tf.VarLenFeature(tf.float32),
# Object boxes and classes.
'image/object/bbox/xmin':
tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax':
tf.VarLenFeature(tf.float32),
'image/object/bbox/ymin':
tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax':
tf.VarLenFeature(tf.float32),
'image/object/class/label':
tf.VarLenFeature(tf.int64),
'image/object/class/text':
tf.VarLenFeature(tf.string),
'image/object/area':
tf.VarLenFeature(tf.float32),
'image/object/is_crowd':
tf.VarLenFeature(tf.int64),
'image/object/difficult':
tf.VarLenFeature(tf.int64),
'image/object/group_of':
tf.VarLenFeature(tf.int64),
'image/object/weight':
tf.VarLenFeature(tf.float32),
}
# We are checking `dct_method` instead of passing it directly in order to
# ensure TF version 1.6 compatibility.
if dct_method:
image = slim_example_decoder.Image(
image_key='image/encoded',
format_key='image/format',
channels=3,
dct_method=dct_method)
additional_channel_image = slim_example_decoder.Image(
image_key='image/additional_channels/encoded',
format_key='image/format',
channels=1,
repeated=True,
dct_method=dct_method)
else:
image = slim_example_decoder.Image(
image_key='image/encoded', format_key='image/format', channels=3)
additional_channel_image = slim_example_decoder.Image(
image_key='image/additional_channels/encoded',
format_key='image/format',
channels=1,
repeated=True)
self.items_to_handlers = {
fields.InputDataFields.image:
image,
fields.InputDataFields.source_id: (
slim_example_decoder.Tensor('image/source_id')),
fields.InputDataFields.key: (
slim_example_decoder.Tensor('image/key/sha256')),
fields.InputDataFields.filename: (
slim_example_decoder.Tensor('image/filename')),
# Image-level labels.
fields.InputDataFields.groundtruth_image_confidences: (
slim_example_decoder.Tensor('image/class/confidence')),
fields.InputDataFields.groundtruth_verified_neg_classes: (
slim_example_decoder.Tensor('image/neg_category_ids')),
fields.InputDataFields.groundtruth_not_exhaustive_classes: (
slim_example_decoder.Tensor('image/not_exhaustive_category_ids')),
# Object boxes and classes.
fields.InputDataFields.groundtruth_boxes: (
slim_example_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],
'image/object/bbox/')),
fields.InputDataFields.groundtruth_area:
slim_example_decoder.Tensor('image/object/area'),
fields.InputDataFields.groundtruth_is_crowd: (
slim_example_decoder.Tensor('image/object/is_crowd')),
fields.InputDataFields.groundtruth_difficult: (
slim_example_decoder.Tensor('image/object/difficult')),
fields.InputDataFields.groundtruth_group_of: (
slim_example_decoder.Tensor('image/object/group_of')),
fields.InputDataFields.groundtruth_weights: (
slim_example_decoder.Tensor('image/object/weight')),
}
self._keypoint_label_map = None
if use_keypoint_label_map:
assert label_map_proto_file is not None
self._keypoint_label_map = label_map_util.get_keypoint_label_map_dict(
label_map_proto_file)
# We use a default_value of -1, but we expect all labels to be
# contained in the label map.
try:
# Dynamically try to load the tf v2 lookup, falling back to contrib
lookup = tf.compat.v2.lookup
hash_table_class = tf.compat.v2.lookup.StaticHashTable
except AttributeError:
lookup = contrib_lookup
hash_table_class = contrib_lookup.HashTable
self._kpts_name_to_id_table = hash_table_class(
initializer=lookup.KeyValueTensorInitializer(
keys=tf.constant(list(self._keypoint_label_map.keys())),
values=tf.constant(
list(self._keypoint_label_map.values()), dtype=tf.int64)),
default_value=-1)
self.keys_to_features[_KEYPOINT_TEXT_FIELD] = tf.VarLenFeature(
tf.string)
self.items_to_handlers[_KEYPOINT_TEXT_FIELD] = (
slim_example_decoder.ItemHandlerCallback(
[_KEYPOINT_TEXT_FIELD], self._keypoint_text_handle))
if load_multiclass_scores:
self.keys_to_features[
'image/object/class/multiclass_scores'] = tf.VarLenFeature(tf.float32)
self.items_to_handlers[fields.InputDataFields.multiclass_scores] = (
slim_example_decoder.Tensor('image/object/class/multiclass_scores'))
if load_context_features:
self.keys_to_features[
'image/context_features'] = tf.VarLenFeature(tf.float32)
self.items_to_handlers[fields.InputDataFields.context_features] = (
slim_example_decoder.ItemHandlerCallback(
['image/context_features', 'image/context_feature_length'],
self._reshape_context_features))
self.keys_to_features[
'image/context_feature_length'] = tf.FixedLenFeature((), tf.int64)
self.items_to_handlers[fields.InputDataFields.context_feature_length] = (
slim_example_decoder.Tensor('image/context_feature_length'))
if num_additional_channels > 0:
self.keys_to_features[
'image/additional_channels/encoded'] = tf.FixedLenFeature(
(num_additional_channels,), tf.string)
self.items_to_handlers[
fields.InputDataFields.
image_additional_channels] = additional_channel_image
self._num_keypoints = num_keypoints
if num_keypoints > 0:
self.keys_to_features['image/object/keypoint/x'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/keypoint/y'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/keypoint/visibility'] = (
tf.VarLenFeature(tf.int64))
self.items_to_handlers[fields.InputDataFields.groundtruth_keypoints] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/keypoint/y', 'image/object/keypoint/x'],
self._reshape_keypoints))
kpt_vis_field = fields.InputDataFields.groundtruth_keypoint_visibilities
self.items_to_handlers[kpt_vis_field] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/keypoint/x', 'image/object/keypoint/visibility'],
self._reshape_keypoint_visibilities))
if load_keypoint_depth_features:
self.keys_to_features['image/object/keypoint/z'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/keypoint/z/weights'] = (
tf.VarLenFeature(tf.float32))
self.items_to_handlers[
fields.InputDataFields.groundtruth_keypoint_depths] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/keypoint/x', 'image/object/keypoint/z'],
self._reshape_keypoint_depths))
self.items_to_handlers[
fields.InputDataFields.groundtruth_keypoint_depth_weights] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/keypoint/x',
'image/object/keypoint/z/weights'],
self._reshape_keypoint_depth_weights))
if load_instance_masks:
if instance_mask_type in (input_reader_pb2.DEFAULT,
input_reader_pb2.NUMERICAL_MASKS):
self.keys_to_features['image/object/mask'] = (
tf.VarLenFeature(tf.float32))
self.items_to_handlers[
fields.InputDataFields.groundtruth_instance_masks] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/mask', 'image/height', 'image/width'],
self._reshape_instance_masks))
elif instance_mask_type == input_reader_pb2.PNG_MASKS:
self.keys_to_features['image/object/mask'] = tf.VarLenFeature(tf.string)
self.items_to_handlers[
fields.InputDataFields.groundtruth_instance_masks] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/mask', 'image/height', 'image/width'],
self._decode_png_instance_masks))
else:
raise ValueError('Did not recognize the `instance_mask_type` option.')
self.keys_to_features['image/object/mask/weight'] = (
tf.VarLenFeature(tf.float32))
self.items_to_handlers[
fields.InputDataFields.groundtruth_instance_mask_weights] = (
slim_example_decoder.Tensor('image/object/mask/weight'))
if load_dense_pose:
self.keys_to_features['image/object/densepose/num'] = (
tf.VarLenFeature(tf.int64))
self.keys_to_features['image/object/densepose/part_index'] = (
tf.VarLenFeature(tf.int64))
self.keys_to_features['image/object/densepose/x'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/densepose/y'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/densepose/u'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/densepose/v'] = (
tf.VarLenFeature(tf.float32))
self.items_to_handlers[
fields.InputDataFields.groundtruth_dp_num_points] = (
slim_example_decoder.Tensor('image/object/densepose/num'))
self.items_to_handlers[fields.InputDataFields.groundtruth_dp_part_ids] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/densepose/part_index',
'image/object/densepose/num'], self._dense_pose_part_indices))
self.items_to_handlers[
fields.InputDataFields.groundtruth_dp_surface_coords] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/densepose/x', 'image/object/densepose/y',
'image/object/densepose/u', 'image/object/densepose/v',
'image/object/densepose/num'],
self._dense_pose_surface_coordinates))
if load_track_id:
self.keys_to_features['image/object/track/label'] = (
tf.VarLenFeature(tf.int64))
self.items_to_handlers[
fields.InputDataFields.groundtruth_track_ids] = (
slim_example_decoder.Tensor('image/object/track/label'))
if label_map_proto_file:
# If the label_map_proto is provided, try to use it in conjunction with
# the class text, and fall back to a materialized ID.
label_handler = slim_example_decoder.BackupHandler(
_ClassTensorHandler(
'image/object/class/text', label_map_proto_file,
default_value=''),
slim_example_decoder.Tensor('image/object/class/label'))
image_label_handler = slim_example_decoder.BackupHandler(
_ClassTensorHandler(
fields.TfExampleFields.image_class_text,
label_map_proto_file,
default_value=''),
slim_example_decoder.Tensor(fields.TfExampleFields.image_class_label))
else:
label_handler = slim_example_decoder.Tensor('image/object/class/label')
image_label_handler = slim_example_decoder.Tensor(
fields.TfExampleFields.image_class_label)
self.items_to_handlers[
fields.InputDataFields.groundtruth_classes] = label_handler
self.items_to_handlers[
fields.InputDataFields.groundtruth_image_classes] = image_label_handler
self._expand_hierarchy_labels = expand_hierarchy_labels
self._ancestors_lut = None
self._descendants_lut = None
if expand_hierarchy_labels:
if label_map_proto_file:
ancestors_lut, descendants_lut = (
label_map_util.get_label_map_hierarchy_lut(label_map_proto_file,
True))
self._ancestors_lut = tf.constant(ancestors_lut, dtype=tf.int64)
self._descendants_lut = tf.constant(descendants_lut, dtype=tf.int64)
else:
raise ValueError('In order to expand labels, the label_map_proto_file '
'has to be provided.')
def decode(self, tf_example_string_tensor):
"""Decodes serialized tensorflow example and returns a tensor dictionary.
Args:
tf_example_string_tensor: a string tensor holding a serialized tensorflow
example proto.
Returns:
A dictionary of the following tensors.
fields.InputDataFields.image - 3D uint8 tensor of shape [None, None, 3]
containing image.
fields.InputDataFields.original_image_spatial_shape - 1D int32 tensor of
shape [2] containing shape of the image.
fields.InputDataFields.source_id - string tensor containing original
image id.
fields.InputDataFields.key - string tensor with unique sha256 hash key.
fields.InputDataFields.filename - string tensor with original dataset
filename.
fields.InputDataFields.groundtruth_boxes - 2D float32 tensor of shape
[None, 4] containing box corners.
fields.InputDataFields.groundtruth_classes - 1D int64 tensor of shape
[None] containing classes for the boxes.
fields.InputDataFields.groundtruth_weights - 1D float32 tensor of
shape [None] indicating the weights of groundtruth boxes.
fields.InputDataFields.groundtruth_area - 1D float32 tensor of shape
        [None] containing object mask area in pixels squared.
fields.InputDataFields.groundtruth_is_crowd - 1D bool tensor of shape
[None] indicating if the boxes enclose a crowd.
Optional:
fields.InputDataFields.groundtruth_image_confidences - 1D float tensor of
shape [None] indicating if a class is present in the image (1.0) or
a class is not present in the image (0.0).
fields.InputDataFields.image_additional_channels - 3D uint8 tensor of
shape [None, None, num_additional_channels]. 1st dim is height; 2nd dim
is width; 3rd dim is the number of additional channels.
fields.InputDataFields.groundtruth_difficult - 1D bool tensor of shape
[None] indicating if the boxes represent `difficult` instances.
fields.InputDataFields.groundtruth_group_of - 1D bool tensor of shape
[None] indicating if the boxes represent `group_of` instances.
fields.InputDataFields.groundtruth_keypoints - 3D float32 tensor of
shape [None, num_keypoints, 2] containing keypoints, where the
coordinates of the keypoints are ordered (y, x).
fields.InputDataFields.groundtruth_keypoint_visibilities - 2D bool
        tensor of shape [None, num_keypoints] containing keypoint visibilities.
fields.InputDataFields.groundtruth_instance_masks - 3D float32 tensor of
shape [None, None, None] containing instance masks.
fields.InputDataFields.groundtruth_instance_mask_weights - 1D float32
tensor of shape [None] containing weights. These are typically values
in {0.0, 1.0} which indicate whether to consider the mask related to an
object.
fields.InputDataFields.groundtruth_image_classes - 1D int64 of shape
[None] containing classes for the boxes.
fields.InputDataFields.multiclass_scores - 1D float32 tensor of shape
[None * num_classes] containing flattened multiclass scores for
groundtruth boxes.
fields.InputDataFields.context_features - 1D float32 tensor of shape
[context_feature_length * num_context_features]
fields.InputDataFields.context_feature_length - int32 tensor specifying
the length of each feature in context_features
"""
serialized_example = tf.reshape(tf_example_string_tensor, shape=[])
decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features,
self.items_to_handlers)
keys = decoder.list_items()
tensors = decoder.decode(serialized_example, items=keys)
tensor_dict = dict(zip(keys, tensors))
is_crowd = fields.InputDataFields.groundtruth_is_crowd
tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool)
tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3])
tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.shape(
tensor_dict[fields.InputDataFields.image])[:2]
if fields.InputDataFields.image_additional_channels in tensor_dict:
channels = tensor_dict[fields.InputDataFields.image_additional_channels]
channels = tf.squeeze(channels, axis=3)
channels = tf.transpose(channels, perm=[1, 2, 0])
tensor_dict[fields.InputDataFields.image_additional_channels] = channels
def default_groundtruth_weights():
return tf.ones(
[tf.shape(tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]],
dtype=tf.float32)
tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond(
tf.greater(
tf.shape(
tensor_dict[fields.InputDataFields.groundtruth_weights])[0],
0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights],
default_groundtruth_weights)
if fields.InputDataFields.groundtruth_instance_masks in tensor_dict:
gt_instance_masks = tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
num_gt_instance_masks = tf.shape(gt_instance_masks)[0]
gt_instance_mask_weights = tensor_dict[
fields.InputDataFields.groundtruth_instance_mask_weights]
num_gt_instance_mask_weights = tf.shape(gt_instance_mask_weights)[0]
def default_groundtruth_instance_mask_weights():
return tf.ones([num_gt_instance_masks], dtype=tf.float32)
tensor_dict[fields.InputDataFields.groundtruth_instance_mask_weights] = (
tf.cond(tf.greater(num_gt_instance_mask_weights, 0),
lambda: gt_instance_mask_weights,
default_groundtruth_instance_mask_weights))
if fields.InputDataFields.groundtruth_keypoints in tensor_dict:
gt_kpt_fld = fields.InputDataFields.groundtruth_keypoints
gt_kpt_vis_fld = fields.InputDataFields.groundtruth_keypoint_visibilities
if self._keypoint_label_map is None:
# Set all keypoints that are not labeled to NaN.
tensor_dict[gt_kpt_fld] = tf.reshape(tensor_dict[gt_kpt_fld],
[-1, self._num_keypoints, 2])
tensor_dict[gt_kpt_vis_fld] = tf.reshape(
tensor_dict[gt_kpt_vis_fld], [-1, self._num_keypoints])
visibilities_tiled = tf.tile(
tf.expand_dims(tensor_dict[gt_kpt_vis_fld], axis=-1), [1, 1, 2])
tensor_dict[gt_kpt_fld] = tf.where(
visibilities_tiled, tensor_dict[gt_kpt_fld],
np.nan * tf.ones_like(tensor_dict[gt_kpt_fld]))
else:
num_instances = tf.shape(tensor_dict['groundtruth_classes'])[0]
def true_fn(num_instances):
"""Logics to process the tensor when num_instances is not zero."""
kpts_idx = tf.cast(self._kpts_name_to_id_table.lookup(
tensor_dict[_KEYPOINT_TEXT_FIELD]), dtype=tf.int32)
num_kpt_texts = tf.cast(
tf.size(tensor_dict[_KEYPOINT_TEXT_FIELD]) / num_instances,
dtype=tf.int32)
# Prepare the index of the instances: [num_instances, num_kpt_texts].
instance_idx = tf.tile(
tf.expand_dims(tf.range(num_instances, dtype=tf.int32), axis=-1),
[1, num_kpt_texts])
# Prepare the index of the keypoints to scatter the keypoint
# coordinates: [num_kpts_texts * num_instances, 2].
full_kpt_idx = tf.concat([
tf.reshape(
instance_idx, shape=[num_kpt_texts * num_instances, 1]),
tf.expand_dims(kpts_idx, axis=-1)
], axis=1)
        # Keep only the keypoints with non-negative indices. Keypoints whose
        # label appears in image/object/keypoint/text but not in the label
        # map get a negative index from the lookup above and are dropped here.
valid_mask = tf.greater_equal(kpts_idx, 0)
full_kpt_idx = tf.boolean_mask(full_kpt_idx, valid_mask)
gt_kpt = tf.scatter_nd(
full_kpt_idx,
tf.boolean_mask(tensor_dict[gt_kpt_fld], valid_mask),
shape=[num_instances, self._num_keypoints, 2])
gt_kpt_vis = tf.cast(tf.scatter_nd(
full_kpt_idx,
tf.boolean_mask(tensor_dict[gt_kpt_vis_fld], valid_mask),
shape=[num_instances, self._num_keypoints]), dtype=tf.bool)
visibilities_tiled = tf.tile(
tf.expand_dims(gt_kpt_vis, axis=-1), [1, 1, 2])
gt_kpt = tf.where(visibilities_tiled, gt_kpt,
np.nan * tf.ones_like(gt_kpt))
return (gt_kpt, gt_kpt_vis)
def false_fn():
"""Logics to process the tensor when num_instances is zero."""
return (tf.zeros([0, self._num_keypoints, 2], dtype=tf.float32),
tf.zeros([0, self._num_keypoints], dtype=tf.bool))
true_fn = functools.partial(true_fn, num_instances)
results = tf.cond(num_instances > 0, true_fn, false_fn)
tensor_dict[gt_kpt_fld] = results[0]
tensor_dict[gt_kpt_vis_fld] = results[1]
      # The keypoint text tensor is not needed anymore, so delete it from
      # tensor_dict so that downstream code (e.g. inputs.py) does not have to
      # handle it.
del tensor_dict[_KEYPOINT_TEXT_FIELD]
if self._expand_hierarchy_labels:
input_fields = fields.InputDataFields
image_classes, image_confidences = self._expand_image_label_hierarchy(
tensor_dict[input_fields.groundtruth_image_classes],
tensor_dict[input_fields.groundtruth_image_confidences])
tensor_dict[input_fields.groundtruth_image_classes] = image_classes
tensor_dict[input_fields.groundtruth_image_confidences] = (
image_confidences)
box_fields = [
fields.InputDataFields.groundtruth_group_of,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_weights,
]
def expand_field(field_name):
return self._expansion_box_field_labels(
tensor_dict[input_fields.groundtruth_classes],
tensor_dict[field_name])
# pylint: disable=cell-var-from-loop
for field in box_fields:
if field in tensor_dict:
tensor_dict[field] = tf.cond(
tf.size(tensor_dict[field]) > 0, lambda: expand_field(field),
lambda: tensor_dict[field])
# pylint: enable=cell-var-from-loop
tensor_dict[input_fields.groundtruth_classes] = (
self._expansion_box_field_labels(
tensor_dict[input_fields.groundtruth_classes],
tensor_dict[input_fields.groundtruth_classes], True))
if fields.InputDataFields.groundtruth_group_of in tensor_dict:
group_of = fields.InputDataFields.groundtruth_group_of
tensor_dict[group_of] = tf.cast(tensor_dict[group_of], dtype=tf.bool)
if fields.InputDataFields.groundtruth_dp_num_points in tensor_dict:
tensor_dict[fields.InputDataFields.groundtruth_dp_num_points] = tf.cast(
tensor_dict[fields.InputDataFields.groundtruth_dp_num_points],
dtype=tf.int32)
tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids] = tf.cast(
tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids],
dtype=tf.int32)
if fields.InputDataFields.groundtruth_track_ids in tensor_dict:
tensor_dict[fields.InputDataFields.groundtruth_track_ids] = tf.cast(
tensor_dict[fields.InputDataFields.groundtruth_track_ids],
dtype=tf.int32)
return tensor_dict
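  # Illustrative usage sketch (not part of the original module): given a
  # serialized tf.Example string `serialized` (a hypothetical placeholder),
  # decoding with this class, as the decoder tests do, might look like:
  #
  #   decoder = TfExampleDecoder(num_keypoints=3)
  #   tensor_dict = decoder.decode(tf.convert_to_tensor(serialized))
  #   image = tensor_dict[fields.InputDataFields.image]              # [H, W, 3] uint8
  #   boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]  # [num_boxes, 4]
  #   classes = tensor_dict[fields.InputDataFields.groundtruth_classes]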
def _keypoint_text_handle(self, keys_to_tensors):
"""Reshapes keypoint text feature."""
y = keys_to_tensors[_KEYPOINT_TEXT_FIELD]
if isinstance(y, tf.SparseTensor):
y = tf.sparse_tensor_to_dense(y)
return y
def _reshape_keypoints(self, keys_to_tensors):
"""Reshape keypoints.
    The keypoints are returned here as [num_instances * num_keypoints, 2] and
    later reshaped to [num_instances, num_keypoints, 2] in decode().
Args:
keys_to_tensors: a dictionary from keys to tensors. Expected keys are:
'image/object/keypoint/x'
'image/object/keypoint/y'
Returns:
A 2-D float tensor of shape [num_instances * num_keypoints, 2] with values
in [0, 1].
"""
y = keys_to_tensors['image/object/keypoint/y']
if isinstance(y, tf.SparseTensor):
y = tf.sparse_tensor_to_dense(y)
y = tf.expand_dims(y, 1)
x = keys_to_tensors['image/object/keypoint/x']
if isinstance(x, tf.SparseTensor):
x = tf.sparse_tensor_to_dense(x)
x = tf.expand_dims(x, 1)
keypoints = tf.concat([y, x], 1)
return keypoints
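  # Illustrative sketch (hypothetical values): with keypoint/y = [0.1, 0.2]
  # and keypoint/x = [0.3, 0.4] (one instance, two keypoints), the handler
  # above returns [[0.1, 0.3], [0.2, 0.4]], i.e. a
  # [num_instances * num_keypoints, 2] tensor; decode() later reshapes it to
  # [num_instances, num_keypoints, 2].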
def _reshape_keypoint_depths(self, keys_to_tensors):
"""Reshape keypoint depths.
The keypoint depths are reshaped to [num_instances, num_keypoints]. The
keypoint depth tensor is expected to have the same shape as the keypoint x
(or y) tensors. If not (usually because the example does not have the depth
groundtruth), then default depth values (zero) are provided.
Args:
keys_to_tensors: a dictionary from keys to tensors. Expected keys are:
'image/object/keypoint/x'
'image/object/keypoint/z'
Returns:
A 2-D float tensor of shape [num_instances, num_keypoints] with values
representing the keypoint depths.
"""
x = keys_to_tensors['image/object/keypoint/x']
z = keys_to_tensors['image/object/keypoint/z']
if isinstance(z, tf.SparseTensor):
z = tf.sparse_tensor_to_dense(z)
if isinstance(x, tf.SparseTensor):
x = tf.sparse_tensor_to_dense(x)
default_z = tf.zeros_like(x)
# Use keypoint depth groundtruth if provided, otherwise use the default
# depth value.
z = tf.cond(tf.equal(tf.size(x), tf.size(z)),
true_fn=lambda: z,
false_fn=lambda: default_z)
z = tf.reshape(z, [-1, self._num_keypoints])
return z
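  # Illustrative sketch: if an example carries 2 instances x 3 keypoints of
  # x-coordinates but no 'image/object/keypoint/z' values, tf.size(z) differs
  # from tf.size(x), so the cond above falls back to zeros and the result is
  # a [2, 3] tensor of 0.0 depths (the behaviour exercised by
  # testDecodeKeypointDepthNoDepth in the decoder tests).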
def _reshape_keypoint_depth_weights(self, keys_to_tensors):
"""Reshape keypoint depth weights.
The keypoint depth weights are reshaped to [num_instances, num_keypoints].
The keypoint depth weights tensor is expected to have the same shape as the
keypoint x (or y) tensors. If not (usually because the example does not have
the depth weights groundtruth), then default weight values (zero) are
provided.
Args:
keys_to_tensors: a dictionary from keys to tensors. Expected keys are:
'image/object/keypoint/x'
'image/object/keypoint/z/weights'
Returns:
A 2-D float tensor of shape [num_instances, num_keypoints] with values
representing the keypoint depth weights.
"""
x = keys_to_tensors['image/object/keypoint/x']
z = keys_to_tensors['image/object/keypoint/z/weights']
if isinstance(z, tf.SparseTensor):
z = tf.sparse_tensor_to_dense(z)
if isinstance(x, tf.SparseTensor):
x = tf.sparse_tensor_to_dense(x)
default_z = tf.zeros_like(x)
# Use keypoint depth weights if provided, otherwise use the default
# values.
z = tf.cond(tf.equal(tf.size(x), tf.size(z)),
true_fn=lambda: z,
false_fn=lambda: default_z)
z = tf.reshape(z, [-1, self._num_keypoints])
return z
def _reshape_keypoint_visibilities(self, keys_to_tensors):
"""Reshape keypoint visibilities.
The keypoint visibilities are reshaped to [num_instances,
num_keypoints].
The raw keypoint visibilities are expected to conform to the
MSCoco definition. See Visibility enum.
The returned boolean is True for the labeled case (either
Visibility.NOT_VISIBLE or Visibility.VISIBLE). These are the same categories
that COCO uses to evaluate keypoint detection performance:
http://cocodataset.org/#keypoints-eval
    If image/object/keypoint/visibility is not provided, visibilities will be
    set to True for finite keypoint coordinate values, and False if the
    coordinates are NaN.
Args:
keys_to_tensors: a dictionary from keys to tensors. Expected keys are:
'image/object/keypoint/x'
'image/object/keypoint/visibility'
Returns:
      A 1-D bool tensor of shape [num_instances * num_keypoints]: True if the
      keypoint is labeled, False otherwise.
"""
x = keys_to_tensors['image/object/keypoint/x']
vis = keys_to_tensors['image/object/keypoint/visibility']
if isinstance(vis, tf.SparseTensor):
vis = tf.sparse_tensor_to_dense(vis)
if isinstance(x, tf.SparseTensor):
x = tf.sparse_tensor_to_dense(x)
default_vis = tf.where(
tf.math.is_nan(x),
Visibility.UNLABELED.value * tf.ones_like(x, dtype=tf.int64),
Visibility.VISIBLE.value * tf.ones_like(x, dtype=tf.int64))
# Use visibility if provided, otherwise use the default visibility.
vis = tf.cond(tf.equal(tf.size(x), tf.size(vis)),
true_fn=lambda: vis,
false_fn=lambda: default_vis)
vis = tf.math.logical_or(
tf.math.equal(vis, Visibility.NOT_VISIBLE.value),
tf.math.equal(vis, Visibility.VISIBLE.value))
return vis
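  # Illustrative sketch: with raw COCO-style visibilities [1, 2, 0, 1, 0, 2],
  # the logical_or above maps NOT_VISIBLE (1) and VISIBLE (2) to True and
  # UNLABELED (0) to False, giving [True, True, False, True, False, True];
  # decode() later reshapes this to [num_instances, num_keypoints]. The same
  # values appear in testDecodeKeypoint in the decoder tests.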
def _reshape_instance_masks(self, keys_to_tensors):
"""Reshape instance segmentation masks.
The instance segmentation masks are reshaped to [num_instances, height,
width].
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float tensor of shape [num_instances, height, width] with values
in {0, 1}.
"""
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)
masks = keys_to_tensors['image/object/mask']
if isinstance(masks, tf.SparseTensor):
masks = tf.sparse_tensor_to_dense(masks)
masks = tf.reshape(
tf.cast(tf.greater(masks, 0.0), dtype=tf.float32), to_shape)
return tf.cast(masks, tf.float32)
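  # Illustrative sketch (hypothetical sizes): with image/height = 2,
  # image/width = 3 and a flat 'image/object/mask' of 12 float values, the
  # reshape above yields a [2, 2, 3] tensor (two instances), every value
  # binarized to 0.0 or 1.0 by the tf.greater(masks, 0.0) cast.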
def _reshape_context_features(self, keys_to_tensors):
"""Reshape context features.
The instance context_features are reshaped to
[num_context_features, context_feature_length]
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 2-D float tensor of shape [num_context_features, context_feature_length]
"""
context_feature_length = keys_to_tensors['image/context_feature_length']
to_shape = tf.cast(tf.stack([-1, context_feature_length]), tf.int32)
context_features = keys_to_tensors['image/context_features']
if isinstance(context_features, tf.SparseTensor):
context_features = tf.sparse_tensor_to_dense(context_features)
context_features = tf.reshape(context_features, to_shape)
return context_features
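  # Illustrative sketch: with image/context_features =
  # [0.0, 0.1, 0.2, 0.3, 0.4, 0.5] and image/context_feature_length = 3, the
  # reshape above yields [[0.0, 0.1, 0.2], [0.3, 0.4, 0.5]], i.e. a
  # [num_context_features=2, context_feature_length=3] tensor (the same
  # values are used by the sequence-example decoder tests).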
def _decode_png_instance_masks(self, keys_to_tensors):
"""Decode PNG instance segmentation masks and stack into dense tensor.
The instance segmentation masks are reshaped to [num_instances, height,
width].
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float tensor of shape [num_instances, height, width] with values
in {0, 1}.
"""
def decode_png_mask(image_buffer):
image = tf.squeeze(
tf.image.decode_image(image_buffer, channels=1), axis=2)
image.set_shape([None, None])
image = tf.cast(tf.greater(image, 0), dtype=tf.float32)
return image
png_masks = keys_to_tensors['image/object/mask']
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
if isinstance(png_masks, tf.SparseTensor):
png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='')
return tf.cond(
tf.greater(tf.size(png_masks), 0),
lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32),
lambda: tf.zeros(tf.cast(tf.stack([0, height, width]), dtype=tf.int32)))
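  # Illustrative sketch: each entry of 'image/object/mask' holds one encoded
  # PNG buffer; decode_png_mask turns it into a binary [height, width] float
  # map and tf.map_fn stacks them into [num_instances, height, width]. With
  # no masks present, the cond above instead returns a [0, height, width]
  # tensor, which is what testDecodeEmptyPngInstanceMasks checks.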
def _dense_pose_part_indices(self, keys_to_tensors):
"""Creates a tensor that contains part indices for each DensePose point.
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 2-D int32 tensor of shape [num_instances, num_points] where each element
contains the DensePose part index (0-23). The value `num_points`
corresponds to the maximum number of sampled points across all instances
in the image. Note that instances with less sampled points will be padded
with zeros in the last dimension.
"""
num_points_per_instances = keys_to_tensors['image/object/densepose/num']
part_index = keys_to_tensors['image/object/densepose/part_index']
if isinstance(num_points_per_instances, tf.SparseTensor):
num_points_per_instances = tf.sparse_tensor_to_dense(
num_points_per_instances)
if isinstance(part_index, tf.SparseTensor):
part_index = tf.sparse_tensor_to_dense(part_index)
part_index = tf.cast(part_index, dtype=tf.int32)
max_points_per_instance = tf.cast(
tf.math.reduce_max(num_points_per_instances), dtype=tf.int32)
num_points_cumulative = tf.concat([
[0], tf.math.cumsum(num_points_per_instances)], axis=0)
def pad_parts_tensor(instance_ind):
points_range_start = num_points_cumulative[instance_ind]
points_range_end = num_points_cumulative[instance_ind + 1]
part_inds = part_index[points_range_start:points_range_end]
return shape_utils.pad_or_clip_nd(part_inds,
output_shape=[max_points_per_instance])
return tf.map_fn(pad_parts_tensor,
tf.range(tf.size(num_points_per_instances)),
dtype=tf.int32)
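  # Illustrative sketch (hypothetical values): with densepose/num = [2, 3]
  # and part_index = [5, 7, 1, 1, 2], instance 0 owns [5, 7] and instance 1
  # owns [1, 1, 2]; max_points_per_instance is 3, so pad_parts_tensor pads
  # instance 0 with a trailing zero, giving [[5, 7, 0], [1, 1, 2]].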
def _dense_pose_surface_coordinates(self, keys_to_tensors):
"""Creates a tensor that contains surface coords for each DensePose point.
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float32 tensor of shape [num_instances, num_points, 4] where each
point contains (y, x, v, u) data for each sampled DensePose point. The
(y, x) coordinate has normalized image locations for the point, and (v, u)
contains the surface coordinate (also normalized) for the part. The value
`num_points` corresponds to the maximum number of sampled points across
      all instances in the image. Note that instances with fewer sampled points
will be padded with zeros in dim=1.
"""
num_points_per_instances = keys_to_tensors['image/object/densepose/num']
dp_y = keys_to_tensors['image/object/densepose/y']
dp_x = keys_to_tensors['image/object/densepose/x']
dp_v = keys_to_tensors['image/object/densepose/v']
dp_u = keys_to_tensors['image/object/densepose/u']
if isinstance(num_points_per_instances, tf.SparseTensor):
num_points_per_instances = tf.sparse_tensor_to_dense(
num_points_per_instances)
if isinstance(dp_y, tf.SparseTensor):
dp_y = tf.sparse_tensor_to_dense(dp_y)
if isinstance(dp_x, tf.SparseTensor):
dp_x = tf.sparse_tensor_to_dense(dp_x)
if isinstance(dp_v, tf.SparseTensor):
dp_v = tf.sparse_tensor_to_dense(dp_v)
if isinstance(dp_u, tf.SparseTensor):
dp_u = tf.sparse_tensor_to_dense(dp_u)
max_points_per_instance = tf.cast(
tf.math.reduce_max(num_points_per_instances), dtype=tf.int32)
num_points_cumulative = tf.concat([
[0], tf.math.cumsum(num_points_per_instances)], axis=0)
def pad_surface_coordinates_tensor(instance_ind):
"""Pads DensePose surface coordinates for each instance."""
points_range_start = num_points_cumulative[instance_ind]
points_range_end = num_points_cumulative[instance_ind + 1]
y = dp_y[points_range_start:points_range_end]
x = dp_x[points_range_start:points_range_end]
v = dp_v[points_range_start:points_range_end]
u = dp_u[points_range_start:points_range_end]
# Create [num_points_i, 4] tensor, where num_points_i is the number of
# sampled points for instance i.
unpadded_tensor = tf.stack([y, x, v, u], axis=1)
return shape_utils.pad_or_clip_nd(
unpadded_tensor, output_shape=[max_points_per_instance, 4])
return tf.map_fn(pad_surface_coordinates_tensor,
tf.range(tf.size(num_points_per_instances)),
dtype=tf.float32)
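  # Illustrative sketch: continuing the hypothetical densepose/num = [2, 3]
  # example above, each instance's (y, x, v, u) values are stacked into a
  # [num_points_i, 4] tensor and padded (or clipped) to
  # [max_points_per_instance=3, 4], so the result is a [2, 3, 4] float32
  # tensor with zero padding for instance 0's missing third point.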
def _expand_image_label_hierarchy(self, image_classes, image_confidences):
"""Expand image level labels according to the hierarchy.
Args:
image_classes: Int64 tensor with the image level class ids for a sample.
image_confidences: Float tensor signaling whether a class id is present in
the image (1.0) or not present (0.0).
Returns:
new_image_classes: Int64 tensor equal to expanding image_classes.
new_image_confidences: Float tensor equal to expanding image_confidences.
"""
def expand_labels(relation_tensor, confidence_value):
"""Expand to ancestors or descendants depending on arguments."""
mask = tf.equal(image_confidences, confidence_value)
target_image_classes = tf.boolean_mask(image_classes, mask)
expanded_indices = tf.reduce_any((tf.gather(
relation_tensor, target_image_classes - _LABEL_OFFSET, axis=0) > 0),
axis=0)
expanded_indices = tf.where(expanded_indices)[:, 0] + _LABEL_OFFSET
new_groundtruth_image_classes = (
tf.concat([
tf.boolean_mask(image_classes, tf.logical_not(mask)),
expanded_indices,
],
axis=0))
new_groundtruth_image_confidences = (
tf.concat([
tf.boolean_mask(image_confidences, tf.logical_not(mask)),
tf.ones([tf.shape(expanded_indices)[0]],
dtype=image_confidences.dtype) * confidence_value,
],
axis=0))
return new_groundtruth_image_classes, new_groundtruth_image_confidences
image_classes, image_confidences = expand_labels(self._ancestors_lut, 1.0)
new_image_classes, new_image_confidences = expand_labels(
self._descendants_lut, 0.0)
return new_image_classes, new_image_confidences
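  # Illustrative sketch (hypothetical class ids): if class 3 is present in
  # the image (confidence 1.0) and class 5 is absent (confidence 0.0), the
  # first expand_labels call adds all ancestors of class 3 with confidence
  # 1.0 and the second adds all descendants of class 5 with confidence 0.0,
  # e.g. "cat present" also implies "animal present", while "vehicle absent"
  # also marks "car absent". The LUTs themselves come from
  # label_map_util.get_label_map_hierarchy_lut in __init__.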
def _expansion_box_field_labels(self,
object_classes,
object_field,
copy_class_id=False):
"""Expand the labels of a specific object field according to the hierarchy.
Args:
object_classes: Int64 tensor with the class id for each element in
object_field.
object_field: Tensor to be expanded.
copy_class_id: Boolean to choose whether to use class id values in the
output tensor instead of replicating the original values.
Returns:
A tensor with the result of expanding object_field.
"""
expanded_indices = tf.gather(
self._ancestors_lut, object_classes - _LABEL_OFFSET, axis=0)
if copy_class_id:
new_object_field = tf.where(expanded_indices > 0)[:, 1] + _LABEL_OFFSET
else:
new_object_field = tf.repeat(
object_field, tf.reduce_sum(expanded_indices, axis=1), axis=0)
return new_object_field
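  # Illustrative sketch: for a box labeled with class c, the gather above
  # selects row (c - _LABEL_OFFSET) of the ancestors LUT. With copy_class_id
  # True, the returned field is the list of expanded class ids themselves;
  # with copy_class_id False, each box's field value (e.g. its coordinates or
  # weight) is repeated once per expanded class, keeping the expanded class
  # and field tensors aligned row for row.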
models | models-master/research/object_detection/data_decoders/tf_sequence_example_decoder_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf_sequence_example_decoder.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_sequence_example_decoder
from object_detection.dataset_tools import seq_example_util
from object_detection.utils import test_case
class TfSequenceExampleDecoderTest(test_case.TestCase):
def _create_label_map(self, path):
label_map_text = """
item {
name: "dog"
id: 1
}
item {
name: "cat"
id: 2
}
item {
name: "panda"
id: 4
}
"""
with tf.gfile.Open(path, 'wb') as f:
f.write(label_map_text)
def _make_random_serialized_jpeg_images(self, num_frames, image_height,
image_width):
def graph_fn():
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
return [tf.io.encode_jpeg(image) for image in images_list]
encoded_images = self.execute(graph_fn, [])
return encoded_images
def test_decode_sequence_example(self):
num_frames = 4
image_height = 20
image_width = 30
expected_groundtruth_boxes = [
[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
[[0.2, 0.2, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
[[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
]
expected_groundtruth_classes = [
[-1, -1],
[-1, 1],
[1, 2],
[-1, -1]
]
flds = fields.InputDataFields
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
def graph_fn():
label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
self._create_label_map(label_map_proto_file)
decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
label_map_proto_file=label_map_proto_file)
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_format='JPEG',
image_source_ids=[str(i) for i in range(num_frames)],
is_annotated=[[1], [1], [1], [1]],
bboxes=[
[[0., 0., 1., 1.]], # Frame 0.
[[0.2, 0.2, 1., 1.],
[0., 0., 1., 1.]], # Frame 1.
[[0., 0., 1., 1.], # Frame 2.
[0.1, 0.1, 0.2, 0.2]],
[[]], # Frame 3.
],
label_strings=[
['fox'], # Frame 0. Fox will be filtered out.
['fox', 'dog'], # Frame 1. Fox will be filtered out.
['dog', 'cat'], # Frame 2.
[], # Frame 3
]).SerializeToString()
example_string_tensor = tf.convert_to_tensor(sequence_example_serialized)
return decoder.decode(example_string_tensor)
tensor_dict_out = self.execute(graph_fn, [])
self.assertAllClose(expected_groundtruth_boxes,
tensor_dict_out[flds.groundtruth_boxes])
self.assertAllEqual(expected_groundtruth_classes,
tensor_dict_out[flds.groundtruth_classes])
def test_decode_sequence_example_context(self):
num_frames = 4
image_height = 20
image_width = 30
expected_groundtruth_boxes = [
[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
[[0.2, 0.2, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
[[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
]
expected_groundtruth_classes = [
[-1, -1],
[-1, 1],
[1, 2],
[-1, -1]
]
expected_context_features = np.array(
[[0.0, 0.1, 0.2], [0.3, 0.4, 0.5]], dtype=np.float32)
flds = fields.InputDataFields
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
def graph_fn():
label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
self._create_label_map(label_map_proto_file)
decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
label_map_proto_file=label_map_proto_file,
load_context_features=True)
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_format='JPEG',
image_source_ids=[str(i) for i in range(num_frames)],
is_annotated=[[1], [1], [1], [1]],
bboxes=[
[[0., 0., 1., 1.]], # Frame 0.
[[0.2, 0.2, 1., 1.],
[0., 0., 1., 1.]], # Frame 1.
[[0., 0., 1., 1.], # Frame 2.
[0.1, 0.1, 0.2, 0.2]],
[[]], # Frame 3.
],
label_strings=[
['fox'], # Frame 0. Fox will be filtered out.
['fox', 'dog'], # Frame 1. Fox will be filtered out.
['dog', 'cat'], # Frame 2.
[], # Frame 3
],
context_features=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
context_feature_length=[3],
context_features_image_id_list=[b'im_1', b'im_2']
).SerializeToString()
example_string_tensor = tf.convert_to_tensor(sequence_example_serialized)
return decoder.decode(example_string_tensor)
tensor_dict_out = self.execute(graph_fn, [])
self.assertAllClose(expected_groundtruth_boxes,
tensor_dict_out[flds.groundtruth_boxes])
self.assertAllEqual(expected_groundtruth_classes,
tensor_dict_out[flds.groundtruth_classes])
self.assertAllClose(expected_context_features,
tensor_dict_out[flds.context_features])
def test_decode_sequence_example_context_image_id_list(self):
num_frames = 4
image_height = 20
image_width = 30
expected_groundtruth_boxes = [
[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
[[0.2, 0.2, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
[[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
]
expected_groundtruth_classes = [
[-1, -1],
[-1, 1],
[1, 2],
[-1, -1]
]
expected_context_image_ids = [b'im_1', b'im_2']
flds = fields.InputDataFields
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
def graph_fn():
label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
self._create_label_map(label_map_proto_file)
decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
label_map_proto_file=label_map_proto_file,
load_context_image_ids=True)
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_format='JPEG',
image_source_ids=[str(i) for i in range(num_frames)],
is_annotated=[[1], [1], [1], [1]],
bboxes=[
[[0., 0., 1., 1.]], # Frame 0.
[[0.2, 0.2, 1., 1.],
[0., 0., 1., 1.]], # Frame 1.
[[0., 0., 1., 1.], # Frame 2.
[0.1, 0.1, 0.2, 0.2]],
[[]], # Frame 3.
],
label_strings=[
['fox'], # Frame 0. Fox will be filtered out.
['fox', 'dog'], # Frame 1. Fox will be filtered out.
['dog', 'cat'], # Frame 2.
[], # Frame 3
],
context_features=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
context_feature_length=[3],
context_features_image_id_list=[b'im_1', b'im_2']
).SerializeToString()
example_string_tensor = tf.convert_to_tensor(sequence_example_serialized)
return decoder.decode(example_string_tensor)
tensor_dict_out = self.execute(graph_fn, [])
self.assertAllClose(expected_groundtruth_boxes,
tensor_dict_out[flds.groundtruth_boxes])
self.assertAllEqual(expected_groundtruth_classes,
tensor_dict_out[flds.groundtruth_classes])
self.assertAllEqual(expected_context_image_ids,
tensor_dict_out[flds.context_features_image_id_list])
def test_decode_sequence_example_negative_clip(self):
num_frames = 4
image_height = 20
image_width = 30
expected_groundtruth_boxes = -1 * np.ones((4, 0, 4))
expected_groundtruth_classes = -1 * np.ones((4, 0))
flds = fields.InputDataFields
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
def graph_fn():
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_format='JPEG',
image_source_ids=[str(i) for i in range(num_frames)],
bboxes=[
[[]],
[[]],
[[]],
[[]]
],
label_strings=[
[],
[],
[],
[]
]).SerializeToString()
example_string_tensor = tf.convert_to_tensor(sequence_example_serialized)
label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
self._create_label_map(label_map_proto_file)
decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
label_map_proto_file=label_map_proto_file)
return decoder.decode(example_string_tensor)
tensor_dict_out = self.execute(graph_fn, [])
self.assertAllClose(expected_groundtruth_boxes,
tensor_dict_out[flds.groundtruth_boxes])
self.assertAllEqual(expected_groundtruth_classes,
tensor_dict_out[flds.groundtruth_classes])
if __name__ == '__main__':
tf.test.main()
models | models-master/research/object_detection/data_decoders/tf_example_decoder_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.data_decoders.tf_example_decoder."""
import os
import numpy as np
import six
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
from object_detection.utils import test_case
class TfExampleDecoderTest(test_case.TestCase):
def _create_encoded_and_decoded_data(self, data, encoding_type):
if encoding_type == 'jpeg':
encode_fn = tf.image.encode_jpeg
decode_fn = tf.image.decode_jpeg
elif encoding_type == 'png':
encode_fn = tf.image.encode_png
decode_fn = tf.image.decode_png
else:
raise ValueError('Invalid encoding type.')
def prepare_data_fn():
encoded_data = encode_fn(data)
decoded_data = decode_fn(encoded_data)
return encoded_data, decoded_data
return self.execute_cpu(prepare_data_fn, [])
def testDecodeAdditionalChannels(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(image, 'jpeg')
additional_channel = np.random.randint(256, size=(4, 5, 1)).astype(np.uint8)
(encoded_additional_channel,
decoded_additional_channel) = self._create_encoded_and_decoded_data(
additional_channel, 'jpeg')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/additional_channels/encoded':
dataset_util.bytes_list_feature(
[encoded_additional_channel] * 2),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_additional_channels=2)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
np.concatenate([decoded_additional_channel] * 2, axis=2),
tensor_dict[fields.InputDataFields.image_additional_channels])
def testDecodeJpegImage(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, decoded_jpeg = self._create_encoded_and_decoded_data(
image, 'jpeg')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.image].get_shape().as_list()),
[None, None, 3])
self.assertAllEqual(
(output[fields.InputDataFields.original_image_spatial_shape]
.get_shape().as_list()), [2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image])
self.assertAllEqual([4, 5], tensor_dict[fields.InputDataFields.
original_image_spatial_shape])
self.assertEqual(
six.b('image_id'), tensor_dict[fields.InputDataFields.source_id])
def testDecodeImageKeyAndFilename(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(image, 'jpeg')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/key/sha256':
dataset_util.bytes_feature(six.b('abc')),
'image/filename':
dataset_util.bytes_feature(six.b('filename'))
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertEqual(six.b('abc'), tensor_dict[fields.InputDataFields.key])
self.assertEqual(
six.b('filename'), tensor_dict[fields.InputDataFields.filename])
def testDecodePngImage(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_png, decoded_png = self._create_encoded_and_decoded_data(
image, 'png')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_png),
'image/format':
dataset_util.bytes_feature(six.b('png')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id'))
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.image].get_shape().as_list()),
[None, None, 3])
self.assertAllEqual(
(output[fields.InputDataFields.original_image_spatial_shape]
.get_shape().as_list()), [2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(decoded_png, tensor_dict[fields.InputDataFields.image])
self.assertAllEqual([4, 5], tensor_dict[fields.InputDataFields.
original_image_spatial_shape])
self.assertEqual(
six.b('image_id'), tensor_dict[fields.InputDataFields.source_id])
def testDecodePngInstanceMasks(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_png, _ = self._create_encoded_and_decoded_data(image, 'png')
mask_1 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8)
mask_2 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8)
encoded_png_1, _ = self._create_encoded_and_decoded_data(mask_1, 'png')
decoded_png_1 = np.squeeze(mask_1.astype(np.float32))
encoded_png_2, _ = self._create_encoded_and_decoded_data(mask_2, 'png')
decoded_png_2 = np.squeeze(mask_2.astype(np.float32))
encoded_masks = [encoded_png_1, encoded_png_2]
decoded_masks = np.stack([decoded_png_1, decoded_png_2])
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_png),
'image/format':
dataset_util.bytes_feature(six.b('png')),
'image/object/mask':
dataset_util.bytes_list_feature(encoded_masks)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True,
instance_mask_type=input_reader_pb2.PNG_MASKS)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
decoded_masks,
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
def testDecodeEmptyPngInstanceMasks(self):
image_tensor = np.random.randint(256, size=(10, 10, 3)).astype(np.uint8)
encoded_png, _ = self._create_encoded_and_decoded_data(image_tensor, 'png')
encoded_masks = []
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_png),
'image/format':
dataset_util.bytes_feature(six.b('png')),
'image/object/mask':
dataset_util.bytes_list_feature(encoded_masks),
'image/height':
dataset_util.int64_feature(10),
'image/width':
dataset_util.int64_feature(10),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True,
instance_mask_type=input_reader_pb2.PNG_MASKS)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_instance_masks].shape,
[0, 10, 10])
def testDecodeBoundingBox(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
def testDecodeKeypointDepth(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
keypoint_depths = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
keypoint_depth_weights = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/z':
dataset_util.float_list_feature(keypoint_depths),
'image/object/keypoint/z/weights':
dataset_util.float_list_feature(keypoint_depth_weights),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_keypoints=3, load_keypoint_depth_features=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_keypoint_depths].get_shape(
).as_list()), [2, 3])
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_keypoint_depth_weights]
.get_shape().as_list()), [2, 3])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_keypoint_depths = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
self.assertAllClose(
expected_keypoint_depths,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depths])
expected_keypoint_depth_weights = [[1.0, 0.9, 0.8], [0.7, 0.6, 0.5]]
self.assertAllClose(
expected_keypoint_depth_weights,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depth_weights])
def testDecodeKeypointDepthNoDepth(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_keypoints=3, load_keypoint_depth_features=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_keypoints_depth_default = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
self.assertAllClose(
expected_keypoints_depth_default,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depths])
self.assertAllClose(
expected_keypoints_depth_default,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depth_weights])
def testDecodeKeypoint(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()),
[2, 3, 2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
expected_keypoints = [
[[0.0, 1.0], [1.0, 2.0], [np.nan, np.nan]],
[[3.0, 4.0], [np.nan, np.nan], [5.0, 6.0]]]
self.assertAllClose(
expected_keypoints,
tensor_dict[fields.InputDataFields.groundtruth_keypoints])
expected_visibility = (
(np.array(keypoint_visibility) > 0).reshape((2, 3)))
self.assertAllEqual(
expected_visibility,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_visibilities])
def testDecodeKeypointNoInstance(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = []
bbox_xmins = []
bbox_ymaxs = []
bbox_xmaxs = []
keypoint_ys = []
keypoint_xs = []
keypoint_visibility = []
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()),
[0, 3, 2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[0, 4], tensor_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllEqual(
[0, 3, 2],
tensor_dict[fields.InputDataFields.groundtruth_keypoints].shape)
def testDecodeKeypointWithText(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes = [0, 1]
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
keypoint_texts = [
six.b('nose'), six.b('left_eye'), six.b('right_eye'), six.b('nose'),
six.b('left_eye'), six.b('right_eye')
]
label_map_string = """
item: {
id: 1
name: 'face'
display_name: 'face'
keypoints {
id: 0
label: "nose"
}
keypoints {
id: 2
label: "right_eye"
}
}
item: {
id: 2
name: 'person'
display_name: 'person'
keypoints {
id: 1
label: "left_eye"
}
}
"""
label_map_proto_file = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_proto_file, 'wb') as f:
f.write(label_map_string)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
'image/object/keypoint/text':
dataset_util.bytes_list_feature(keypoint_texts),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_proto_file, num_keypoints=5,
use_keypoint_label_map=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()),
[None, 5, 2])
return output
output = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
output[fields.InputDataFields.groundtruth_boxes])
expected_keypoints = [[[0.0, 1.0], [1.0, 2.0], [np.nan, np.nan],
[np.nan, np.nan], [np.nan, np.nan]],
[[3.0, 4.0], [np.nan, np.nan], [5.0, 6.0],
[np.nan, np.nan], [np.nan, np.nan]]]
self.assertAllClose(expected_keypoints,
output[fields.InputDataFields.groundtruth_keypoints])
expected_visibility = (
(np.array(keypoint_visibility) > 0).reshape((2, 3)))
gt_kpts_vis_fld = fields.InputDataFields.groundtruth_keypoint_visibilities
self.assertAllEqual(expected_visibility, output[gt_kpts_vis_fld][:, 0:3])
# The additional keypoints should all have False visibility.
self.assertAllEqual(
np.zeros([2, 2], dtype=bool), output[gt_kpts_vis_fld][:, 3:])
def testDecodeKeypointWithKptsLabelsNotInText(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes = [0, 1]
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
keypoint_texts = [
six.b('nose'), six.b('left_eye'), six.b('right_eye'), six.b('nose'),
six.b('left_eye'), six.b('right_eye')
]
label_map_string = """
item: {
id: 1
name: 'face'
display_name: 'face'
keypoints {
id: 0
label: "missing_part"
}
keypoints {
id: 2
label: "right_eye"
}
keypoints {
id: 3
label: "nose"
}
}
item: {
id: 2
name: 'person'
display_name: 'person'
keypoints {
id: 1
label: "left_eye"
}
}
"""
label_map_proto_file = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_proto_file, 'wb') as f:
f.write(label_map_string)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
'image/object/keypoint/text':
dataset_util.bytes_list_feature(keypoint_texts),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_proto_file, num_keypoints=5,
use_keypoint_label_map=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()),
[None, 5, 2])
return output
output = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
output[fields.InputDataFields.groundtruth_boxes])
expected_keypoints = [[[np.nan, np.nan], [1., 2.], [np.nan, np.nan],
[0., 1.], [np.nan, np.nan]],
[[np.nan, np.nan], [np.nan, np.nan], [5., 6.],
[3., 4.], [np.nan, np.nan]]]
self.assertAllClose(expected_keypoints,
output[fields.InputDataFields.groundtruth_keypoints])
expected_visibility = [[False, True, False, True, False],
[False, False, True, True, False]]
gt_kpts_vis_fld = fields.InputDataFields.groundtruth_keypoint_visibilities
self.assertAllEqual(expected_visibility, output[gt_kpts_vis_fld])
def testDecodeKeypointNoVisibilities(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()),
[2, 3, 2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
expected_keypoints = (
np.vstack([keypoint_ys, keypoint_xs]).transpose().reshape((2, 3, 2)))
self.assertAllEqual(
expected_keypoints,
tensor_dict[fields.InputDataFields.groundtruth_keypoints])
expected_visibility = np.ones((2, 3))
self.assertAllEqual(
expected_visibility,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_visibilities])
def testDecodeDefaultGroundtruthWeights(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllClose(tensor_dict[fields.InputDataFields.groundtruth_weights],
np.ones(2, dtype=np.float32))
def testDecodeObjectLabel(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeMultiClassScores(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
flattened_multiclass_scores = [100., 50.] + [20., 30.]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/multiclass_scores':
dataset_util.float_list_feature(
flattened_multiclass_scores),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_multiclass_scores=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(flattened_multiclass_scores,
tensor_dict[fields.InputDataFields.multiclass_scores])
def testDecodeEmptyMultiClassScores(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_multiclass_scores=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertEqual(
(0,), tensor_dict[fields.InputDataFields.multiclass_scores].shape)
def testDecodeObjectLabelNoText(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes = [1, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes),
})).SerializeToString()
label_map_string = """
item {
id:1
name:'cat'
}
item {
id:2
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithText(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('dog')]
# Annotation label gets overridden by labelmap id.
annotated_bbox_classes = [3, 4]
expected_bbox_classes = [1, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
'image/object/class/label':
dataset_util.int64_list_feature(annotated_bbox_classes),
})).SerializeToString()
label_map_string = """
item {
id:1
name:'cat'
keypoints {
id: 0
label: "nose"
}
keypoints {
id: 1
label: "left_eye"
}
keypoints {
id: 2
label: "right_eye"
}
}
item {
id:2
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(expected_bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelUnrecognizedName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('cheetah')]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([2, -1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithMappingWithDisplayName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('dog')]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:3
display_name:'cat'
}
item {
id:1
display_name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([3, 1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelUnrecognizedNameWithMappingWithDisplayName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('cheetah')]
bbox_classes_id = [5, 6]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes_id),
})).SerializeToString()
label_map_string = """
item {
name:'/m/cat'
id:3
display_name:'cat'
}
item {
name:'/m/dog'
id:1
display_name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([3, -1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithMappingWithName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('dog')]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([3, 1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectArea(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_area = [100., 174.]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/area':
dataset_util.float_list_feature(object_area),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_area].get_shape().as_list()), [2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(object_area,
tensor_dict[fields.InputDataFields.groundtruth_area])
def testDecodeVerifiedNegClasses(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
neg_category_ids = [0, 5, 8]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/neg_category_ids':
dataset_util.int64_list_feature(neg_category_ids),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
neg_category_ids,
tensor_dict[fields.InputDataFields.groundtruth_verified_neg_classes])
def testDecodeNotExhaustiveClasses(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
not_exhaustive_category_ids = [0, 5, 8]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/not_exhaustive_category_ids':
dataset_util.int64_list_feature(
not_exhaustive_category_ids),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
not_exhaustive_category_ids,
tensor_dict[fields.InputDataFields.groundtruth_not_exhaustive_classes])
def testDecodeObjectIsCrowd(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_is_crowd = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/is_crowd':
dataset_util.int64_list_feature(object_is_crowd),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_is_crowd].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[bool(item) for item in object_is_crowd],
tensor_dict[fields.InputDataFields.groundtruth_is_crowd])
def testDecodeObjectDifficult(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_difficult = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/difficult':
dataset_util.int64_list_feature(object_difficult),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_difficult].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[bool(item) for item in object_difficult],
tensor_dict[fields.InputDataFields.groundtruth_difficult])
def testDecodeObjectGroupOf(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_group_of = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/group_of':
dataset_util.int64_list_feature(object_group_of),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_group_of].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[bool(item) for item in object_group_of],
tensor_dict[fields.InputDataFields.groundtruth_group_of])
def testDecodeObjectWeight(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_weights = [0.75, 1.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/weight':
dataset_util.float_list_feature(object_weights),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_weights].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(object_weights,
tensor_dict[fields.InputDataFields.groundtruth_weights])
def testDecodeClassConfidence(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
class_confidence = [0.0, 1.0, 0.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/class/confidence':
dataset_util.float_list_feature(class_confidence),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_image_confidences]
.get_shape().as_list()), [3])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
class_confidence,
tensor_dict[fields.InputDataFields.groundtruth_image_confidences])
def testDecodeInstanceSegmentation(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(
256, size=(image_height, image_width, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances, image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/object/mask':
dataset_util.float_list_feature(instance_masks_flattened),
'image/object/class/label':
dataset_util.int64_list_feature(object_classes)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_instance_masks].get_shape(
).as_list()), [4, 5, 3])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
instance_masks.astype(np.float32),
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_instance_mask_weights],
[1, 1, 1, 1])
self.assertAllEqual(object_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testInstancesNotAvailableByDefault(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(
256, size=(image_height, image_width, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances, image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/object/mask':
dataset_util.float_list_feature(instance_masks_flattened),
'image/object/class/label':
dataset_util.int64_list_feature(object_classes)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
tensor_dict)
def testDecodeInstanceSegmentationWithWeights(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(
256, size=(image_height, image_width, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances, image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
instance_mask_weights = np.array([1, 1, 0, 1], dtype=np.float32)
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/object/mask':
dataset_util.float_list_feature(instance_masks_flattened),
'image/object/mask/weight':
dataset_util.float_list_feature(instance_mask_weights),
'image/object/class/label':
dataset_util.int64_list_feature(object_classes)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_instance_masks].get_shape(
).as_list()), [4, 5, 3])
self.assertAllEqual(
output[fields.InputDataFields.groundtruth_instance_mask_weights],
[1, 1, 0, 1])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
instance_masks.astype(np.float32),
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
self.assertAllEqual(object_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeImageLabels(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
def graph_fn_1():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
'image/format': dataset_util.bytes_feature(six.b('jpeg')),
'image/class/label': dataset_util.int64_list_feature([1, 2]),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn_1, [])
self.assertIn(fields.InputDataFields.groundtruth_image_classes, tensor_dict)
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_image_classes],
np.array([1, 2]))
def graph_fn_2():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/class/text':
dataset_util.bytes_list_feature(
[six.b('dog'), six.b('cat')]),
})).SerializeToString()
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn_2, [])
self.assertIn(fields.InputDataFields.groundtruth_image_classes, tensor_dict)
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_image_classes],
np.array([1, 3]))
def testDecodeContextFeatures(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
num_features = 8
context_feature_length = 10
context_features = np.random.random(num_features*context_feature_length)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/context_features':
dataset_util.float_list_feature(context_features),
'image/context_feature_length':
dataset_util.int64_feature(context_feature_length),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_context_features=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllClose(
context_features.reshape(num_features, context_feature_length),
tensor_dict[fields.InputDataFields.context_features])
self.assertAllEqual(
context_feature_length,
tensor_dict[fields.InputDataFields.context_feature_length])
def testContextFeaturesNotAvailableByDefault(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
num_features = 10
context_feature_length = 10
context_features = np.random.random(num_features*context_feature_length)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/context_features':
dataset_util.float_list_feature(context_features),
'image/context_feature_length':
dataset_util.int64_feature(context_feature_length),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertNotIn(fields.InputDataFields.context_features,
tensor_dict)
def testExpandLabels(self):
label_map_string = """
item {
id:1
name:'cat'
ancestor_ids: 2
}
item {
id:2
name:'animal'
descendant_ids: 1
}
item {
id:3
name:'man'
ancestor_ids: 5
}
item {
id:4
name:'woman'
display_name:'woman'
ancestor_ids: 5
}
item {
id:5
name:'person'
descendant_ids: 3
descendant_ids: 4
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
bbox_classes_text = [six.b('cat'), six.b('cat')]
bbox_group_of = [0, 1]
image_class_text = [six.b('cat'), six.b('person')]
image_confidence = [1.0, 0.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
'image/object/group_of':
dataset_util.int64_list_feature(bbox_group_of),
'image/class/text':
dataset_util.bytes_list_feature(image_class_text),
'image/class/confidence':
dataset_util.float_list_feature(image_confidence),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path, expand_hierarchy_labels=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
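    # Note (added for clarity): with hierarchy expansion, each 'cat' box is
    # duplicated for its ancestor 'animal' (class ids 1 and 2) and the group_of
    # flags are copied to the duplicates, while image-level labels are expanded
    # to ancestors ('cat' -> 'animal') and descendants ('person' -> 'man',
    # 'woman'), with the image confidences propagated accordingly.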
expected_boxes = np.stack(
[boxes[0, :], boxes[0, :], boxes[1, :], boxes[1, :]], axis=0)
expected_boxes_class = np.array([1, 2, 1, 2])
expected_boxes_group_of = np.array([0, 0, 1, 1])
expected_image_class = np.array([1, 2, 3, 4, 5])
expected_image_confidence = np.array([1.0, 1.0, 0.0, 0.0, 0.0])
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
self.assertAllEqual(expected_boxes_class,
tensor_dict[fields.InputDataFields.groundtruth_classes])
self.assertAllEqual(
expected_boxes_group_of,
tensor_dict[fields.InputDataFields.groundtruth_group_of])
self.assertAllEqual(
expected_image_class,
tensor_dict[fields.InputDataFields.groundtruth_image_classes])
self.assertAllEqual(
expected_image_confidence,
tensor_dict[fields.InputDataFields.groundtruth_image_confidences])
def testDecodeDensePose(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0, 2.0]
bbox_xmins = [1.0, 5.0, 8.0]
bbox_ymaxs = [2.0, 6.0, 1.0]
bbox_xmaxs = [3.0, 7.0, 3.3]
densepose_num = [0, 4, 2]
densepose_part_index = [2, 2, 3, 4, 2, 9]
densepose_x = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
densepose_y = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4]
densepose_u = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06]
densepose_v = [0.99, 0.98, 0.97, 0.96, 0.95, 0.94]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/densepose/num':
dataset_util.int64_list_feature(densepose_num),
'image/object/densepose/part_index':
dataset_util.int64_list_feature(densepose_part_index),
'image/object/densepose/x':
dataset_util.float_list_feature(densepose_x),
'image/object/densepose/y':
dataset_util.float_list_feature(densepose_y),
'image/object/densepose/u':
dataset_util.float_list_feature(densepose_u),
'image/object/densepose/v':
dataset_util.float_list_feature(densepose_v),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_dense_pose=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
dp_num_points = output[fields.InputDataFields.groundtruth_dp_num_points]
dp_part_ids = output[fields.InputDataFields.groundtruth_dp_part_ids]
dp_surface_coords = output[
fields.InputDataFields.groundtruth_dp_surface_coords]
return dp_num_points, dp_part_ids, dp_surface_coords
dp_num_points, dp_part_ids, dp_surface_coords = self.execute_cpu(
graph_fn, [])
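    # Note (added for clarity): the flat per-image DensePose lists are split
    # per instance according to 'densepose/num', padded with zeros up to the
    # maximum point count (4 here), and the surface coordinates are returned
    # in (y, x, v, u) order.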
expected_dp_num_points = [0, 4, 2]
expected_dp_part_ids = [
[0, 0, 0, 0],
[2, 2, 3, 4],
[2, 9, 0, 0]
]
expected_dp_surface_coords = np.array(
[
# Instance 0 (no points).
[[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
# Instance 1 (4 points).
[[0.9, 0.1, 0.99, 0.01],
[0.8, 0.2, 0.98, 0.02],
[0.7, 0.3, 0.97, 0.03],
[0.6, 0.4, 0.96, 0.04]],
# Instance 2 (2 points).
[[0.5, 0.5, 0.95, 0.05],
[0.4, 0.6, 0.94, 0.06],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
], dtype=np.float32)
self.assertAllEqual(dp_num_points, expected_dp_num_points)
self.assertAllEqual(dp_part_ids, expected_dp_part_ids)
self.assertAllClose(dp_surface_coords, expected_dp_surface_coords)
def testDecodeTrack(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0, 2.0]
bbox_xmins = [1.0, 5.0, 8.0]
bbox_ymaxs = [2.0, 6.0, 1.0]
bbox_xmaxs = [3.0, 7.0, 3.3]
track_labels = [0, 1, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/track/label':
dataset_util.int64_list_feature(track_labels),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_track_id=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
track_ids = output[fields.InputDataFields.groundtruth_track_ids]
return track_ids
track_ids = self.execute_cpu(graph_fn, [])
expected_track_labels = [0, 1, 2]
self.assertAllEqual(track_ids, expected_track_labels)
if __name__ == '__main__':
tf.test.main()
| 79,936 | 38.928571 | 80 | py |
models | models-master/research/object_detection/data_decoders/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/delf/setup.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup script for delf."""
from setuptools import setup, find_packages
install_requires = [
'absl-py >= 0.7.1',
'protobuf >= 3.8.0',
'pandas >= 0.24.2',
'numpy >= 1.16.1',
'scipy >= 1.2.2',
'tensorflow >= 2.2.0',
'tf_slim >= 1.1',
'tensorflow_probability >= 0.9.0',
]
setup(
name='delf',
version='2.0',
include_package_data=True,
packages=find_packages(),
install_requires=install_requires,
description='DELF (DEep Local Features)',
)
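# Usage sketch (illustrative addition, not part of the original file): once the
# DELF protos have been compiled, the package can typically be installed in
# editable mode from this directory with, e.g.:
#   pip install -e .
# after which `from delf import feature_io` style imports should resolve.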
| 1,184 | 30.184211 | 80 | py |
models | models-master/research/delf/delf/__init__.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to extract deep local features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from delf.protos import aggregation_config_pb2
from delf.protos import box_pb2
from delf.protos import datum_pb2
from delf.protos import delf_config_pb2
from delf.protos import feature_pb2
from delf.python import box_io
from delf.python import datum_io
from delf.python import feature_aggregation_extractor
from delf.python import feature_aggregation_similarity
from delf.python import feature_extractor
from delf.python import feature_io
from delf.python import utils
from delf.python import whiten
from delf.python.examples import detector
from delf.python.examples import extractor
from delf.python import detect_to_retrieve
from delf.python import training
from delf.python.training import model
from delf.python import datasets
from delf.python.datasets import google_landmarks_dataset
from delf.python.datasets import revisited_op
# pylint: enable=unused-import
| 1,743 | 39.55814 | 80 | py |
models | models-master/research/delf/delf/python/feature_io.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python interface for DelfFeatures proto.
Supports reading and writing DelfFeatures from/to numpy arrays and files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from delf import feature_pb2
from delf import datum_io
def ArraysToDelfFeatures(locations,
scales,
descriptors,
attention,
orientations=None):
"""Converts DELF features to DelfFeatures proto.
Args:
locations: [N, 2] float array which denotes the selected keypoint locations.
N is the number of features.
scales: [N] float array with feature scales.
descriptors: [N, depth] float array with DELF descriptors.
attention: [N] float array with attention scores.
orientations: [N] float array with orientations. If None, all orientations
are set to zero.
Returns:
delf_features: DelfFeatures object.
"""
num_features = len(attention)
assert num_features == locations.shape[0]
assert num_features == len(scales)
assert num_features == descriptors.shape[0]
if orientations is None:
orientations = np.zeros([num_features], dtype=np.float32)
else:
assert num_features == len(orientations)
delf_features = feature_pb2.DelfFeatures()
for i in range(num_features):
delf_feature = delf_features.feature.add()
delf_feature.y = locations[i, 0]
delf_feature.x = locations[i, 1]
delf_feature.scale = scales[i]
delf_feature.orientation = orientations[i]
delf_feature.strength = attention[i]
delf_feature.descriptor.CopyFrom(datum_io.ArrayToDatum(descriptors[i,]))
return delf_features
def DelfFeaturesToArrays(delf_features):
"""Converts data saved in DelfFeatures to numpy arrays.
  If there are no features, the function returns five empty arrays.
Args:
delf_features: DelfFeatures object.
Returns:
locations: [N, 2] float array which denotes the selected keypoint
locations. N is the number of features.
scales: [N] float array with feature scales.
descriptors: [N, depth] float array with DELF descriptors.
attention: [N] float array with attention scores.
orientations: [N] float array with orientations.
"""
num_features = len(delf_features.feature)
if num_features == 0:
return np.array([]), np.array([]), np.array([]), np.array([]), np.array([])
# Figure out descriptor dimensionality by parsing first one.
descriptor_dim = len(
datum_io.DatumToArray(delf_features.feature[0].descriptor))
locations = np.zeros([num_features, 2])
scales = np.zeros([num_features])
descriptors = np.zeros([num_features, descriptor_dim])
attention = np.zeros([num_features])
orientations = np.zeros([num_features])
for i in range(num_features):
delf_feature = delf_features.feature[i]
locations[i, 0] = delf_feature.y
locations[i, 1] = delf_feature.x
scales[i] = delf_feature.scale
descriptors[i,] = datum_io.DatumToArray(delf_feature.descriptor)
attention[i] = delf_feature.strength
orientations[i] = delf_feature.orientation
return locations, scales, descriptors, attention, orientations
def SerializeToString(locations,
scales,
descriptors,
attention,
orientations=None):
"""Converts numpy arrays to serialized DelfFeatures.
Args:
locations: [N, 2] float array which denotes the selected keypoint locations.
N is the number of features.
scales: [N] float array with feature scales.
descriptors: [N, depth] float array with DELF descriptors.
attention: [N] float array with attention scores.
orientations: [N] float array with orientations. If None, all orientations
are set to zero.
Returns:
Serialized DelfFeatures string.
"""
delf_features = ArraysToDelfFeatures(locations, scales, descriptors,
attention, orientations)
return delf_features.SerializeToString()
def ParseFromString(string):
"""Converts serialized DelfFeatures string to numpy arrays.
Args:
string: Serialized DelfFeatures string.
Returns:
locations: [N, 2] float array which denotes the selected keypoint
locations. N is the number of features.
scales: [N] float array with feature scales.
descriptors: [N, depth] float array with DELF descriptors.
attention: [N] float array with attention scores.
orientations: [N] float array with orientations.
"""
delf_features = feature_pb2.DelfFeatures()
delf_features.ParseFromString(string)
return DelfFeaturesToArrays(delf_features)
def ReadFromFile(file_path):
"""Helper function to load data from a DelfFeatures format in a file.
Args:
file_path: Path to file containing data.
Returns:
locations: [N, 2] float array which denotes the selected keypoint
locations. N is the number of features.
scales: [N] float array with feature scales.
descriptors: [N, depth] float array with DELF descriptors.
attention: [N] float array with attention scores.
orientations: [N] float array with orientations.
"""
with tf.io.gfile.GFile(file_path, 'rb') as f:
return ParseFromString(f.read())
def WriteToFile(file_path,
locations,
scales,
descriptors,
attention,
orientations=None):
"""Helper function to write data to a file in DelfFeatures format.
Args:
file_path: Path to file that will be written.
locations: [N, 2] float array which denotes the selected keypoint locations.
N is the number of features.
scales: [N] float array with feature scales.
descriptors: [N, depth] float array with DELF descriptors.
attention: [N] float array with attention scores.
orientations: [N] float array with orientations. If None, all orientations
are set to zero.
"""
serialized_data = SerializeToString(locations, scales, descriptors, attention,
orientations)
with tf.io.gfile.GFile(file_path, 'w') as f:
f.write(serialized_data)
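# Minimal round-trip sketch (illustrative addition, not part of the original
# module). The array shapes and the '/tmp/test.delf' path are assumptions made
# only for this example; the helpers used are the functions defined above.
if __name__ == '__main__':
  _locations = np.array([[10.0, 20.0], [30.0, 40.0]], dtype=np.float32)
  _scales = np.array([1.0, 2.0], dtype=np.float32)
  _descriptors = np.random.rand(2, 40).astype(np.float32)
  _attention = np.array([0.9, 0.8], dtype=np.float32)
  # Write the features to disk, then read them back and report the count.
  WriteToFile('/tmp/test.delf', _locations, _scales, _descriptors, _attention)
  _read_locations, _, _, _read_attention, _ = ReadFromFile('/tmp/test.delf')
  print('Read back %d features' % _read_locations.shape[0])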
| 6,911 | 34.086294 | 80 | py |
models | models-master/research/delf/delf/python/datum_io.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python interface for DatumProto.
DatumProto is a protocol buffer used to serialize tensors of arbitrary shape.
Please refer to datum.proto for details.
Supports reading and writing DatumProto from/to NumPy arrays and files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from delf import datum_pb2
def ArrayToDatum(arr):
"""Converts NumPy array to DatumProto.
Supports arrays of types:
- float16 (it is converted into a float32 in DatumProto)
- float32
- float64 (it is converted into a float32 in DatumProto)
- uint8 (it is converted into a uint32 in DatumProto)
- uint16 (it is converted into a uint32 in DatumProto)
- uint32
- uint64 (it is converted into a uint32 in DatumProto)
Args:
arr: NumPy array of arbitrary shape.
Returns:
datum: DatumProto object.
Raises:
ValueError: If array type is unsupported.
"""
datum = datum_pb2.DatumProto()
if arr.dtype in ('float16', 'float32', 'float64'):
datum.float_list.value.extend(arr.astype('float32').flat)
elif arr.dtype in ('uint8', 'uint16', 'uint32', 'uint64'):
datum.uint32_list.value.extend(arr.astype('uint32').flat)
else:
raise ValueError('Unsupported array type: %s' % arr.dtype)
datum.shape.dim.extend(arr.shape)
return datum
def ArraysToDatumPair(arr_1, arr_2):
"""Converts numpy arrays to DatumPairProto.
Supports same formats as `ArrayToDatum`, see documentation therein.
Args:
arr_1: NumPy array of arbitrary shape.
arr_2: NumPy array of arbitrary shape.
Returns:
datum_pair: DatumPairProto object.
"""
datum_pair = datum_pb2.DatumPairProto()
datum_pair.first.CopyFrom(ArrayToDatum(arr_1))
datum_pair.second.CopyFrom(ArrayToDatum(arr_2))
return datum_pair
def DatumToArray(datum):
"""Converts data saved in DatumProto to NumPy array.
Args:
datum: DatumProto object.
Returns:
NumPy array of arbitrary shape.
"""
if datum.HasField('float_list'):
return np.array(datum.float_list.value).astype('float32').reshape(
datum.shape.dim)
elif datum.HasField('uint32_list'):
return np.array(datum.uint32_list.value).astype('uint32').reshape(
datum.shape.dim)
else:
raise ValueError('Input DatumProto does not have float_list or uint32_list')
def DatumPairToArrays(datum_pair):
"""Converts data saved in DatumPairProto to NumPy arrays.
Args:
datum_pair: DatumPairProto object.
Returns:
Two NumPy arrays of arbitrary shape.
"""
first_datum = DatumToArray(datum_pair.first)
second_datum = DatumToArray(datum_pair.second)
return first_datum, second_datum
def SerializeToString(arr):
"""Converts NumPy array to serialized DatumProto.
Args:
arr: NumPy array of arbitrary shape.
Returns:
Serialized DatumProto string.
"""
datum = ArrayToDatum(arr)
return datum.SerializeToString()
def SerializePairToString(arr_1, arr_2):
"""Converts pair of NumPy arrays to serialized DatumPairProto.
Args:
arr_1: NumPy array of arbitrary shape.
arr_2: NumPy array of arbitrary shape.
Returns:
Serialized DatumPairProto string.
"""
datum_pair = ArraysToDatumPair(arr_1, arr_2)
return datum_pair.SerializeToString()
def ParseFromString(string):
"""Converts serialized DatumProto string to NumPy array.
Args:
string: Serialized DatumProto string.
Returns:
NumPy array.
"""
datum = datum_pb2.DatumProto()
datum.ParseFromString(string)
return DatumToArray(datum)
def ParsePairFromString(string):
"""Converts serialized DatumPairProto string to NumPy arrays.
Args:
    string: Serialized DatumPairProto string.
Returns:
Two NumPy arrays.
"""
datum_pair = datum_pb2.DatumPairProto()
datum_pair.ParseFromString(string)
return DatumPairToArrays(datum_pair)
def ReadFromFile(file_path):
"""Helper function to load data from a DatumProto format in a file.
Args:
file_path: Path to file containing data.
Returns:
data: NumPy array.
"""
with tf.io.gfile.GFile(file_path, 'rb') as f:
return ParseFromString(f.read())
def ReadPairFromFile(file_path):
"""Helper function to load data from a DatumPairProto format in a file.
Args:
file_path: Path to file containing data.
Returns:
Two NumPy arrays.
"""
with tf.io.gfile.GFile(file_path, 'rb') as f:
return ParsePairFromString(f.read())
def WriteToFile(data, file_path):
"""Helper function to write data to a file in DatumProto format.
Args:
data: NumPy array.
file_path: Path to file that will be written.
"""
serialized_data = SerializeToString(data)
with tf.io.gfile.GFile(file_path, 'w') as f:
f.write(serialized_data)
def WritePairToFile(arr_1, arr_2, file_path):
"""Helper function to write pair of arrays to a file in DatumPairProto format.
Args:
arr_1: NumPy array of arbitrary shape.
arr_2: NumPy array of arbitrary shape.
file_path: Path to file that will be written.
"""
serialized_data = SerializePairToString(arr_1, arr_2)
with tf.io.gfile.GFile(file_path, 'w') as f:
f.write(serialized_data)
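# Minimal round-trip sketch (illustrative addition, not part of the original
# module); the '/tmp/test.datum' path and array contents are assumptions made
# only for this example.
if __name__ == '__main__':
  _arr = np.arange(6, dtype=np.float32).reshape(2, 3)
  # Serialize the array to a DatumProto file, then parse it back.
  WriteToFile(_arr, '/tmp/test.datum')
  _recovered = ReadFromFile('/tmp/test.datum')
  print('Round-trip shape:', _recovered.shape, 'dtype:', _recovered.dtype)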
| 5,867 | 25.432432 | 80 | py |
models | models-master/research/delf/delf/python/whiten_test.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for whitening module."""
import numpy as np
import tensorflow as tf
from delf import whiten
class WhitenTest(tf.test.TestCase):
def testApplyWhitening(self):
# Testing the application of the learned whitening.
vectors = np.array([[0.14022471, 0.96360618], [0.37601032, 0.25528411]])
    # Learn whitening for the `vectors`. The first element of `vectors` is
    # viewed as the example query and the second element is the corresponding
    # positive.
mean_vector, projection = whiten.learn_whitening(vectors, [0], [1])
# Apply the computed whitening.
whitened_vectors = whiten.apply_whitening(vectors, mean_vector, projection)
expected_whitened_vectors = np.array([[0., 9.99999000e-01],
[0., -2.81240452e-13]])
# Compare the obtained whitened vectors with the expected result.
self.assertAllClose(whitened_vectors, expected_whitened_vectors)
def testLearnWhitening(self):
# Testing whitening learning function.
descriptors = np.array([[0.14022471, 0.96360618], [0.37601032, 0.25528411]])
# Obtain the mean descriptor vector and the projection matrix.
mean_vector, projection = whiten.learn_whitening(descriptors, [0], [1])
expected_mean_vector = np.array([[0.14022471], [0.37601032]])
expected_projection = np.array([[1.18894378e+00, -1.74326044e-01],
[1.45071361e+04, 9.89421193e+04]])
# Check that the both calculated values are close to the expected values.
self.assertAllClose(mean_vector, expected_mean_vector)
self.assertAllClose(projection, expected_projection)
def testCholeskyPositiveDefinite(self):
# Testing the Cholesky decomposition for the positive definite matrix.
descriptors = np.array([[1, -2j], [2j, 5]])
output = whiten.cholesky(descriptors)
expected_output = np.array([[1. + 0.j, 0. + 0.j], [0. + 2.j, 1. + 0.j]])
# Check that the expected output is obtained.
self.assertAllClose(output, expected_output)
# Check that the properties of the Cholesky decomposition are satisfied.
self.assertAllClose(np.matmul(output, output.T.conj()), descriptors)
def testCholeskyNonPositiveDefinite(self):
# Testing the Cholesky decomposition for a non-positive definite matrix.
input_matrix = np.array([[1., 2.], [-2., 1.]])
decomposition = whiten.cholesky(input_matrix)
expected_output = np.array([[2., -2.], [-2., 2.]])
# Check that the properties of the Cholesky decomposition are satisfied.
self.assertAllClose(
np.matmul(decomposition, decomposition.T), expected_output)
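# End-to-end usage sketch (illustrative addition, not part of the original
# test). Given training descriptors plus query/positive index lists, whitening
# is typically learned once and then applied to database descriptors; the
# variable names below are placeholders, not symbols from this file:
#   mean_vector, projection = whiten.learn_whitening(train_descriptors,
#                                                    query_indices,
#                                                    positive_indices)
#   whitened_db = whiten.apply_whitening(db_descriptors, mean_vector,
#                                        projection)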
if __name__ == '__main__':
tf.test.main()
| 3,372 | 44.581081 | 80 | py |
models | models-master/research/delf/delf/python/feature_extractor.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DELF feature extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def NormalizePixelValues(image,
pixel_value_offset=128.0,
pixel_value_scale=128.0):
"""Normalize image pixel values.
Args:
image: a uint8 tensor.
pixel_value_offset: a Python float, offset for normalizing pixel values.
pixel_value_scale: a Python float, scale for normalizing pixel values.
Returns:
image: a float32 tensor of the same shape as the input image.
"""
image = tf.cast(image, dtype=tf.float32)
image = tf.truediv(tf.subtract(image, pixel_value_offset), pixel_value_scale)
return image
def CalculateReceptiveBoxes(height, width, rf, stride, padding):
"""Calculate receptive boxes for each feature point.
Args:
height: The height of feature map.
width: The width of feature map.
rf: The receptive field size.
stride: The effective stride between two adjacent feature points.
padding: The effective padding size.
Returns:
    rf_boxes: [N, 4] receptive boxes tensor. Here N equals height x width.
Each box is represented by [ymin, xmin, ymax, xmax].
"""
x, y = tf.meshgrid(tf.range(width), tf.range(height))
coordinates = tf.reshape(tf.stack([y, x], axis=2), [-1, 2])
# [y,x,y,x]
point_boxes = tf.cast(
tf.concat([coordinates, coordinates], 1), dtype=tf.float32)
bias = [-padding, -padding, -padding + rf - 1, -padding + rf - 1]
rf_boxes = stride * point_boxes + bias
return rf_boxes
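# For example (values taken from the unit test), a 1x2 feature map with rf=291,
# stride=32 and padding=145 yields receptive boxes
# [[-145., -145., 145., 145.], [-145., -113., 145., 177.]].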
def CalculateKeypointCenters(boxes):
"""Helper function to compute feature centers, from RF boxes.
Args:
boxes: [N, 4] float tensor.
Returns:
centers: [N, 2] float tensor.
"""
return tf.divide(
tf.add(
tf.gather(boxes, [0, 1], axis=1), tf.gather(boxes, [2, 3], axis=1)),
2.0)
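# For example, the receptive box [-10.0, 0.0, 11.0, 21.0] has keypoint center
# [0.5, 10.5], the midpoint between its [ymin, xmin] and [ymax, xmax] corners.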
def ApplyPcaAndWhitening(data,
pca_matrix,
pca_mean,
output_dim,
use_whitening=False,
pca_variances=None):
"""Applies PCA/whitening to data.
Args:
data: [N, dim] float tensor containing data which undergoes PCA/whitening.
pca_matrix: [dim, dim] float tensor PCA matrix, row-major.
pca_mean: [dim] float tensor, mean to subtract before projection.
output_dim: Number of dimensions to use in output data, of type int.
use_whitening: Whether whitening is to be used.
pca_variances: [dim] float tensor containing PCA variances. Only used if
use_whitening is True.
Returns:
output: [N, output_dim] float tensor with output of PCA/whitening operation.
"""
output = tf.matmul(
tf.subtract(data, pca_mean),
tf.slice(pca_matrix, [0, 0], [output_dim, -1]),
transpose_b=True,
name='pca_matmul')
# Apply whitening if desired.
if use_whitening:
output = tf.divide(
output,
tf.sqrt(tf.slice(pca_variances, [0], [output_dim])),
name='whitening')
return output
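# In NumPy terms, for each row x of `data` this computes
#   y = (x - pca_mean) @ pca_matrix[:output_dim, :].T
# and, if `use_whitening` is True, divides each output dimension by
# sqrt(pca_variances[:output_dim]).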
def PostProcessDescriptors(descriptors, use_pca, pca_parameters=None):
"""Post-process descriptors.
Args:
descriptors: [N, input_dim] float tensor.
use_pca: Whether to use PCA.
pca_parameters: Only used if `use_pca` is True. Dict containing PCA
parameter tensors, with keys 'mean', 'matrix', 'dim', 'use_whitening',
'variances'.
Returns:
final_descriptors: [N, output_dim] float tensor with descriptors after
normalization and (possibly) PCA/whitening.
"""
# L2-normalize, and if desired apply PCA (followed by L2-normalization).
final_descriptors = tf.nn.l2_normalize(
descriptors, axis=1, name='l2_normalization')
if use_pca:
# Apply PCA, and whitening if desired.
final_descriptors = ApplyPcaAndWhitening(final_descriptors,
pca_parameters['matrix'],
pca_parameters['mean'],
pca_parameters['dim'],
pca_parameters['use_whitening'],
pca_parameters['variances'])
# Re-normalize.
final_descriptors = tf.nn.l2_normalize(
final_descriptors, axis=1, name='pca_l2_normalization')
return final_descriptors
def DelfFeaturePostProcessing(boxes, descriptors, use_pca, pca_parameters=None):
"""Extract DELF features from input image.
Args:
boxes: [N, 4] float tensor which denotes the selected receptive box. N is
the number of final feature points which pass through keypoint selection
and NMS steps.
descriptors: [N, input_dim] float tensor.
use_pca: Whether to use PCA.
pca_parameters: Only used if `use_pca` is True. Dict containing PCA
parameter tensors, with keys 'mean', 'matrix', 'dim', 'use_whitening',
'variances'.
Returns:
locations: [N, 2] float tensor which denotes the selected keypoint
locations.
final_descriptors: [N, output_dim] float tensor with DELF descriptors after
normalization and (possibly) PCA/whitening.
"""
# Get center of descriptor boxes, corresponding to feature locations.
locations = CalculateKeypointCenters(boxes)
final_descriptors = PostProcessDescriptors(descriptors, use_pca,
pca_parameters)
return locations, final_descriptors
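# Illustrative usage (tensor names are placeholders): given receptive-field
# boxes and raw descriptors produced by a DELF model, post-processing without
# PCA could be run as
#
#   locations, descriptors = DelfFeaturePostProcessing(
#       boxes, raw_descriptors, use_pca=False)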
| 6,181 | 34.125 | 80 | py |
models | models-master/research/delf/delf/python/feature_aggregation_extractor_test.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DELF feature aggregation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from delf import aggregation_config_pb2
from delf import feature_aggregation_extractor
FLAGS = flags.FLAGS
class FeatureAggregationTest(tf.test.TestCase):
def _CreateCodebook(self, checkpoint_path):
"""Creates codebook used in tests.
Args:
checkpoint_path: Directory where codebook is saved to.
"""
codebook = tf.Variable(
[[0.5, 0.5], [0.0, 0.0], [1.0, 0.0], [-0.5, -0.5], [0.0, 1.0]],
name='clusters',
dtype=tf.float32)
ckpt = tf.train.Checkpoint(codebook=codebook)
ckpt.write(checkpoint_path)
def setUp(self):
self._codebook_path = os.path.join(FLAGS.test_tmpdir, 'test_codebook')
self._CreateCodebook(self._codebook_path)
def testComputeNormalizedVladWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = True
config.codebook_path = self._codebook_path
config.num_assignments = 1
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
vlad, extra_output = extractor.Extract(features)
# Define expected results.
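    # With the codebook created in _CreateCodebook, the three features are
    # assigned to centroids 2, 3 and 4 with residuals [0, 0], [-0.5, 0.5] and
    # [1, 1]; L2-normalizing the concatenated residuals (norm sqrt(2.5))
    # gives the values below.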
exp_vlad = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.316228, 0.316228, 0.632456, 0.632456
]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllClose(vlad, exp_vlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeNormalizedVladWithBatchingWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = True
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.feature_batch_size = 2
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
vlad, extra_output = extractor.Extract(features)
# Define expected results.
exp_vlad = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.316228, 0.316228, 0.632456, 0.632456
]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllClose(vlad, exp_vlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeUnnormalizedVladWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = False
config.codebook_path = self._codebook_path
config.num_assignments = 1
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
vlad, extra_output = extractor.Extract(features)
# Define expected results.
exp_vlad = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5, 0.5, 1.0, 1.0]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllEqual(vlad, exp_vlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeUnnormalizedVladMultipleAssignmentWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = False
config.codebook_path = self._codebook_path
config.num_assignments = 3
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
vlad, extra_output = extractor.Extract(features)
# Define expected results.
exp_vlad = [1.0, 1.0, 0.0, 0.0, 0.0, 2.0, -0.5, 0.5, 0.0, 0.0]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllEqual(vlad, exp_vlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeVladEmptyFeaturesWorks(self):
# Construct inputs.
# Empty feature array.
features = np.array([[]])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.codebook_path = self._codebook_path
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
vlad, extra_output = extractor.Extract(features)
# Define expected results.
exp_vlad = np.zeros([10], dtype=float)
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllEqual(vlad, exp_vlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeUnnormalizedRvladWorks(self):
# Construct inputs.
# 4 2-D features: 3 in first region, 1 in second region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
num_features_per_region = np.array([3, 1])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = False
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.use_regional_aggregation = True
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
rvlad, extra_output = extractor.Extract(features, num_features_per_region)
# Define expected results.
exp_rvlad = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.158114, 0.158114, 0.316228, 0.816228
]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllClose(rvlad, exp_rvlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeNormalizedRvladWorks(self):
# Construct inputs.
# 4 2-D features: 3 in first region, 1 in second region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
num_features_per_region = np.array([3, 1])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = True
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.use_regional_aggregation = True
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
rvlad, extra_output = extractor.Extract(features, num_features_per_region)
# Define expected results.
exp_rvlad = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.175011, 0.175011, 0.350021, 0.903453
]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllClose(rvlad, exp_rvlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeRvladEmptyRegionsWorks(self):
# Construct inputs.
# Empty feature array.
features = np.array([[]])
num_features_per_region = np.array([])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.codebook_path = self._codebook_path
config.use_regional_aggregation = True
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
rvlad, extra_output = extractor.Extract(features, num_features_per_region)
# Define expected results.
exp_rvlad = np.zeros([10], dtype=float)
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllEqual(rvlad, exp_rvlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeUnnormalizedRvladSomeEmptyRegionsWorks(self):
# Construct inputs.
# 4 2-D features: 0 in first region, 3 in second region, 0 in third region,
# 1 in fourth region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
num_features_per_region = np.array([0, 3, 0, 1])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = False
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.use_regional_aggregation = True
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
rvlad, extra_output = extractor.Extract(features, num_features_per_region)
# Define expected results.
exp_rvlad = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.079057, 0.079057, 0.158114, 0.408114
]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllClose(rvlad, exp_rvlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeNormalizedRvladSomeEmptyRegionsWorks(self):
# Construct inputs.
# 4 2-D features: 0 in first region, 3 in second region, 0 in third region,
# 1 in fourth region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
num_features_per_region = np.array([0, 3, 0, 1])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.use_l2_normalization = True
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.use_regional_aggregation = True
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
rvlad, extra_output = extractor.Extract(features, num_features_per_region)
# Define expected results.
exp_rvlad = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.175011, 0.175011, 0.350021, 0.903453
]
exp_extra_output = -1
# Compare actual and expected results.
self.assertAllClose(rvlad, exp_rvlad)
self.assertAllEqual(extra_output, exp_extra_output)
def testComputeRvladMisconfiguredFeatures(self):
# Construct inputs.
# 4 2-D features: 3 in first region, 1 in second region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
# Misconfigured number of features; there are only 4 features, but
# sum(num_features_per_region) = 5.
num_features_per_region = np.array([3, 2])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
config.codebook_path = self._codebook_path
config.use_regional_aggregation = True
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
with self.assertRaisesRegex(
ValueError,
r'Incorrect arguments: sum\(num_features_per_region\) and '
r'features.shape\[0\] are different'):
extractor.Extract(features, num_features_per_region)
def testComputeAsmkWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK
config.codebook_path = self._codebook_path
config.num_assignments = 1
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
asmk, visual_words = extractor.Extract(features)
# Define expected results.
exp_asmk = [-0.707107, 0.707107, 0.707107, 0.707107]
exp_visual_words = [3, 4]
# Compare actual and expected results.
self.assertAllClose(asmk, exp_asmk)
self.assertAllEqual(visual_words, exp_visual_words)
def testComputeAsmkStarWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK_STAR
config.codebook_path = self._codebook_path
config.num_assignments = 1
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
asmk_star, visual_words = extractor.Extract(features)
# Define expected results.
exp_asmk_star = [64, 192]
exp_visual_words = [3, 4]
# Compare actual and expected results.
self.assertAllEqual(asmk_star, exp_asmk_star)
self.assertAllEqual(visual_words, exp_visual_words)
def testComputeAsmkMultipleAssignmentWorks(self):
# Construct inputs.
# 3 2-D features.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK
config.codebook_path = self._codebook_path
config.num_assignments = 3
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
asmk, visual_words = extractor.Extract(features)
# Define expected results.
exp_asmk = [0.707107, 0.707107, 0.0, 1.0, -0.707107, 0.707107]
exp_visual_words = [0, 2, 3]
# Compare actual and expected results.
self.assertAllClose(asmk, exp_asmk)
self.assertAllEqual(visual_words, exp_visual_words)
def testComputeRasmkWorks(self):
# Construct inputs.
# 4 2-D features: 3 in first region, 1 in second region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
num_features_per_region = np.array([3, 1])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.use_regional_aggregation = True
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
rasmk, visual_words = extractor.Extract(features, num_features_per_region)
# Define expected results.
exp_rasmk = [-0.707107, 0.707107, 0.361261, 0.932465]
exp_visual_words = [3, 4]
# Compare actual and expected results.
self.assertAllClose(rasmk, exp_rasmk)
self.assertAllEqual(visual_words, exp_visual_words)
def testComputeRasmkStarWorks(self):
# Construct inputs.
# 4 2-D features: 3 in first region, 1 in second region.
features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
dtype=float)
num_features_per_region = np.array([3, 1])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK_STAR
config.codebook_path = self._codebook_path
config.num_assignments = 1
config.use_regional_aggregation = True
# Run tested function.
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
rasmk_star, visual_words = extractor.Extract(features,
num_features_per_region)
# Define expected results.
exp_rasmk_star = [64, 192]
exp_visual_words = [3, 4]
# Compare actual and expected results.
self.assertAllEqual(rasmk_star, exp_rasmk_star)
self.assertAllEqual(visual_words, exp_visual_words)
def testComputeUnknownAggregation(self):
# Construct inputs.
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = 0
config.codebook_path = self._codebook_path
config.use_regional_aggregation = True
# Run tested function.
with self.assertRaisesRegex(ValueError, 'Invalid aggregation type'):
feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
if __name__ == '__main__':
tf.test.main()
| 18,304 | 35.979798 | 80 | py |
models | models-master/research/delf/delf/python/whiten.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Whitening learning functions."""
import os
import numpy as np
def apply_whitening(descriptors,
mean_descriptor_vector,
projection,
output_dim=None):
"""Applies the whitening to the descriptors as a post-processing step.
Args:
    descriptors: [D, N] NumPy array of L2-normalized descriptors to be
      post-processed, with one descriptor per column.
    mean_descriptor_vector: [D, 1] NumPy array, mean descriptor vector.
    projection: [D, D] NumPy array, whitening projection matrix.
    output_dim: Integer, parameter for the dimensionality reduction. If
      `output_dim` is None, the dimensionality reduction is not performed.
  Returns:
    descriptors_whitened: [output_dim, N] NumPy array of L2-normalized
      descriptors after whitening application.
"""
eps = 1e-6
if output_dim is None:
output_dim = projection.shape[0]
descriptors = np.dot(projection[:output_dim, :],
descriptors - mean_descriptor_vector)
descriptors_whitened = descriptors / (
np.linalg.norm(descriptors, ord=2, axis=0, keepdims=True) + eps)
return descriptors_whitened
def learn_whitening(descriptors, qidxs, pidxs):
"""Learning the post-processing of fine-tuned descriptor vectors.
This method of whitening learning leverages the provided labeled data and
uses linear discriminant projections. The projection is decomposed into two
parts: whitening and rotation. The whitening part is the inverse of the
square-root of the intraclass (matching pairs) covariance matrix. The
rotation part is the PCA of the interclass (non-matching pairs) covariance
matrix in the whitened space. The described approach acts as a
post-processing step, equivalently, once the fine-tuning of the CNN is
finished. For more information about the method refer to the section 3.4
of https://arxiv.org/pdf/1711.02512.pdf.
Args:
    descriptors: [D, N] NumPy array of L2-normalized descriptors, with one
      descriptor per column.
    qidxs: List of query indexes.
    pidxs: List of positive pair indexes.
  Returns:
    mean_descriptor_vector: [D, 1] NumPy array, mean descriptor vector.
    projection: [D, D] NumPy array, whitening projection matrix.
"""
# Calculating the mean descriptor vector, which is used to perform centering.
mean_descriptor_vector = descriptors[:, qidxs].mean(axis=1, keepdims=True)
  # Intraclass (matching pairs) difference.
interclass_difference = descriptors[:, qidxs] - descriptors[:, pidxs]
covariance_matrix = (
np.dot(interclass_difference, interclass_difference.T) /
interclass_difference.shape[1])
# Whitening part.
projection = np.linalg.inv(cholesky(covariance_matrix))
projected_descriptors = np.dot(projection,
descriptors - mean_descriptor_vector)
non_matching_covariance_matrix = np.dot(projected_descriptors,
projected_descriptors.T)
eigval, eigvec = np.linalg.eig(non_matching_covariance_matrix)
order = eigval.argsort()[::-1]
eigvec = eigvec[:, order]
# Rotational part.
projection = np.dot(eigvec.T, projection)
return mean_descriptor_vector, projection
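# Illustrative usage (variable names are placeholders), pairing learn_whitening
# with apply_whitening defined above; `descriptors` holds one L2-normalized
# descriptor per column:
#
#   mean_vector, projection = learn_whitening(descriptors, query_idxs, pos_idxs)
#   whitened = apply_whitening(descriptors, mean_vector, projection,
#                              output_dim=128)  # output_dim chosen arbitrarily.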
def cholesky(matrix):
"""Cholesky decomposition.
Cholesky decomposition suitable for non-positive definite matrices: involves
adding a small value `alpha` on the matrix diagonal until the matrix
becomes positive definite.
Args:
matrix: [K, K] Square matrix to be decomposed.
Returns:
    decomposition: [K, K] Lower-triangular Cholesky factor of `matrix`,
      a matrix with real and positive diagonal entries.
"""
alpha = 0
while True:
try:
# If the input parameter matrix is not positive-definite,
# the decomposition fails and we iteratively add a small value `alpha` on
# the matrix diagonal.
decomposition = np.linalg.cholesky(matrix + alpha * np.eye(*matrix.shape))
return decomposition
except np.linalg.LinAlgError:
if alpha == 0:
alpha = 1e-10
else:
alpha *= 10
print(">>>> {}::cholesky: Matrix is not positive definite, adding {:.0e} "
"on the diagonal".format(os.path.basename(__file__), alpha))
| 4,842 | 37.436508 | 80 | py |
models | models-master/research/delf/delf/python/box_io.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python interface for Boxes proto.
Support read and write of Boxes from/to numpy arrays and file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from delf import box_pb2
def ArraysToBoxes(boxes, scores, class_indices):
"""Converts `boxes` to Boxes proto.
Args:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
Returns:
boxes_proto: Boxes object.
"""
num_boxes = len(scores)
assert num_boxes == boxes.shape[0]
assert num_boxes == len(class_indices)
boxes_proto = box_pb2.Boxes()
for i in range(num_boxes):
boxes_proto.box.add(
ymin=boxes[i, 0],
xmin=boxes[i, 1],
ymax=boxes[i, 2],
xmax=boxes[i, 3],
score=scores[i],
class_index=class_indices[i])
return boxes_proto
def BoxesToArrays(boxes_proto):
"""Converts data saved in Boxes proto to numpy arrays.
If there are no boxes, the function returns three empty arrays.
Args:
boxes_proto: Boxes proto object.
Returns:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
"""
num_boxes = len(boxes_proto.box)
if num_boxes == 0:
return np.array([]), np.array([]), np.array([])
boxes = np.zeros([num_boxes, 4])
scores = np.zeros([num_boxes])
class_indices = np.zeros([num_boxes])
for i in range(num_boxes):
box_proto = boxes_proto.box[i]
boxes[i] = [box_proto.ymin, box_proto.xmin, box_proto.ymax, box_proto.xmax]
scores[i] = box_proto.score
class_indices[i] = box_proto.class_index
return boxes, scores, class_indices
def SerializeToString(boxes, scores, class_indices):
"""Converts numpy arrays to serialized Boxes.
Args:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
Returns:
Serialized Boxes string.
"""
boxes_proto = ArraysToBoxes(boxes, scores, class_indices)
return boxes_proto.SerializeToString()
def ParseFromString(string):
"""Converts serialized Boxes proto string to numpy arrays.
Args:
string: Serialized Boxes string.
Returns:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
"""
boxes_proto = box_pb2.Boxes()
boxes_proto.ParseFromString(string)
return BoxesToArrays(boxes_proto)
def ReadFromFile(file_path):
"""Helper function to load data from a Boxes proto format in a file.
Args:
file_path: Path to file containing data.
Returns:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
"""
with tf.io.gfile.GFile(file_path, 'rb') as f:
return ParseFromString(f.read())
def WriteToFile(file_path, boxes, scores, class_indices):
"""Helper function to write data to a file in Boxes proto format.
Args:
file_path: Path to file that will be written.
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
"""
serialized_data = SerializeToString(boxes, scores, class_indices)
with tf.io.gfile.GFile(file_path, 'w') as f:
f.write(serialized_data)
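# Illustrative round trip (the path is a placeholder): arrays written with
# WriteToFile can be recovered with ReadFromFile, e.g.
#
#   WriteToFile('/tmp/test.boxes', boxes, scores, class_indices)
#   boxes_read, scores_read, class_indices_read = ReadFromFile('/tmp/test.boxes')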
| 4,596 | 29.243421 | 80 | py |
models | models-master/research/delf/delf/python/feature_io_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for feature_io, the python interface of DelfFeatures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from delf import feature_io
FLAGS = flags.FLAGS
def create_data():
"""Creates data to be used in tests.
Returns:
locations: [N, 2] float array which denotes the selected keypoint
locations. N is the number of features.
scales: [N] float array with feature scales.
descriptors: [N, depth] float array with DELF descriptors.
attention: [N] float array with attention scores.
orientations: [N] float array with orientations.
"""
locations = np.arange(8, dtype=np.float32).reshape(4, 2)
scales = np.arange(4, dtype=np.float32)
attention = np.arange(4, dtype=np.float32)
orientations = np.arange(4, dtype=np.float32)
descriptors = np.zeros([4, 1024])
descriptors[0,] = np.arange(1024)
descriptors[1,] = np.zeros([1024])
descriptors[2,] = np.ones([1024])
descriptors[3,] = -np.ones([1024])
return locations, scales, descriptors, attention, orientations
class DelfFeaturesIoTest(tf.test.TestCase):
def testConversionAndBack(self):
locations, scales, descriptors, attention, orientations = create_data()
serialized = feature_io.SerializeToString(locations, scales, descriptors,
attention, orientations)
parsed_data = feature_io.ParseFromString(serialized)
self.assertAllEqual(locations, parsed_data[0])
self.assertAllEqual(scales, parsed_data[1])
self.assertAllEqual(descriptors, parsed_data[2])
self.assertAllEqual(attention, parsed_data[3])
self.assertAllEqual(orientations, parsed_data[4])
def testConversionAndBackNoOrientations(self):
locations, scales, descriptors, attention, _ = create_data()
serialized = feature_io.SerializeToString(locations, scales, descriptors,
attention)
parsed_data = feature_io.ParseFromString(serialized)
self.assertAllEqual(locations, parsed_data[0])
self.assertAllEqual(scales, parsed_data[1])
self.assertAllEqual(descriptors, parsed_data[2])
self.assertAllEqual(attention, parsed_data[3])
self.assertAllEqual(np.zeros([4]), parsed_data[4])
def testWriteAndReadToFile(self):
locations, scales, descriptors, attention, orientations = create_data()
filename = os.path.join(FLAGS.test_tmpdir, 'test.delf')
feature_io.WriteToFile(filename, locations, scales, descriptors, attention,
orientations)
data_read = feature_io.ReadFromFile(filename)
self.assertAllEqual(locations, data_read[0])
self.assertAllEqual(scales, data_read[1])
self.assertAllEqual(descriptors, data_read[2])
self.assertAllEqual(attention, data_read[3])
self.assertAllEqual(orientations, data_read[4])
def testWriteAndReadToFileEmptyFile(self):
filename = os.path.join(FLAGS.test_tmpdir, 'test.delf')
feature_io.WriteToFile(filename, np.array([]), np.array([]), np.array([]),
np.array([]), np.array([]))
data_read = feature_io.ReadFromFile(filename)
self.assertAllEqual(np.array([]), data_read[0])
self.assertAllEqual(np.array([]), data_read[1])
self.assertAllEqual(np.array([]), data_read[2])
self.assertAllEqual(np.array([]), data_read[3])
self.assertAllEqual(np.array([]), data_read[4])
if __name__ == '__main__':
tf.test.main()
| 4,227 | 36.415929 | 80 | py |
models | models-master/research/delf/delf/python/utils.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for DELF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from PIL import Image
from PIL import ImageFile
import tensorflow as tf
# To avoid PIL crashing for truncated (corrupted) images.
ImageFile.LOAD_TRUNCATED_IMAGES = True
def RgbLoader(path):
"""Helper function to read image with PIL.
Args:
path: Path to image to be loaded.
Returns:
PIL image in RGB format.
"""
with tf.io.gfile.GFile(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def ResizeImage(image, config, resize_factor=1.0):
"""Resizes image according to config.
Args:
image: Uint8 array with shape (height, width, 3).
config: DelfConfig proto containing the model configuration.
resize_factor: Optional float resize factor for the input image. If given,
the maximum and minimum allowed image sizes in `config` are scaled by this
factor. Must be non-negative.
Returns:
resized_image: Uint8 array with resized image.
scale_factors: 2D float array, with factors used for resizing along height
and width (If upscaling, larger than 1; if downscaling, smaller than 1).
Raises:
ValueError: If `image` has incorrect number of dimensions/channels.
"""
if resize_factor < 0.0:
raise ValueError('negative resize_factor is not allowed: %f' %
resize_factor)
if image.ndim != 3:
raise ValueError('image has incorrect number of dimensions: %d' %
                     image.ndim)
height, width, channels = image.shape
# Take into account resize factor.
max_image_size = resize_factor * config.max_image_size
min_image_size = resize_factor * config.min_image_size
if channels != 3:
raise ValueError('image has incorrect number of channels: %d' % channels)
largest_side = max(width, height)
if max_image_size >= 0 and largest_side > max_image_size:
scale_factor = max_image_size / largest_side
elif min_image_size >= 0 and largest_side < min_image_size:
scale_factor = min_image_size / largest_side
elif config.use_square_images and (height != width):
scale_factor = 1.0
else:
# No resizing needed, early return.
return image, np.ones(2, dtype=float)
# Note that new_shape is in (width, height) format (PIL convention), while
# scale_factors are in (height, width) convention (NumPy convention).
if config.use_square_images:
new_shape = (int(round(largest_side * scale_factor)),
int(round(largest_side * scale_factor)))
else:
new_shape = (int(round(width * scale_factor)),
int(round(height * scale_factor)))
scale_factors = np.array([new_shape[1] / height, new_shape[0] / width],
dtype=float)
pil_image = Image.fromarray(image)
resized_image = np.array(pil_image.resize(new_shape, resample=Image.BILINEAR))
return resized_image, scale_factors
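# Illustrative usage (assumes `config` is a DelfConfig proto with
# max_image_size/min_image_size set; the path is a placeholder):
#
#   image = np.array(RgbLoader('/path/to/image.jpg'))
#   resized_image, scale_factors = ResizeImage(image, config)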
| 3,660 | 33.866667 | 80 | py |
models | models-master/research/delf/delf/python/feature_aggregation_extractor.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Local feature aggregation extraction.
For more details, please refer to the paper:
"Detect-to-Retrieve: Efficient Regional Aggregation for Image Search",
Proc. CVPR'19 (https://arxiv.org/abs/1812.01584).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from delf import aggregation_config_pb2
_CLUSTER_CENTERS_VAR_NAME = "clusters"
_NORM_SQUARED_TOLERANCE = 1e-12
# Aliases for aggregation types.
_VLAD = aggregation_config_pb2.AggregationConfig.VLAD
_ASMK = aggregation_config_pb2.AggregationConfig.ASMK
_ASMK_STAR = aggregation_config_pb2.AggregationConfig.ASMK_STAR
class ExtractAggregatedRepresentation(object):
"""Class for extraction of aggregated local feature representation.
Args:
aggregation_config: AggregationConfig object defining type of aggregation to
use.
Raises:
ValueError: If aggregation type is invalid.
"""
def __init__(self, aggregation_config):
self._codebook_size = aggregation_config.codebook_size
self._feature_dimensionality = aggregation_config.feature_dimensionality
self._aggregation_type = aggregation_config.aggregation_type
self._feature_batch_size = aggregation_config.feature_batch_size
self._codebook_path = aggregation_config.codebook_path
self._use_regional_aggregation = aggregation_config.use_regional_aggregation
self._use_l2_normalization = aggregation_config.use_l2_normalization
self._num_assignments = aggregation_config.num_assignments
if self._aggregation_type not in [_VLAD, _ASMK, _ASMK_STAR]:
raise ValueError("Invalid aggregation type: %d" % self._aggregation_type)
# Load codebook
codebook = tf.Variable(
tf.zeros([self._codebook_size, self._feature_dimensionality],
dtype=tf.float32),
name=_CLUSTER_CENTERS_VAR_NAME)
ckpt = tf.train.Checkpoint(codebook=codebook)
ckpt.restore(self._codebook_path)
self._codebook = codebook
def Extract(self, features, num_features_per_region=None):
"""Extracts aggregated representation.
Args:
features: [N, D] float numpy array with N local feature descriptors.
num_features_per_region: Required only if computing regional aggregated
representations, otherwise optional. List of number of features per
region, such that sum(num_features_per_region) = N. It indicates which
features correspond to each region.
Returns:
aggregated_descriptors: 1-D numpy array.
feature_visual_words: Used only for ASMK/ASMK* aggregation type. 1-D
numpy array denoting visual words corresponding to the
`aggregated_descriptors`.
Raises:
ValueError: If inputs are misconfigured.
"""
features = tf.cast(features, dtype=tf.float32)
if num_features_per_region is None:
# Use dummy value since it is unused.
num_features_per_region = []
else:
num_features_per_region = tf.cast(num_features_per_region, dtype=tf.int32)
if len(num_features_per_region
) and sum(num_features_per_region) != features.shape[0]:
raise ValueError(
"Incorrect arguments: sum(num_features_per_region) and "
"features.shape[0] are different: %d vs %d" %
(sum(num_features_per_region), features.shape[0]))
# Extract features based on desired options.
if self._aggregation_type == _VLAD:
# Feature visual words are unused in the case of VLAD, so just return
# dummy constant.
feature_visual_words = tf.constant(-1, dtype=tf.int32)
if self._use_regional_aggregation:
aggregated_descriptors = self._ComputeRvlad(
features,
num_features_per_region,
self._codebook,
use_l2_normalization=self._use_l2_normalization,
num_assignments=self._num_assignments)
else:
aggregated_descriptors = self._ComputeVlad(
features,
self._codebook,
use_l2_normalization=self._use_l2_normalization,
num_assignments=self._num_assignments)
elif (self._aggregation_type == _ASMK or
self._aggregation_type == _ASMK_STAR):
if self._use_regional_aggregation:
(aggregated_descriptors,
feature_visual_words) = self._ComputeRasmk(
features,
num_features_per_region,
self._codebook,
num_assignments=self._num_assignments)
else:
(aggregated_descriptors,
feature_visual_words) = self._ComputeAsmk(
features,
self._codebook,
num_assignments=self._num_assignments)
feature_visual_words_output = feature_visual_words.numpy()
# If using ASMK*/RASMK*, binarize the aggregated descriptors.
if self._aggregation_type == _ASMK_STAR:
reshaped_aggregated_descriptors = np.reshape(
aggregated_descriptors, [-1, self._feature_dimensionality])
packed_descriptors = np.packbits(
reshaped_aggregated_descriptors > 0, axis=1)
aggregated_descriptors_output = np.reshape(packed_descriptors, [-1])
else:
aggregated_descriptors_output = aggregated_descriptors.numpy()
return aggregated_descriptors_output, feature_visual_words_output
def _ComputeVlad(self,
features,
codebook,
use_l2_normalization=True,
num_assignments=1):
"""Compute VLAD representation.
Args:
features: [N, D] float tensor.
codebook: [K, D] float tensor.
use_l2_normalization: If False, does not L2-normalize after aggregation.
num_assignments: Number of visual words to assign a feature to.
Returns:
vlad: [K*D] float tensor.
"""
def _ComputeVladEmptyFeatures():
"""Computes VLAD if `features` is empty.
Returns:
[K*D] all-zeros tensor.
"""
return tf.zeros([self._codebook_size * self._feature_dimensionality],
dtype=tf.float32)
def _ComputeVladNonEmptyFeatures():
"""Computes VLAD if `features` is not empty.
Returns:
[K*D] tensor with VLAD descriptor.
"""
num_features = tf.shape(features)[0]
# Find nearest visual words for each feature. Possibly batch the local
# features to avoid OOM.
if self._feature_batch_size <= 0:
actual_batch_size = num_features
else:
actual_batch_size = self._feature_batch_size
def _BatchNearestVisualWords(ind, selected_visual_words):
"""Compute nearest neighbor visual words for a batch of features.
Args:
ind: Integer index denoting feature.
selected_visual_words: Partial set of visual words.
Returns:
output_ind: Next index.
output_selected_visual_words: Updated set of visual words, including
the visual words for the new batch.
"""
# Handle case of last batch, where there may be fewer than
# `actual_batch_size` features.
batch_size_to_use = tf.cond(
tf.greater(ind + actual_batch_size, num_features),
true_fn=lambda: num_features - ind,
false_fn=lambda: actual_batch_size)
# Denote B = batch_size_to_use.
# K*B x D.
tiled_features = tf.reshape(
tf.tile(
tf.slice(features, [ind, 0],
[batch_size_to_use, self._feature_dimensionality]),
[1, self._codebook_size]), [-1, self._feature_dimensionality])
# K*B x D.
tiled_codebook = tf.reshape(
tf.tile(tf.reshape(codebook, [1, -1]), [batch_size_to_use, 1]),
[-1, self._feature_dimensionality])
# B x K.
squared_distances = tf.reshape(
tf.reduce_sum(
tf.math.squared_difference(tiled_features, tiled_codebook),
axis=1), [batch_size_to_use, self._codebook_size])
# B x K.
nearest_visual_words = tf.argsort(squared_distances)
# B x num_assignments.
batch_selected_visual_words = tf.slice(
nearest_visual_words, [0, 0], [batch_size_to_use, num_assignments])
selected_visual_words = tf.concat(
[selected_visual_words, batch_selected_visual_words], axis=0)
return ind + batch_size_to_use, selected_visual_words
ind_batch = tf.constant(0, dtype=tf.int32)
keep_going = lambda j, selected_visual_words: tf.less(j, num_features)
selected_visual_words = tf.zeros([0, num_assignments], dtype=tf.int32)
_, selected_visual_words = tf.while_loop(
cond=keep_going,
body=_BatchNearestVisualWords,
loop_vars=[ind_batch, selected_visual_words],
shape_invariants=[
ind_batch.get_shape(),
tf.TensorShape([None, num_assignments])
],
parallel_iterations=1,
back_prop=False)
# Helper function to collect residuals for relevant visual words.
def _ConstructVladFromAssignments(ind, vlad):
"""Add contributions of a feature to a VLAD descriptor.
Args:
ind: Integer index denoting feature.
vlad: Partial VLAD descriptor.
Returns:
output_ind: Next index (ie, ind+1).
output_vlad: VLAD descriptor updated to take into account contribution
from ind-th feature.
"""
diff = tf.tile(
tf.expand_dims(features[ind],
axis=0), [num_assignments, 1]) - tf.gather(
codebook, selected_visual_words[ind])
return ind + 1, tf.tensor_scatter_nd_add(
vlad, tf.expand_dims(selected_visual_words[ind], axis=1), diff)
ind_vlad = tf.constant(0, dtype=tf.int32)
keep_going = lambda j, vlad: tf.less(j, num_features)
vlad = tf.zeros([self._codebook_size, self._feature_dimensionality],
dtype=tf.float32)
_, vlad = tf.while_loop(
cond=keep_going,
body=_ConstructVladFromAssignments,
loop_vars=[ind_vlad, vlad],
back_prop=False)
vlad = tf.reshape(vlad,
[self._codebook_size * self._feature_dimensionality])
if use_l2_normalization:
vlad = tf.math.l2_normalize(vlad, epsilon=_NORM_SQUARED_TOLERANCE)
return vlad
return tf.cond(
tf.greater(tf.size(features), 0),
true_fn=_ComputeVladNonEmptyFeatures,
false_fn=_ComputeVladEmptyFeatures)
def _ComputeRvlad(self,
features,
num_features_per_region,
codebook,
use_l2_normalization=False,
num_assignments=1):
"""Compute R-VLAD representation.
Args:
features: [N, D] float tensor.
num_features_per_region: [R] int tensor. Contains number of features per
region, such that sum(num_features_per_region) = N. It indicates which
features correspond to each region.
codebook: [K, D] float tensor.
use_l2_normalization: If True, performs L2-normalization after regional
aggregation; if False (default), performs componentwise division by R
after regional aggregation.
num_assignments: Number of visual words to assign a feature to.
Returns:
rvlad: [K*D] float tensor.
"""
def _ComputeRvladEmptyRegions():
"""Computes R-VLAD if `num_features_per_region` is empty.
Returns:
[K*D] all-zeros tensor.
"""
return tf.zeros([self._codebook_size * self._feature_dimensionality],
dtype=tf.float32)
def _ComputeRvladNonEmptyRegions():
"""Computes R-VLAD if `num_features_per_region` is not empty.
Returns:
[K*D] tensor with R-VLAD descriptor.
"""
# Helper function to compose initial R-VLAD from image regions.
def _ConstructRvladFromVlad(ind, rvlad):
"""Add contributions from different regions into R-VLAD.
Args:
ind: Integer index denoting region.
rvlad: Partial R-VLAD descriptor.
Returns:
output_ind: Next index (ie, ind+1).
output_rvlad: R-VLAD descriptor updated to take into account
contribution from ind-th region.
"""
return ind + 1, rvlad + self._ComputeVlad(
tf.slice(
features, [tf.reduce_sum(num_features_per_region[:ind]), 0],
[num_features_per_region[ind], self._feature_dimensionality]),
codebook,
num_assignments=num_assignments)
i = tf.constant(0, dtype=tf.int32)
num_regions = tf.shape(num_features_per_region)[0]
keep_going = lambda j, rvlad: tf.less(j, num_regions)
rvlad = tf.zeros([self._codebook_size * self._feature_dimensionality],
dtype=tf.float32)
_, rvlad = tf.while_loop(
cond=keep_going,
body=_ConstructRvladFromVlad,
loop_vars=[i, rvlad],
back_prop=False,
parallel_iterations=1)
if use_l2_normalization:
rvlad = tf.math.l2_normalize(rvlad, epsilon=_NORM_SQUARED_TOLERANCE)
else:
rvlad /= tf.cast(num_regions, dtype=tf.float32)
return rvlad
return tf.cond(
tf.greater(tf.size(num_features_per_region), 0),
true_fn=_ComputeRvladNonEmptyRegions,
false_fn=_ComputeRvladEmptyRegions)
def _PerCentroidNormalization(self, unnormalized_vector):
"""Perform per-centroid normalization.
Args:
unnormalized_vector: [KxD] float tensor.
Returns:
per_centroid_normalized_vector: [KxD] float tensor, with normalized
aggregated residuals. Some residuals may be all-zero.
visual_words: Int tensor containing indices of visual words which are
present for the set of features.
"""
unnormalized_vector = tf.reshape(
unnormalized_vector,
[self._codebook_size, self._feature_dimensionality])
per_centroid_norms = tf.norm(unnormalized_vector, axis=1)
visual_words = tf.reshape(
tf.where(
tf.greater(per_centroid_norms, tf.sqrt(_NORM_SQUARED_TOLERANCE))),
[-1])
per_centroid_normalized_vector = tf.math.l2_normalize(
unnormalized_vector, axis=1, epsilon=_NORM_SQUARED_TOLERANCE)
return per_centroid_normalized_vector, visual_words
def _ComputeAsmk(self, features, codebook, num_assignments=1):
"""Compute ASMK representation.
Args:
features: [N, D] float tensor.
codebook: [K, D] float tensor.
num_assignments: Number of visual words to assign a feature to.
Returns:
normalized_residuals: 1-dimensional float tensor with concatenated
residuals which are non-zero. Note that the dimensionality is
input-dependent.
visual_words: 1-dimensional int tensor of sorted visual word ids.
Dimensionality is shape(normalized_residuals)[0] / D.
"""
unnormalized_vlad = self._ComputeVlad(
features,
codebook,
use_l2_normalization=False,
num_assignments=num_assignments)
per_centroid_normalized_vlad, visual_words = self._PerCentroidNormalization(
unnormalized_vlad)
normalized_residuals = tf.reshape(
tf.gather(per_centroid_normalized_vlad, visual_words),
[tf.shape(visual_words)[0] * self._feature_dimensionality])
return normalized_residuals, visual_words
def _ComputeRasmk(self,
features,
num_features_per_region,
codebook,
num_assignments=1):
"""Compute R-ASMK representation.
Args:
features: [N, D] float tensor.
num_features_per_region: [R] int tensor. Contains number of features per
region, such that sum(num_features_per_region) = N. It indicates which
features correspond to each region.
codebook: [K, D] float tensor.
num_assignments: Number of visual words to assign a feature to.
Returns:
normalized_residuals: 1-dimensional float tensor with concatenated
residuals which are non-zero. Note that the dimensionality is
input-dependent.
visual_words: 1-dimensional int tensor of sorted visual word ids.
Dimensionality is shape(normalized_residuals)[0] / D.
"""
unnormalized_rvlad = self._ComputeRvlad(
features,
num_features_per_region,
codebook,
use_l2_normalization=False,
num_assignments=num_assignments)
(per_centroid_normalized_rvlad,
visual_words) = self._PerCentroidNormalization(unnormalized_rvlad)
normalized_residuals = tf.reshape(
tf.gather(per_centroid_normalized_rvlad, visual_words),
[tf.shape(visual_words)[0] * self._feature_dimensionality])
return normalized_residuals, visual_words
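# Illustrative usage (mirroring the unit tests; the codebook checkpoint path is
# a placeholder):
#
#   config = aggregation_config_pb2.AggregationConfig()
#   config.codebook_size = 5
#   config.feature_dimensionality = 2
#   config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
#   config.codebook_path = '/path/to/codebook_checkpoint'
#   config.num_assignments = 1
#   extractor = ExtractAggregatedRepresentation(config)
#   vlad, _ = extractor.Extract(features)  # features: [N, D] float array.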
| 17,644 | 36.30444 | 80 | py |
models | models-master/research/delf/delf/python/feature_aggregation_similarity_test.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DELF feature aggregation similarity."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from delf import aggregation_config_pb2
from delf import feature_aggregation_similarity
class FeatureAggregationSimilarityTest(tf.test.TestCase):
def testComputeVladSimilarityWorks(self):
# Construct inputs.
vlad_1 = np.array([0, 1, 2, 3, 4])
vlad_2 = np.array([5, 6, 7, 8, 9])
config = aggregation_config_pb2.AggregationConfig()
config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD
# Run tested function.
similarity_computer = (
feature_aggregation_similarity.SimilarityAggregatedRepresentation(
config))
similarity = similarity_computer.ComputeSimilarity(vlad_1, vlad_2)
# Define expected results.
exp_similarity = 80
# Compare actual and expected results.
self.assertAllEqual(similarity, exp_similarity)
def testComputeAsmkSimilarityWorks(self):
# Construct inputs.
aggregated_descriptors_1 = np.array([
0.0, 0.0, -0.707107, -0.707107, 0.5, 0.866025, 0.816497, 0.577350, 1.0,
0.0
])
visual_words_1 = np.array([0, 1, 2, 3, 4])
aggregated_descriptors_2 = np.array(
[0.0, 1.0, 1.0, 0.0, 0.707107, 0.707107])
visual_words_2 = np.array([1, 2, 4])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK
config.use_l2_normalization = True
# Run tested function.
similarity_computer = (
feature_aggregation_similarity.SimilarityAggregatedRepresentation(
config))
similarity = similarity_computer.ComputeSimilarity(
aggregated_descriptors_1, aggregated_descriptors_2, visual_words_1,
visual_words_2)
# Define expected results.
exp_similarity = 0.123562
# Compare actual and expected results.
self.assertAllClose(similarity, exp_similarity)
def testComputeAsmkSimilarityNoNormalizationWorks(self):
# Construct inputs.
aggregated_descriptors_1 = np.array([
0.0, 0.0, -0.707107, -0.707107, 0.5, 0.866025, 0.816497, 0.577350, 1.0,
0.0
])
visual_words_1 = np.array([0, 1, 2, 3, 4])
aggregated_descriptors_2 = np.array(
[0.0, 1.0, 1.0, 0.0, 0.707107, 0.707107])
visual_words_2 = np.array([1, 2, 4])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK
config.use_l2_normalization = False
# Run tested function.
similarity_computer = (
feature_aggregation_similarity.SimilarityAggregatedRepresentation(
config))
similarity = similarity_computer.ComputeSimilarity(
aggregated_descriptors_1, aggregated_descriptors_2, visual_words_1,
visual_words_2)
# Define expected results.
exp_similarity = 0.478554
# Compare actual and expected results.
self.assertAllClose(similarity, exp_similarity)
def testComputeAsmkStarSimilarityWorks(self):
# Construct inputs.
aggregated_descriptors_1 = np.array([0, 0, 3, 3, 3], dtype='uint8')
visual_words_1 = np.array([0, 1, 2, 3, 4])
aggregated_descriptors_2 = np.array([1, 2, 3], dtype='uint8')
visual_words_2 = np.array([1, 2, 4])
config = aggregation_config_pb2.AggregationConfig()
config.codebook_size = 5
config.feature_dimensionality = 2
config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK_STAR
config.use_l2_normalization = True
# Run tested function.
similarity_computer = (
feature_aggregation_similarity.SimilarityAggregatedRepresentation(
config))
similarity = similarity_computer.ComputeSimilarity(
aggregated_descriptors_1, aggregated_descriptors_2, visual_words_1,
visual_words_2)
# Define expected results.
exp_similarity = 0.258199
# Compare actual and expected results.
self.assertAllClose(similarity, exp_similarity)
if __name__ == '__main__':
tf.test.main()
| 4,954 | 34.905797 | 80 | py |
models | models-master/research/delf/delf/python/feature_extractor_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DELF feature extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from delf import feature_extractor
class FeatureExtractorTest(tf.test.TestCase):
def testNormalizePixelValues(self):
image = tf.constant(
[[[3, 255, 0], [34, 12, 5]], [[45, 5, 65], [56, 77, 89]]],
dtype=tf.uint8)
normalized_image = feature_extractor.NormalizePixelValues(
image, pixel_value_offset=5.0, pixel_value_scale=2.0)
exp_normalized_image = [[[-1.0, 125.0, -2.5], [14.5, 3.5, 0.0]],
[[20.0, 0.0, 30.0], [25.5, 36.0, 42.0]]]
self.assertAllEqual(normalized_image, exp_normalized_image)
def testCalculateReceptiveBoxes(self):
boxes = feature_extractor.CalculateReceptiveBoxes(
height=1, width=2, rf=291, stride=32, padding=145)
exp_boxes = [[-145., -145., 145., 145.], [-145., -113., 145., 177.]]
self.assertAllEqual(exp_boxes, boxes)
def testCalculateKeypointCenters(self):
boxes = [[-10.0, 0.0, 11.0, 21.0], [-2.5, 5.0, 18.5, 26.0],
[45.0, -2.5, 66.0, 18.5]]
centers = feature_extractor.CalculateKeypointCenters(boxes)
exp_centers = [[0.5, 10.5], [8.0, 15.5], [55.5, 8.0]]
self.assertAllEqual(exp_centers, centers)
def testPcaWhitening(self):
data = tf.constant([[1.0, 2.0, -2.0], [-5.0, 0.0, 3.0], [-1.0, 2.0, 0.0],
[0.0, 4.0, -1.0]])
pca_matrix = tf.constant([[2.0, 0.0, -1.0], [0.0, 1.0, 1.0],
[-1.0, 1.0, 3.0]])
pca_mean = tf.constant([1.0, 2.0, 3.0])
output_dim = 2
use_whitening = True
pca_variances = tf.constant([4.0, 1.0])
output = feature_extractor.ApplyPcaAndWhitening(data, pca_matrix, pca_mean,
output_dim, use_whitening,
pca_variances)
exp_output = [[2.5, -5.0], [-6.0, -2.0], [-0.5, -3.0], [1.0, -2.0]]
self.assertAllEqual(exp_output, output)
if __name__ == '__main__':
tf.test.main()
| 2,807 | 35.947368 | 80 | py |
models | models-master/research/delf/delf/python/box_io_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_io, the python interface of Boxes proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from delf import box_io
FLAGS = flags.FLAGS
class BoxesIoTest(tf.test.TestCase):
def _create_data(self):
"""Creates data to be used in tests.
Returns:
      boxes: [N, 4] float array denoting bounding box coordinates, in format
        [top, left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
"""
boxes = np.arange(24, dtype=np.float32).reshape(6, 4)
scores = np.arange(6, dtype=np.float32)
class_indices = np.arange(6, dtype=np.int32)
return boxes, scores, class_indices
def testConversionAndBack(self):
boxes, scores, class_indices = self._create_data()
serialized = box_io.SerializeToString(boxes, scores, class_indices)
parsed_data = box_io.ParseFromString(serialized)
self.assertAllEqual(boxes, parsed_data[0])
self.assertAllEqual(scores, parsed_data[1])
self.assertAllEqual(class_indices, parsed_data[2])
def testWriteAndReadToFile(self):
boxes, scores, class_indices = self._create_data()
filename = os.path.join(FLAGS.test_tmpdir, 'test.boxes')
box_io.WriteToFile(filename, boxes, scores, class_indices)
data_read = box_io.ReadFromFile(filename)
self.assertAllEqual(boxes, data_read[0])
self.assertAllEqual(scores, data_read[1])
self.assertAllEqual(class_indices, data_read[2])
def testWriteAndReadToFileEmptyFile(self):
filename = os.path.join(FLAGS.test_tmpdir, 'test.box')
box_io.WriteToFile(filename, np.array([]), np.array([]), np.array([]))
data_read = box_io.ReadFromFile(filename)
self.assertAllEqual(np.array([]), data_read[0])
self.assertAllEqual(np.array([]), data_read[1])
self.assertAllEqual(np.array([]), data_read[2])
if __name__ == '__main__':
tf.test.main()
| 2,736 | 31.975904 | 80 | py |
models | models-master/research/delf/delf/python/feature_aggregation_similarity.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Local feature aggregation similarity computation.
For more details, please refer to the paper:
"Detect-to-Retrieve: Efficient Regional Aggregation for Image Search",
Proc. CVPR'19 (https://arxiv.org/abs/1812.01584).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from delf import aggregation_config_pb2
# Aliases for aggregation types.
_VLAD = aggregation_config_pb2.AggregationConfig.VLAD
_ASMK = aggregation_config_pb2.AggregationConfig.ASMK
_ASMK_STAR = aggregation_config_pb2.AggregationConfig.ASMK_STAR
class SimilarityAggregatedRepresentation(object):
"""Class for computing similarity of aggregated local feature representations.
Args:
aggregation_config: AggregationConfig object defining type of aggregation to
use.
Raises:
ValueError: If aggregation type is invalid.
"""
def __init__(self, aggregation_config):
self._feature_dimensionality = aggregation_config.feature_dimensionality
self._aggregation_type = aggregation_config.aggregation_type
# Only relevant if using ASMK/ASMK*. Otherwise, ignored.
self._use_l2_normalization = aggregation_config.use_l2_normalization
self._alpha = aggregation_config.alpha
self._tau = aggregation_config.tau
# Only relevant if using ASMK*. Otherwise, ignored.
self._number_bits = np.array([bin(n).count('1') for n in range(256)])
def ComputeSimilarity(self,
aggregated_descriptors_1,
aggregated_descriptors_2,
feature_visual_words_1=None,
feature_visual_words_2=None):
"""Computes similarity between aggregated descriptors.
Args:
aggregated_descriptors_1: 1-D NumPy array.
aggregated_descriptors_2: 1-D NumPy array.
feature_visual_words_1: Used only for ASMK/ASMK* aggregation type. 1-D
sorted NumPy integer array denoting visual words corresponding to
`aggregated_descriptors_1`.
feature_visual_words_2: Used only for ASMK/ASMK* aggregation type. 1-D
sorted NumPy integer array denoting visual words corresponding to
`aggregated_descriptors_2`.
Returns:
similarity: Float. The larger, the more similar.
Raises:
ValueError: If aggregation type is invalid.
"""
if self._aggregation_type == _VLAD:
similarity = np.dot(aggregated_descriptors_1, aggregated_descriptors_2)
elif self._aggregation_type == _ASMK:
similarity = self._AsmkSimilarity(
aggregated_descriptors_1,
aggregated_descriptors_2,
feature_visual_words_1,
feature_visual_words_2,
binarized=False)
elif self._aggregation_type == _ASMK_STAR:
similarity = self._AsmkSimilarity(
aggregated_descriptors_1,
aggregated_descriptors_2,
feature_visual_words_1,
feature_visual_words_2,
binarized=True)
else:
raise ValueError('Invalid aggregation type: %d' % self._aggregation_type)
return similarity
def _CheckAsmkDimensionality(self, aggregated_descriptors, num_visual_words,
descriptor_name):
"""Checks that ASMK dimensionality is as expected.
Args:
aggregated_descriptors: 1-D NumPy array.
num_visual_words: Integer.
descriptor_name: String.
Raises:
ValueError: If descriptor dimensionality is incorrect.
"""
if len(aggregated_descriptors
) / num_visual_words != self._feature_dimensionality:
raise ValueError(
'Feature dimensionality for aggregated descriptor %s is invalid: %d;'
' expected %d.' % (descriptor_name, len(aggregated_descriptors) /
num_visual_words, self._feature_dimensionality))
def _SigmaFn(self, x):
"""Selectivity ASMK/ASMK* similarity function.
Args:
x: Scalar or 1-D NumPy array.
Returns:
result: Same type as input, with output of selectivity function.
"""
if np.isscalar(x):
if x > self._tau:
result = np.sign(x) * np.power(np.absolute(x), self._alpha)
else:
result = 0.0
else:
result = np.zeros_like(x)
above_tau = np.nonzero(x > self._tau)
result[above_tau] = np.sign(x[above_tau]) * np.power(
np.absolute(x[above_tau]), self._alpha)
return result
def _BinaryNormalizedInnerProduct(self, descriptors_1, descriptors_2):
"""Computes normalized binary inner product.
Args:
descriptors_1: 1-D NumPy integer array.
descriptors_2: 1-D NumPy integer array.
Returns:
inner_product: Float.
Raises:
ValueError: If the dimensionality of descriptors is different.
"""
num_descriptors = len(descriptors_1)
if num_descriptors != len(descriptors_2):
raise ValueError(
'Descriptors have incompatible dimensionality: %d vs %d' %
(len(descriptors_1), len(descriptors_2)))
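    # Accumulate the Hamming distance over all bytes using the precomputed
    # per-byte popcount table (self._number_bits).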
h = 0
for i in range(num_descriptors):
h += self._number_bits[np.bitwise_xor(descriptors_1[i], descriptors_2[i])]
# If local feature dimensionality is lower than 8, then use that to compute
# proper binarized inner product.
bits_per_descriptor = min(self._feature_dimensionality, 8)
total_num_bits = bits_per_descriptor * num_descriptors
return 1.0 - 2.0 * h / total_num_bits
def _AsmkSimilarity(self,
aggregated_descriptors_1,
aggregated_descriptors_2,
visual_words_1,
visual_words_2,
binarized=False):
"""Compute ASMK-based similarity.
If `aggregated_descriptors_1` or `aggregated_descriptors_2` is empty, we
return a similarity of -1.0.
If binarized is True, `aggregated_descriptors_1` and
`aggregated_descriptors_2` must be of type uint8.
Args:
aggregated_descriptors_1: 1-D NumPy array.
aggregated_descriptors_2: 1-D NumPy array.
visual_words_1: 1-D sorted NumPy integer array denoting visual words
corresponding to `aggregated_descriptors_1`.
visual_words_2: 1-D sorted NumPy integer array denoting visual words
corresponding to `aggregated_descriptors_2`.
binarized: If True, compute ASMK* similarity.
Returns:
similarity: Float. The larger, the more similar.
Raises:
ValueError: If input descriptor dimensionality is inconsistent, or if
descriptor type is unsupported.
"""
num_visual_words_1 = len(visual_words_1)
num_visual_words_2 = len(visual_words_2)
if not num_visual_words_1 or not num_visual_words_2:
return -1.0
# Parse dimensionality used per visual word. They must be the same for both
# aggregated descriptors. If using ASMK, they also must be equal to
# self._feature_dimensionality.
if binarized:
if aggregated_descriptors_1.dtype != 'uint8':
raise ValueError('Incorrect input descriptor type: %s' %
aggregated_descriptors_1.dtype)
if aggregated_descriptors_2.dtype != 'uint8':
raise ValueError('Incorrect input descriptor type: %s' %
aggregated_descriptors_2.dtype)
per_visual_word_dimensionality = int(
len(aggregated_descriptors_1) / num_visual_words_1)
if len(aggregated_descriptors_2
) / num_visual_words_2 != per_visual_word_dimensionality:
raise ValueError('ASMK* dimensionality is inconsistent.')
else:
per_visual_word_dimensionality = self._feature_dimensionality
self._CheckAsmkDimensionality(aggregated_descriptors_1,
num_visual_words_1, '1')
self._CheckAsmkDimensionality(aggregated_descriptors_2,
num_visual_words_2, '2')
aggregated_descriptors_1_reshape = np.reshape(
aggregated_descriptors_1,
[num_visual_words_1, per_visual_word_dimensionality])
aggregated_descriptors_2_reshape = np.reshape(
aggregated_descriptors_2,
[num_visual_words_2, per_visual_word_dimensionality])
# Loop over visual words, compute similarity.
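    # Both visual word lists are sorted, so a two-pointer merge scan finds the
    # common visual words in linear time; only those contribute to the
    # similarity.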
unnormalized_similarity = 0.0
ind_1 = 0
ind_2 = 0
while ind_1 < num_visual_words_1 and ind_2 < num_visual_words_2:
if visual_words_1[ind_1] == visual_words_2[ind_2]:
if binarized:
inner_product = self._BinaryNormalizedInnerProduct(
aggregated_descriptors_1_reshape[ind_1],
aggregated_descriptors_2_reshape[ind_2])
else:
inner_product = np.dot(aggregated_descriptors_1_reshape[ind_1],
aggregated_descriptors_2_reshape[ind_2])
unnormalized_similarity += self._SigmaFn(inner_product)
ind_1 += 1
ind_2 += 1
elif visual_words_1[ind_1] > visual_words_2[ind_2]:
ind_2 += 1
else:
ind_1 += 1
final_similarity = unnormalized_similarity
if self._use_l2_normalization:
final_similarity /= np.sqrt(num_visual_words_1 * num_visual_words_2)
return final_similarity
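# Example usage (illustrative sketch; the configuration values below are
# hypothetical and must match those used when the descriptors were aggregated;
# the visual word arrays must be sorted):
#   config = aggregation_config_pb2.AggregationConfig()
#   config.codebook_size = 65536
#   config.feature_dimensionality = 128
#   config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK
#   similarity_computer = SimilarityAggregatedRepresentation(config)
#   score = similarity_computer.ComputeSimilarity(
#       aggregated_descriptors_1, aggregated_descriptors_2, visual_words_1,
#       visual_words_2)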
| 9,841 | 36 | 80 | py |
models | models-master/research/delf/delf/python/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/delf/delf/python/datum_io_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for datum_io, the python interface of DatumProto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from delf import datum_io
FLAGS = flags.FLAGS
class DatumIoTest(tf.test.TestCase):
def Conversion2dTestWithType(self, dtype):
original_data = np.arange(9).reshape(3, 3).astype(dtype)
serialized = datum_io.SerializeToString(original_data)
retrieved_data = datum_io.ParseFromString(serialized)
self.assertTrue(np.array_equal(original_data, retrieved_data))
def Conversion3dTestWithType(self, dtype):
original_data = np.arange(24).reshape(2, 3, 4).astype(dtype)
serialized = datum_io.SerializeToString(original_data)
retrieved_data = datum_io.ParseFromString(serialized)
self.assertTrue(np.array_equal(original_data, retrieved_data))
# This test covers the following functions: ArrayToDatum, SerializeToString,
# ParseFromString, DatumToArray.
def testConversion2dWithType(self):
self.Conversion2dTestWithType(np.uint16)
self.Conversion2dTestWithType(np.uint32)
self.Conversion2dTestWithType(np.uint64)
self.Conversion2dTestWithType(np.float16)
self.Conversion2dTestWithType(np.float32)
self.Conversion2dTestWithType(np.float64)
# This test covers the following functions: ArrayToDatum, SerializeToString,
# ParseFromString, DatumToArray.
def testConversion3dWithType(self):
self.Conversion3dTestWithType(np.uint16)
self.Conversion3dTestWithType(np.uint32)
self.Conversion3dTestWithType(np.uint64)
self.Conversion3dTestWithType(np.float16)
self.Conversion3dTestWithType(np.float32)
self.Conversion3dTestWithType(np.float64)
def testConversionWithUnsupportedType(self):
with self.assertRaisesRegex(ValueError, 'Unsupported array type'):
self.Conversion3dTestWithType(int)
# This test covers the following functions: ArrayToDatum, SerializeToString,
# WriteToFile, ReadFromFile, ParseFromString, DatumToArray.
def testWriteAndReadToFile(self):
data = np.array([[[-1.0, 125.0, -2.5], [14.5, 3.5, 0.0]],
[[20.0, 0.0, 30.0], [25.5, 36.0, 42.0]]])
filename = os.path.join(FLAGS.test_tmpdir, 'test.datum')
datum_io.WriteToFile(data, filename)
data_read = datum_io.ReadFromFile(filename)
self.assertAllEqual(data_read, data)
# This test covers the following functions: ArraysToDatumPair,
# SerializePairToString, WritePairToFile, ReadPairFromFile,
# ParsePairFromString, DatumPairToArrays.
def testWriteAndReadPairToFile(self):
data_1 = np.array([[[-1.0, 125.0, -2.5], [14.5, 3.5, 0.0]],
[[20.0, 0.0, 30.0], [25.5, 36.0, 42.0]]])
data_2 = np.array(
[[[255, 0, 5], [10, 300, 0]], [[20, 1, 100], [255, 360, 420]]],
dtype='uint32')
filename = os.path.join(FLAGS.test_tmpdir, 'test.datum_pair')
datum_io.WritePairToFile(data_1, data_2, filename)
data_read_1, data_read_2 = datum_io.ReadPairFromFile(filename)
self.assertAllEqual(data_read_1, data_1)
self.assertAllEqual(data_read_2, data_2)
if __name__ == '__main__':
tf.test.main()
| 3,901 | 38.816327 | 80 | py |
models | models-master/research/delf/delf/python/utils_test.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for helper utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from delf import delf_config_pb2
from delf import utils
class UtilsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('Max-1Min-1', -1, -1, 1.0, False, [4, 2, 3], [1.0, 1.0]),
('Max-1Min-1Square', -1, -1, 1.0, True, [4, 4, 3], [1.0, 2.0]),
('Max2Min-1', 2, -1, 1.0, False, [2, 1, 3], [0.5, 0.5]),
('Max2Min-1Square', 2, -1, 1.0, True, [2, 2, 3], [0.5, 1.0]),
('Max8Min-1', 8, -1, 1.0, False, [4, 2, 3], [1.0, 1.0]),
('Max8Min-1Square', 8, -1, 1.0, True, [4, 4, 3], [1.0, 2.0]),
('Max-1Min1', -1, 1, 1.0, False, [4, 2, 3], [1.0, 1.0]),
('Max-1Min1Square', -1, 1, 1.0, True, [4, 4, 3], [1.0, 2.0]),
('Max-1Min8', -1, 8, 1.0, False, [8, 4, 3], [2.0, 2.0]),
('Max-1Min8Square', -1, 8, 1.0, True, [8, 8, 3], [2.0, 4.0]),
('Max16Min8', 16, 8, 1.0, False, [8, 4, 3], [2.0, 2.0]),
('Max16Min8Square', 16, 8, 1.0, True, [8, 8, 3], [2.0, 4.0]),
('Max2Min2', 2, 2, 1.0, False, [2, 1, 3], [0.5, 0.5]),
('Max2Min2Square', 2, 2, 1.0, True, [2, 2, 3], [0.5, 1.0]),
('Max-1Min-1Factor0.5', -1, -1, 0.5, False, [4, 2, 3], [1.0, 1.0]),
('Max-1Min-1Factor0.5Square', -1, -1, 0.5, True, [4, 4, 3], [1.0, 2.0]),
('Max2Min-1Factor2.0', 2, -1, 2.0, False, [4, 2, 3], [1.0, 1.0]),
('Max2Min-1Factor2.0Square', 2, -1, 2.0, True, [4, 4, 3], [1.0, 2.0]),
('Max-1Min8Factor0.5', -1, 8, 0.5, False, [4, 2, 3], [1.0, 1.0]),
('Max-1Min8Factor0.5Square', -1, 8, 0.5, True, [4, 4, 3], [1.0, 2.0]),
('Max-1Min8Factor0.25', -1, 8, 0.25, False, [4, 2, 3], [1.0, 1.0]),
('Max-1Min8Factor0.25Square', -1, 8, 0.25, True, [4, 4, 3], [1.0, 2.0]),
('Max2Min2Factor2.0', 2, 2, 2.0, False, [4, 2, 3], [1.0, 1.0]),
('Max2Min2Factor2.0Square', 2, 2, 2.0, True, [4, 4, 3], [1.0, 2.0]),
('Max16Min8Factor0.5', 16, 8, 0.5, False, [4, 2, 3], [1.0, 1.0]),
('Max16Min8Factor0.5Square', 16, 8, 0.5, True, [4, 4, 3], [1.0, 2.0]),
)
def testResizeImageWorks(self, max_image_size, min_image_size, resize_factor,
square_output, expected_shape,
expected_scale_factors):
# Construct image of size 4x2x3.
image = np.array([[[0, 0, 0], [1, 1, 1]], [[2, 2, 2], [3, 3, 3]],
[[4, 4, 4], [5, 5, 5]], [[6, 6, 6], [7, 7, 7]]],
dtype='uint8')
# Set up config.
config = delf_config_pb2.DelfConfig(
max_image_size=max_image_size,
min_image_size=min_image_size,
use_square_images=square_output)
resized_image, scale_factors = utils.ResizeImage(image, config,
resize_factor)
self.assertAllEqual(resized_image.shape, expected_shape)
self.assertAllClose(scale_factors, expected_scale_factors)
@parameterized.named_parameters(
('Max2Min2', 2, 2, 1.0, False, [2, 1, 3], [0.666666, 0.5]),
('Max2Min2Square', 2, 2, 1.0, True, [2, 2, 3], [0.666666, 1.0]),
)
def testResizeImageRoundingWorks(self, max_image_size, min_image_size,
resize_factor, square_output, expected_shape,
expected_scale_factors):
# Construct image of size 3x2x3.
image = np.array([[[0, 0, 0], [1, 1, 1]], [[2, 2, 2], [3, 3, 3]],
[[4, 4, 4], [5, 5, 5]]],
dtype='uint8')
# Set up config.
config = delf_config_pb2.DelfConfig(
max_image_size=max_image_size,
min_image_size=min_image_size,
use_square_images=square_output)
resized_image, scale_factors = utils.ResizeImage(image, config,
resize_factor)
self.assertAllEqual(resized_image.shape, expected_shape)
self.assertAllClose(scale_factors, expected_scale_factors)
if __name__ == '__main__':
tf.test.main()
| 4,805 | 45.211538 | 80 | py |
models | models-master/research/delf/delf/python/examples/extract_features.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts DELF features from a list of images, saving them to file.
The images must be in JPG format. The program checks if descriptors already
exist, and skips computation for those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
from absl import app
import numpy as np
from six.moves import range
import tensorflow as tf
from google.protobuf import text_format
from delf import delf_config_pb2
from delf import feature_io
from delf import utils
from delf import extractor
cmd_args = None
# Extension of feature files.
_DELF_EXT = '.delf'
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.io.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
def main(unused_argv):
# Read list of images.
print('Reading list of images...')
image_paths = _ReadImageList(cmd_args.list_images_path)
num_images = len(image_paths)
print(f'done! Found {num_images} images')
# Parse DelfConfig proto.
config = delf_config_pb2.DelfConfig()
with tf.io.gfile.GFile(cmd_args.config_path, 'r') as f:
text_format.Merge(f.read(), config)
# Create output directory if necessary.
if not tf.io.gfile.exists(cmd_args.output_dir):
tf.io.gfile.makedirs(cmd_args.output_dir)
extractor_fn = extractor.MakeExtractor(config)
start = time.time()
for i in range(num_images):
# Report progress once in a while.
if i == 0:
print('Starting to extract DELF features from images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.time() - start)
print(f'Processing image {i} out of {num_images}, last '
f'{_STATUS_CHECK_ITERATIONS} images took {elapsed} seconds')
start = time.time()
# If descriptor already exists, skip its computation.
out_desc_filename = os.path.splitext(os.path.basename(
image_paths[i]))[0] + _DELF_EXT
out_desc_fullpath = os.path.join(cmd_args.output_dir, out_desc_filename)
if tf.io.gfile.exists(out_desc_fullpath):
print(f'Skipping {image_paths[i]}')
continue
im = np.array(utils.RgbLoader(image_paths[i]))
# Extract and save features.
extracted_features = extractor_fn(im)
locations_out = extracted_features['local_features']['locations']
descriptors_out = extracted_features['local_features']['descriptors']
feature_scales_out = extracted_features['local_features']['scales']
attention_out = extracted_features['local_features']['attention']
feature_io.WriteToFile(out_desc_fullpath, locations_out, feature_scales_out,
descriptors_out, attention_out)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--config_path',
type=str,
default='delf_config_example.pbtxt',
help="""
Path to DelfConfig proto text file with configuration to be used for DELF
extraction.
""")
parser.add_argument(
'--list_images_path',
type=str,
default='list_images.txt',
help="""
Path to list of images whose DELF features will be extracted.
""")
parser.add_argument(
'--output_dir',
type=str,
default='test_features',
help="""
Directory where DELF features will be written to. Each image's features
will be written to a file with same name, and extension replaced by .delf.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 4,596 | 31.146853 | 80 | py |
models | models-master/research/delf/delf/python/examples/extractor.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to construct DELF feature extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from delf import datum_io
from delf import feature_extractor
from delf import utils
# Minimum dimensions below which features are not extracted (empty
# features are returned). This applies after any resizing is performed.
_MIN_HEIGHT = 10
_MIN_WIDTH = 10
def MakeExtractor(config):
"""Creates a function to extract global and/or local features from an image.
Args:
config: DelfConfig proto containing the model configuration.
Returns:
Function that receives an image and returns features.
Raises:
ValueError: if config is invalid.
"""
# Assert the configuration.
if not config.use_local_features and not config.use_global_features:
raise ValueError('Invalid config: at least one of '
'{use_local_features, use_global_features} must be True')
# Load model.
model = tf.saved_model.load(config.model_path)
# Input image scales to use for extraction.
image_scales_tensor = tf.convert_to_tensor(list(config.image_scales))
# Input (feeds) and output (fetches) end-points. These are only needed when
# using a model that was exported using TF1.
feeds = ['input_image:0', 'input_scales:0']
fetches = []
# Custom configuration needed when local features are used.
if config.use_local_features:
# Extra input/output end-points/tensors.
feeds.append('input_abs_thres:0')
feeds.append('input_max_feature_num:0')
fetches.append('boxes:0')
fetches.append('features:0')
fetches.append('scales:0')
fetches.append('scores:0')
score_threshold_tensor = tf.constant(
config.delf_local_config.score_threshold)
max_feature_num_tensor = tf.constant(
config.delf_local_config.max_feature_num)
# If using PCA, pre-load required parameters.
local_pca_parameters = {}
if config.delf_local_config.use_pca:
local_pca_parameters['mean'] = tf.constant(
datum_io.ReadFromFile(
config.delf_local_config.pca_parameters.mean_path),
dtype=tf.float32)
local_pca_parameters['matrix'] = tf.constant(
datum_io.ReadFromFile(
config.delf_local_config.pca_parameters.projection_matrix_path),
dtype=tf.float32)
local_pca_parameters[
'dim'] = config.delf_local_config.pca_parameters.pca_dim
local_pca_parameters['use_whitening'] = (
config.delf_local_config.pca_parameters.use_whitening)
if config.delf_local_config.pca_parameters.use_whitening:
local_pca_parameters['variances'] = tf.squeeze(
tf.constant(
datum_io.ReadFromFile(
config.delf_local_config.pca_parameters.pca_variances_path),
dtype=tf.float32))
else:
local_pca_parameters['variances'] = None
# Custom configuration needed when global features are used.
if config.use_global_features:
# Extra input/output end-points/tensors.
feeds.append('input_global_scales_ind:0')
fetches.append('global_descriptors:0')
if config.delf_global_config.image_scales_ind:
global_scales_ind_tensor = tf.constant(
list(config.delf_global_config.image_scales_ind))
else:
global_scales_ind_tensor = tf.range(len(config.image_scales))
# If using PCA, pre-load required parameters.
global_pca_parameters = {}
if config.delf_global_config.use_pca:
global_pca_parameters['mean'] = tf.constant(
datum_io.ReadFromFile(
config.delf_global_config.pca_parameters.mean_path),
dtype=tf.float32)
global_pca_parameters['matrix'] = tf.constant(
datum_io.ReadFromFile(
config.delf_global_config.pca_parameters.projection_matrix_path),
dtype=tf.float32)
global_pca_parameters[
'dim'] = config.delf_global_config.pca_parameters.pca_dim
global_pca_parameters['use_whitening'] = (
config.delf_global_config.pca_parameters.use_whitening)
if config.delf_global_config.pca_parameters.use_whitening:
global_pca_parameters['variances'] = tf.squeeze(
tf.constant(
datum_io.ReadFromFile(config.delf_global_config.pca_parameters
.pca_variances_path),
dtype=tf.float32))
else:
global_pca_parameters['variances'] = None
if not hasattr(config, 'is_tf2_exported') or not config.is_tf2_exported:
model = model.prune(feeds=feeds, fetches=fetches)
def ExtractorFn(image, resize_factor=1.0):
"""Receives an image and returns DELF global and/or local features.
If image is too small, returns empty features.
Args:
image: Uint8 array with shape (height, width, 3) containing the RGB image.
resize_factor: Optional float resize factor for the input image. If given,
the maximum and minimum allowed image sizes in the config are scaled by
this factor.
Returns:
extracted_features: A dict containing the extracted global descriptors
(key 'global_descriptor' mapping to a [D] float array), and/or local
features (key 'local_features' mapping to a dict with keys 'locations',
'descriptors', 'scales', 'attention').
"""
resized_image, scale_factors = utils.ResizeImage(
image, config, resize_factor=resize_factor)
# If the image is too small, returns empty features.
if resized_image.shape[0] < _MIN_HEIGHT or resized_image.shape[
1] < _MIN_WIDTH:
extracted_features = {'global_descriptor': np.array([])}
if config.use_local_features:
extracted_features.update({
'local_features': {
'locations': np.array([]),
'descriptors': np.array([]),
'scales': np.array([]),
'attention': np.array([]),
}
})
return extracted_features
# Input tensors.
image_tensor = tf.convert_to_tensor(resized_image)
# Extracted features.
extracted_features = {}
output = None
if hasattr(config, 'is_tf2_exported') and config.is_tf2_exported:
predict = model.signatures['serving_default']
if config.use_local_features and config.use_global_features:
output_dict = predict(
input_image=image_tensor,
input_scales=image_scales_tensor,
input_max_feature_num=max_feature_num_tensor,
input_abs_thres=score_threshold_tensor,
input_global_scales_ind=global_scales_ind_tensor)
output = [
output_dict['boxes'], output_dict['features'],
output_dict['scales'], output_dict['scores'],
output_dict['global_descriptors']
]
elif config.use_local_features:
output_dict = predict(
input_image=image_tensor,
input_scales=image_scales_tensor,
input_max_feature_num=max_feature_num_tensor,
input_abs_thres=score_threshold_tensor)
output = [
output_dict['boxes'], output_dict['features'],
output_dict['scales'], output_dict['scores']
]
else:
output_dict = predict(
input_image=image_tensor,
input_scales=image_scales_tensor,
input_global_scales_ind=global_scales_ind_tensor)
output = [output_dict['global_descriptors']]
else:
if config.use_local_features and config.use_global_features:
output = model(image_tensor, image_scales_tensor,
score_threshold_tensor, max_feature_num_tensor,
global_scales_ind_tensor)
elif config.use_local_features:
output = model(image_tensor, image_scales_tensor,
score_threshold_tensor, max_feature_num_tensor)
else:
output = model(image_tensor, image_scales_tensor,
global_scales_ind_tensor)
# Post-process extracted features: normalize, PCA (optional), pooling.
if config.use_global_features:
raw_global_descriptors = output[-1]
global_descriptors_per_scale = feature_extractor.PostProcessDescriptors(
raw_global_descriptors, config.delf_global_config.use_pca,
global_pca_parameters)
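      # Sum-pool the per-scale descriptors into a single vector, then
      # L2-normalize it to obtain the final global descriptor.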
unnormalized_global_descriptor = tf.reduce_sum(
global_descriptors_per_scale, axis=0, name='sum_pooling')
global_descriptor = tf.nn.l2_normalize(
unnormalized_global_descriptor, axis=0, name='final_l2_normalization')
extracted_features.update({
'global_descriptor': global_descriptor.numpy(),
})
if config.use_local_features:
boxes = output[0]
raw_local_descriptors = output[1]
feature_scales = output[2]
attention_with_extra_dim = output[3]
attention = tf.reshape(attention_with_extra_dim,
[tf.shape(attention_with_extra_dim)[0]])
locations, local_descriptors = (
feature_extractor.DelfFeaturePostProcessing(
boxes, raw_local_descriptors, config.delf_local_config.use_pca,
local_pca_parameters))
if not config.delf_local_config.use_resized_coordinates:
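        # Map keypoint locations from resized-image coordinates back to the
        # original image frame.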
locations /= scale_factors
extracted_features.update({
'local_features': {
'locations': locations.numpy(),
'descriptors': local_descriptors.numpy(),
'scales': feature_scales.numpy(),
'attention': attention.numpy(),
}
})
return extracted_features
return ExtractorFn
| 10,380 | 38.471483 | 80 | py |
models | models-master/research/delf/delf/python/examples/detector.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to construct object detector function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def MakeDetector(model_dir):
"""Creates a function to detect objects in an image.
Args:
model_dir: Directory where SavedModel is located.
Returns:
Function that receives an image and returns detection results.
"""
model = tf.saved_model.load(model_dir)
# Input and output tensors.
feeds = ['input_images:0']
fetches = ['detection_boxes:0', 'detection_scores:0', 'detection_classes:0']
model = model.prune(feeds=feeds, fetches=fetches)
def DetectorFn(images):
"""Receives an image and returns detected boxes.
Args:
      images: Uint8 array with shape (batch, height, width, 3) containing a batch
of RGB images.
Returns:
Tuple (boxes, scores, class_indices).
"""
boxes, scores, class_indices = model(tf.convert_to_tensor(images))
return boxes.numpy(), scores.numpy(), class_indices.numpy()
return DetectorFn
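# Example usage (illustrative sketch; the SavedModel path is hypothetical and
# numpy is assumed to be imported by the caller):
#   detector_fn = MakeDetector('/path/to/detector_saved_model')
#   boxes, scores, class_indices = detector_fn(np.expand_dims(image, axis=0))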
| 1,760 | 30.446429 | 80 | py |
models | models-master/research/delf/delf/python/examples/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/delf/delf/python/examples/match_images.py | # Lint as: python3
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matches two images using their DELF features.
The matching is done using feature-based nearest-neighbor search, followed by
geometric verification using RANSAC.
The DELF features can be extracted using the extract_features.py script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from absl import app
import matplotlib
# Needed before pyplot import for matplotlib to work properly.
matplotlib.use('Agg')
import matplotlib.image as mpimg # pylint: disable=g-import-not-at-top
import matplotlib.pyplot as plt
import numpy as np
from scipy import spatial
from skimage import feature
from skimage import measure
from skimage import transform
from delf import feature_io
cmd_args = None
_DISTANCE_THRESHOLD = 0.8
def main(unused_argv):
# Read features.
locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
cmd_args.features_1_path)
num_features_1 = locations_1.shape[0]
print(f"Loaded image 1's {num_features_1} features")
locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
cmd_args.features_2_path)
num_features_2 = locations_2.shape[0]
print(f"Loaded image 2's {num_features_2} features")
# Find nearest-neighbor matches using a KD tree.
d1_tree = spatial.cKDTree(descriptors_1)
_, indices = d1_tree.query(
descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)
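  # When no neighbor lies within the distance threshold, cKDTree.query returns
  # index num_features_1 (one past the last valid index); such non-matches are
  # filtered out below.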
# Select feature locations for putative matches.
locations_2_to_use = np.array([
locations_2[i,]
for i in range(num_features_2)
if indices[i] != num_features_1
])
locations_1_to_use = np.array([
locations_1[indices[i],]
for i in range(num_features_2)
if indices[i] != num_features_1
])
# Perform geometric verification using RANSAC.
_, inliers = measure.ransac((locations_1_to_use, locations_2_to_use),
transform.AffineTransform,
min_samples=3,
residual_threshold=20,
max_trials=1000)
print(f'Found {sum(inliers)} inliers')
# Visualize correspondences, and save to file.
_, ax = plt.subplots()
img_1 = mpimg.imread(cmd_args.image_1_path)
img_2 = mpimg.imread(cmd_args.image_2_path)
inlier_idxs = np.nonzero(inliers)[0]
feature.plot_matches(
ax,
img_1,
img_2,
locations_1_to_use,
locations_2_to_use,
np.column_stack((inlier_idxs, inlier_idxs)),
matches_color='b')
ax.axis('off')
ax.set_title('DELF correspondences')
plt.savefig(cmd_args.output_image)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--image_1_path',
type=str,
default='test_images/image_1.jpg',
help="""
Path to test image 1.
""")
parser.add_argument(
'--image_2_path',
type=str,
default='test_images/image_2.jpg',
help="""
Path to test image 2.
""")
parser.add_argument(
'--features_1_path',
type=str,
default='test_features/image_1.delf',
help="""
Path to DELF features from image 1.
""")
parser.add_argument(
'--features_2_path',
type=str,
default='test_features/image_2.delf',
help="""
Path to DELF features from image 2.
""")
parser.add_argument(
'--output_image',
type=str,
default='test_match.png',
help="""
Path where an image showing the matches will be saved.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 4,409 | 29.413793 | 80 | py |
models | models-master/research/delf/delf/python/examples/extract_boxes.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts bounding boxes from a list of images, saving them to files.
The images must be in JPG format. The program checks if boxes already
exist, and skips computation for those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
from absl import app
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from delf import box_io
from delf import utils
from delf import detector
cmd_args = None
# Extension/suffix of produced files.
_BOX_EXT = '.boxes'
_VIZ_SUFFIX = '_viz.jpg'
# Used for plotting boxes.
_BOX_EDGE_COLORS = ['r', 'y', 'b', 'm', 'k', 'g', 'c', 'w']
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.io.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
def _FilterBoxesByScore(boxes, scores, class_indices, score_threshold):
"""Filter boxes based on detection scores.
Boxes with detection score >= score_threshold are returned.
Args:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
score_threshold: Float detection score threshold to use.
Returns:
selected_boxes: selected `boxes`.
selected_scores: selected `scores`.
selected_class_indices: selected `class_indices`.
"""
selected_boxes = []
selected_scores = []
selected_class_indices = []
for i, box in enumerate(boxes):
if scores[i] >= score_threshold:
selected_boxes.append(box)
selected_scores.append(scores[i])
selected_class_indices.append(class_indices[i])
return np.array(selected_boxes), np.array(selected_scores), np.array(
selected_class_indices)
def _PlotBoxesAndSaveImage(image, boxes, output_path):
"""Plot boxes on image and save to output path.
Args:
image: Numpy array containing image.
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
output_path: String containing output path.
"""
height = image.shape[0]
width = image.shape[1]
fig, ax = plt.subplots(1)
ax.imshow(image)
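  # Boxes are in normalized [top, left, bottom, right] coordinates; scale them
  # to pixel coordinates before drawing.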
for i, box in enumerate(boxes):
scaled_box = [
box[0] * height, box[1] * width, box[2] * height, box[3] * width
]
rect = patches.Rectangle([scaled_box[1], scaled_box[0]],
scaled_box[3] - scaled_box[1],
scaled_box[2] - scaled_box[0],
linewidth=3,
edgecolor=_BOX_EDGE_COLORS[i %
len(_BOX_EDGE_COLORS)],
facecolor='none')
ax.add_patch(rect)
ax.axis('off')
plt.savefig(output_path, bbox_inches='tight')
plt.close(fig)
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read list of images.
print('Reading list of images...')
image_paths = _ReadImageList(cmd_args.list_images_path)
num_images = len(image_paths)
print(f'done! Found {num_images} images')
# Create output directories if necessary.
if not tf.io.gfile.exists(cmd_args.output_dir):
tf.io.gfile.makedirs(cmd_args.output_dir)
if cmd_args.output_viz_dir and not tf.io.gfile.exists(
cmd_args.output_viz_dir):
tf.io.gfile.makedirs(cmd_args.output_viz_dir)
detector_fn = detector.MakeDetector(cmd_args.detector_path)
start = time.time()
for i, image_path in enumerate(image_paths):
# Report progress once in a while.
if i == 0:
print('Starting to detect objects in images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.time() - start)
print(f'Processing image {i} out of {num_images}, last '
f'{_STATUS_CHECK_ITERATIONS} images took {elapsed} seconds')
start = time.time()
# If descriptor already exists, skip its computation.
base_boxes_filename, _ = os.path.splitext(os.path.basename(image_path))
out_boxes_filename = base_boxes_filename + _BOX_EXT
out_boxes_fullpath = os.path.join(cmd_args.output_dir, out_boxes_filename)
if tf.io.gfile.exists(out_boxes_fullpath):
print(f'Skipping {image_path}')
continue
im = np.expand_dims(np.array(utils.RgbLoader(image_paths[i])), 0)
# Extract and save boxes.
(boxes_out, scores_out, class_indices_out) = detector_fn(im)
(selected_boxes, selected_scores,
selected_class_indices) = _FilterBoxesByScore(boxes_out[0], scores_out[0],
class_indices_out[0],
cmd_args.detector_thresh)
box_io.WriteToFile(out_boxes_fullpath, selected_boxes, selected_scores,
selected_class_indices)
if cmd_args.output_viz_dir:
out_viz_filename = base_boxes_filename + _VIZ_SUFFIX
out_viz_fullpath = os.path.join(cmd_args.output_viz_dir, out_viz_filename)
_PlotBoxesAndSaveImage(im[0], selected_boxes, out_viz_fullpath)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--detector_path',
type=str,
default='/tmp/d2r_frcnn_20190411/',
help="""
Path to exported detector model.
""")
parser.add_argument(
'--detector_thresh',
type=float,
default=.0,
help="""
Detector threshold. Any box with confidence score lower than this is not
returned.
""")
parser.add_argument(
'--list_images_path',
type=str,
default='list_images.txt',
help="""
Path to list of images to undergo object detection.
""")
parser.add_argument(
'--output_dir',
type=str,
default='test_boxes',
help="""
Directory where bounding boxes will be written to. Each image's boxes
will be written to a file with same name, and extension replaced by
.boxes.
""")
parser.add_argument(
'--output_viz_dir',
type=str,
default='',
help="""
Optional. If set, a visualization of the detected boxes overlaid on the
image is produced, and saved to this directory. Each image is saved with
_viz.jpg suffix.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 7,510 | 31.656522 | 80 | py |
models | models-master/research/delf/delf/python/training/tensorboard_utils.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for tensorboard."""
from tensorboard import program
from delf.python.training import global_features_utils
def launch_tensorboard(log_dir):
"""Runs tensorboard with the given `log_dir`.
Args:
log_dir: String, directory to launch tensorboard in.
"""
tensorboard = program.TensorBoard()
tensorboard.configure(argv=[None, '--logdir', log_dir])
url = tensorboard.launch()
global_features_utils.debug_and_log("Launching Tensorboard: {}".format(url))
| 1,169 | 35.5625 | 80 | py |
models | models-master/research/delf/delf/python/training/build_image_dataset.py | #!/usr/bin/python
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts landmark image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files ends up with '.jpg'.
This script converts the training and testing data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-00128
train_directory/train-00001-of-00128
...
train_directory/train-00127-of-00128
and
test_directory/test-00000-of-00128
test_directory/test-00001-of-00128
...
test_directory/test-00127-of-00128
where we have selected 128 shards for both data sets. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string, the unique id of the image file
e.g. '97c0a12e07ae8dd5' or '650c989dd3493748'
Furthermore, if the data set type is training, it would contain one more field:
image/class/label: integer, the landmark_id from the input training csv file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
from absl import app
from absl import flags
import numpy as np
import pandas as pd
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string('train_directory', '/tmp/', 'Training data directory.')
flags.DEFINE_string('test_directory', None,
'(Optional) Testing data directory. Required only if '
'test_csv_path is not None.')
flags.DEFINE_string('output_directory', '/tmp/', 'Output data directory.')
flags.DEFINE_string('train_csv_path', '/tmp/train.csv',
'Training data csv file path.')
flags.DEFINE_string('train_clean_csv_path', None,
('(Optional) Clean training data csv file path. '
'If provided, filters images keeping the ones listed in '
'this file. In this case, also outputs a CSV file '
'relabeling.csv mapping new labels to old ones.'))
flags.DEFINE_string('test_csv_path', None,
                    '(Optional) Testing data csv file path. If None or absent, '
                    'TFRecords for the images in the test dataset are not '
                    'generated.')
flags.DEFINE_integer('num_shards', 128, 'Number of shards in output data.')
flags.DEFINE_boolean('generate_train_validation_splits', False,
                     '(Optional) Whether to split the train dataset into '
'TRAIN and VALIDATION splits.')
flags.DEFINE_float('validation_split_size', 0.2,
                   '(Optional) The size of the VALIDATION split as a fraction '
'of the train dataset.')
flags.DEFINE_integer('seed', 0,
                     '(Optional) The seed to be used while shuffling the train '
                     'dataset when generating the TRAIN and VALIDATION splits. '
                     'Recommended for splits reproducibility purposes.')
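# Example invocation (illustrative; all paths are hypothetical):
#   python build_image_dataset.py \
#     --train_csv_path=/path/to/train.csv \
#     --train_clean_csv_path=/path/to/train_clean.csv \
#     --train_directory=/path/to/train_images \
#     --output_directory=/path/to/tfrecords \
#     --num_shards=128 \
#     --generate_train_validation_splits \
#     --validation_split_size=0.2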
_FILE_IDS_KEY = 'file_ids'
_IMAGE_PATHS_KEY = 'image_paths'
_LABELS_KEY = 'labels'
_TEST_SPLIT = 'test'
_TRAIN_SPLIT = 'train'
_VALIDATION_SPLIT = 'validation'
def _get_all_image_files_and_labels(name, csv_path, image_dir):
"""Process input and get the image file paths, image ids and the labels.
Args:
name: 'train' or 'test'.
csv_path: path to the Google-landmark Dataset csv Data Sources files.
image_dir: directory that stores downloaded images.
Returns:
image_paths: the paths to all images in the image_dir.
file_ids: the unique ids of images.
labels: the landmark id of all images. When name='test', the returned labels
will be an empty list.
Raises:
ValueError: if input name is not supported.
"""
image_paths = tf.io.gfile.glob(os.path.join(image_dir, '*.jpg'))
file_ids = [os.path.basename(os.path.normpath(f))[:-4] for f in image_paths]
if name == _TRAIN_SPLIT:
with tf.io.gfile.GFile(csv_path, 'rb') as csv_file:
df = pd.read_csv(csv_file)
df = df.set_index('id')
labels = [int(df.loc[fid]['landmark_id']) for fid in file_ids]
elif name == _TEST_SPLIT:
labels = []
else:
raise ValueError('Unsupported dataset split name: %s' % name)
return image_paths, file_ids, labels
def _get_clean_train_image_files_and_labels(csv_path, image_dir):
"""Get image file paths, image ids and labels for the clean training split.
Args:
csv_path: path to the Google-landmark Dataset v2 CSV Data Sources files
of the clean train dataset. Assumes CSV header landmark_id;images.
image_dir: directory that stores downloaded images.
Returns:
image_paths: the paths to all images in the image_dir.
file_ids: the unique ids of images.
labels: the landmark id of all images.
relabeling: relabeling rules created to replace actual labels with
a continuous set of labels.
"""
# Load the content of the CSV file (landmark_id/label -> images).
with tf.io.gfile.GFile(csv_path, 'rb') as csv_file:
df = pd.read_csv(csv_file)
# Create the dictionary (key = image_id, value = {label, file_id}).
images = {}
for _, row in df.iterrows():
label = row['landmark_id']
for file_id in row['images'].split(' '):
images[file_id] = {}
images[file_id]['label'] = label
images[file_id]['file_id'] = file_id
# Add the full image path to the dictionary of images.
image_paths = tf.io.gfile.glob(os.path.join(image_dir, '*.jpg'))
for image_path in image_paths:
file_id = os.path.basename(os.path.normpath(image_path))[:-4]
if file_id in images:
images[file_id]['image_path'] = image_path
# Explode the dictionary into lists (1 per image attribute).
image_paths = []
file_ids = []
labels = []
for _, value in images.items():
image_paths.append(value['image_path'])
file_ids.append(value['file_id'])
labels.append(value['label'])
# Relabel image labels to contiguous values.
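  # For example, labels [12, 5, 12, 40] produce the relabeling
  # {5: 0, 12: 1, 40: 2} and new_labels [1, 0, 1, 2].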
unique_labels = sorted(set(labels))
relabeling = {label: index for index, label in enumerate(unique_labels)}
new_labels = [relabeling[label] for label in labels]
return image_paths, file_ids, new_labels, relabeling
def _process_image(filename):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.jpg'.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
Raises:
ValueError: if parsed image has wrong number of dimensions or channels.
"""
# Read the image file.
with tf.io.gfile.GFile(filename, 'rb') as f:
image_data = f.read()
# Decode the RGB JPEG.
image = tf.io.decode_jpeg(image_data, channels=3)
# Check that image converted to RGB
if len(image.shape) != 3:
    raise ValueError('The parsed image number of dimensions is not 3 but %d' %
                     len(image.shape))
height = image.shape[0]
width = image.shape[1]
if image.shape[2] != 3:
raise ValueError('The parsed image channels is not 3 but %d' %
(image.shape[2]))
return image_data, height, width
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(file_id, image_buffer, height, width, label=None):
"""Build an Example proto for the given inputs.
Args:
file_id: string, unique id of an image file, e.g., '97c0a12e07ae8dd5'.
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
label: integer, the landmark id and prediction label.
Returns:
Example proto.
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
features = {
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace.encode('utf-8')),
'image/channels': _int64_feature(channels),
'image/format': _bytes_feature(image_format.encode('utf-8')),
'image/id': _bytes_feature(file_id.encode('utf-8')),
'image/encoded': _bytes_feature(image_buffer)
}
if label is not None:
features['image/class/label'] = _int64_feature(label)
example = tf.train.Example(features=tf.train.Features(feature=features))
return example
def _write_tfrecord(output_prefix, image_paths, file_ids, labels):
"""Read image files and write image and label data into TFRecord files.
Args:
output_prefix: string, the prefix of output files, e.g. 'train'.
image_paths: list of strings, the paths to images to be converted.
file_ids: list of strings, the image unique ids.
labels: list of integers, the landmark ids of images. It is an empty list
when output_prefix='test'.
Raises:
ValueError: if the length of input images, ids and labels don't match
"""
if output_prefix == _TEST_SPLIT:
labels = [None] * len(image_paths)
if not len(image_paths) == len(file_ids) == len(labels):
    raise ValueError('length of image_paths, file_ids, labels should be the' +
' same. But they are %d, %d, %d, respectively' %
(len(image_paths), len(file_ids), len(labels)))
  spacing = np.linspace(0, len(image_paths), FLAGS.num_shards + 1, dtype=int)
for shard in range(FLAGS.num_shards):
output_file = os.path.join(
FLAGS.output_directory,
'%s-%.5d-of-%.5d' % (output_prefix, shard, FLAGS.num_shards))
writer = tf.io.TFRecordWriter(output_file)
print('Processing shard ', shard, ' and writing file ', output_file)
for i in range(spacing[shard], spacing[shard + 1]):
image_buffer, height, width = _process_image(image_paths[i])
example = _convert_to_example(file_ids[i], image_buffer, height, width,
labels[i])
writer.write(example.SerializeToString())
writer.close()
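# Illustrative note on sharding (assumed values): with 10 images and
# FLAGS.num_shards == 4, np.linspace(0, 10, 5, dtype=int) yields
# [0, 2, 5, 7, 10], so the shards cover index ranges [0, 2), [2, 5), [5, 7)
# and [7, 10).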
def _write_relabeling_rules(relabeling_rules):
"""Write to a file the relabeling rules when the clean train dataset is used.
Args:
relabeling_rules: dictionary of relabeling rules applied when the clean
train dataset is used (key = old_label, value = new_label).
"""
relabeling_file_name = os.path.join(FLAGS.output_directory,
'relabeling.csv')
with tf.io.gfile.GFile(relabeling_file_name, 'w') as relabeling_file:
csv_writer = csv.writer(relabeling_file, delimiter=',')
csv_writer.writerow(['new_label', 'old_label'])
for old_label, new_label in relabeling_rules.items():
csv_writer.writerow([new_label, old_label])
def _shuffle_by_columns(np_array, random_state):
"""Shuffle the columns of a 2D numpy array.
Args:
np_array: array to shuffle.
random_state: numpy RandomState to be used for shuffling.
Returns:
The shuffled array.
"""
columns = np_array.shape[1]
columns_indices = np.arange(columns)
random_state.shuffle(columns_indices)
return np_array[:, columns_indices]
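# Illustrative note: shuffling columns keeps each image's attributes together.
# For a hypothetical array [['a', 'b', 'c'], ['1', '2', '3']] and a column
# permutation [2, 0, 1], the result is [['c', 'a', 'b'], ['3', '1', '2']].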
def _build_train_and_validation_splits(image_paths, file_ids, labels,
validation_split_size, seed):
"""Create TRAIN and VALIDATION splits containg all labels in equal proportion.
Args:
image_paths: list of paths to the image files in the train dataset.
file_ids: list of image file ids in the train dataset.
labels: list of image labels in the train dataset.
validation_split_size: size of the VALIDATION split as a ratio of the train
dataset.
seed: seed to use for shuffling the dataset for reproducibility purposes.
Returns:
splits : tuple containing the TRAIN and VALIDATION splits.
Raises:
ValueError: if the image attributes arrays don't all have the same length,
which makes the shuffling impossible.
"""
# Ensure all image attribute arrays have the same length.
total_images = len(file_ids)
if not (len(image_paths) == total_images and len(labels) == total_images):
raise ValueError('Inconsistencies between number of file_ids (%d), number '
                     'of image_paths (%d) and number of labels (%d). Cannot '
                     'shuffle the train dataset.' % (
                         total_images, len(image_paths), len(labels)))
# Stack all image attributes arrays in a single 2D array of dimensions
  # (3, number of images) and group by label the indices of datapoints in the
# image attributes arrays. Explicitly convert label types from 'int' to 'str'
# to avoid implicit conversion during stacking with image_paths and file_ids
# which are 'str'.
labels_str = [str(label) for label in labels]
image_attrs = np.stack((image_paths, file_ids, labels_str))
image_attrs_idx_by_label = {}
for index, label in enumerate(labels):
if label not in image_attrs_idx_by_label:
image_attrs_idx_by_label[label] = []
image_attrs_idx_by_label[label].append(index)
# Create subsets of image attributes by label, shuffle them separately and
# split each subset into TRAIN and VALIDATION splits based on the size of the
# validation split.
splits = {
_VALIDATION_SPLIT: [],
_TRAIN_SPLIT: []
}
rs = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(seed)))
for label, indexes in image_attrs_idx_by_label.items():
# Create the subset for the current label.
image_attrs_label = image_attrs[:, indexes]
# Shuffle the current label subset.
image_attrs_label = _shuffle_by_columns(image_attrs_label, rs)
# Split the current label subset into TRAIN and VALIDATION splits and add
# each split to the list of all splits.
images_per_label = image_attrs_label.shape[1]
cutoff_idx = max(1, int(validation_split_size * images_per_label))
splits[_VALIDATION_SPLIT].append(image_attrs_label[:, 0 : cutoff_idx])
splits[_TRAIN_SPLIT].append(image_attrs_label[:, cutoff_idx : ])
# Concatenate all subsets of image attributes into TRAIN and VALIDATION splits
# and reshuffle them again to ensure variance of labels across batches.
validation_split = _shuffle_by_columns(
np.concatenate(splits[_VALIDATION_SPLIT], axis=1), rs)
train_split = _shuffle_by_columns(
np.concatenate(splits[_TRAIN_SPLIT], axis=1), rs)
# Unstack the image attribute arrays in the TRAIN and VALIDATION splits and
# convert them back to lists. Convert labels back to 'int' from 'str'
  # following the explicit type change from 'int' to 'str' for stacking.
return (
{
_IMAGE_PATHS_KEY: validation_split[0, :].tolist(),
_FILE_IDS_KEY: validation_split[1, :].tolist(),
_LABELS_KEY: [int(label) for label in validation_split[2, :].tolist()]
}, {
_IMAGE_PATHS_KEY: train_split[0, :].tolist(),
_FILE_IDS_KEY: train_split[1, :].tolist(),
_LABELS_KEY: [int(label) for label in train_split[2, :].tolist()]
})
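# Illustrative note (assumed values): with validation_split_size == 0.2, a
# label with 10 images contributes its first max(1, int(0.2 * 10)) == 2
# shuffled columns to the VALIDATION split and the remaining 8 to TRAIN, so
# every label is represented in both splits in roughly the same proportion.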
def _build_train_tfrecord_dataset(csv_path,
clean_csv_path,
image_dir,
generate_train_validation_splits,
validation_split_size,
seed):
"""Build a TFRecord dataset for the train split.
Args:
csv_path: path to the train Google-landmark Dataset csv Data Sources files.
clean_csv_path: path to the Google-landmark Dataset v2 CSV Data Sources
files of the clean train dataset.
image_dir: directory that stores downloaded images.
    generate_train_validation_splits: whether to split the train dataset into
TRAIN and VALIDATION splits.
validation_split_size: size of the VALIDATION split as a ratio of the train
dataset. Only used if 'generate_train_validation_splits' is True.
seed: seed to use for shuffling the dataset for reproducibility purposes.
Only used if 'generate_train_validation_splits' is True.
Returns:
Nothing. After the function call, sharded TFRecord files are materialized.
Raises:
ValueError: if the size of the VALIDATION split is outside (0,1) when TRAIN
and VALIDATION splits need to be generated.
"""
# Make sure the size of the VALIDATION split is inside (0, 1) if we need to
# generate the TRAIN and VALIDATION splits.
if generate_train_validation_splits:
if validation_split_size <= 0 or validation_split_size >= 1:
      raise ValueError('Invalid VALIDATION split size. Expected inside (0,1) '
                       'but received %f.' % validation_split_size)
if clean_csv_path:
# Load clean train images and labels and write the relabeling rules.
(image_paths, file_ids, labels,
relabeling_rules) = _get_clean_train_image_files_and_labels(clean_csv_path,
image_dir)
_write_relabeling_rules(relabeling_rules)
else:
# Load all train images.
image_paths, file_ids, labels = _get_all_image_files_and_labels(
_TRAIN_SPLIT, csv_path, image_dir)
if generate_train_validation_splits:
# Generate the TRAIN and VALIDATION splits and write them to TFRecord.
validation_split, train_split = _build_train_and_validation_splits(
image_paths, file_ids, labels, validation_split_size, seed)
_write_tfrecord(_VALIDATION_SPLIT,
validation_split[_IMAGE_PATHS_KEY],
validation_split[_FILE_IDS_KEY],
validation_split[_LABELS_KEY])
_write_tfrecord(_TRAIN_SPLIT,
train_split[_IMAGE_PATHS_KEY],
train_split[_FILE_IDS_KEY],
train_split[_LABELS_KEY])
else:
# Write to TFRecord a single split, TRAIN.
_write_tfrecord(_TRAIN_SPLIT, image_paths, file_ids, labels)
def _build_test_tfrecord_dataset(csv_path, image_dir):
"""Build a TFRecord dataset for the 'test' split.
Args:
csv_path: path to the 'test' Google-landmark Dataset csv Data Sources files.
image_dir: directory that stores downloaded images.
Returns:
Nothing. After the function call, sharded TFRecord files are materialized.
"""
image_paths, file_ids, labels = _get_all_image_files_and_labels(
_TEST_SPLIT, csv_path, image_dir)
_write_tfrecord(_TEST_SPLIT, image_paths, file_ids, labels)
def main(unused_argv):
_build_train_tfrecord_dataset(FLAGS.train_csv_path,
FLAGS.train_clean_csv_path,
FLAGS.train_directory,
FLAGS.generate_train_validation_splits,
FLAGS.validation_split_size,
FLAGS.seed)
if FLAGS.test_csv_path is not None:
_build_test_tfrecord_dataset(FLAGS.test_csv_path, FLAGS.test_directory)
if __name__ == '__main__':
app.run(main)
| 20,150 | 40.040733 | 80 | py |
models | models-master/research/delf/delf/python/training/__init__.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for DELF training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from delf.python.training import build_image_dataset
# pylint: enable=unused-import
| 946 | 40.173913 | 80 | py |
models | models-master/research/delf/delf/python/training/global_features_utils.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for the global model training."""
import os
from absl import logging
import numpy as np
import tensorflow as tf
from delf.python.datasets.revisited_op import dataset as revisited_dataset
class AverageMeter():
"""Computes and stores the average and current value of loss."""
def __init__(self):
"""Initialization of the AverageMeter."""
self.reset()
def reset(self):
"""Resets all the values."""
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
"""Updates values in the AverageMeter.
Args:
val: Float, loss value.
n: Integer, number of instances.
"""
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
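# Illustrative usage sketch of AverageMeter (hypothetical values):
#   meter = AverageMeter()
#   meter.update(2.0)        # val=2.0, sum=2.0, count=1, avg=2.0
#   meter.update(4.0, n=3)   # val=4.0, sum=14.0, count=4, avg=3.5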
def compute_metrics_and_print(dataset_name,
sorted_index_ids,
ground_truth,
desired_pr_ranks=None,
log=True):
"""Computes and logs ground-truth metrics for Revisited datasets.
Args:
dataset_name: String, name of the dataset.
sorted_index_ids: Integer NumPy array of shape [#queries, #index_images].
For each query, contains an array denoting the most relevant index images,
sorted from most to least relevant.
ground_truth: List containing ground-truth information for dataset. Each
entry is a dict corresponding to the ground-truth information for a query.
The dict has keys 'ok' and 'junk', mapping to a NumPy array of integers.
desired_pr_ranks: List of integers containing the desired precision/recall
ranks to be reported. E.g., if precision@1/recall@1 and
precision@10/recall@10 are desired, this should be set to [1, 10]. The
largest item should be <= #sorted_index_ids. Default: [1, 5, 10].
log: Whether to log results using logging.info().
Returns:
mAP: (metricsE, metricsM, metricsH) Tuple of the metrics for different
      levels of complexity. Each metric is a list containing:
mean_average_precision (float), mean_precisions (NumPy array of
floats, with shape [len(desired_pr_ranks)]), mean_recalls (NumPy array
of floats, with shape [len(desired_pr_ranks)]), average_precisions
(NumPy array of floats, with shape [#queries]), precisions (NumPy array of
floats, with shape [#queries, len(desired_pr_ranks)]), recalls (NumPy
array of floats, with shape [#queries, len(desired_pr_ranks)]).
Raises:
ValueError: If an unknown dataset name is provided as an argument.
"""
if dataset_name not in revisited_dataset.DATASET_NAMES:
    raise ValueError('Unknown dataset: {}!'.format(dataset_name))
if desired_pr_ranks is None:
desired_pr_ranks = [1, 5, 10]
(easy_ground_truth, medium_ground_truth,
hard_ground_truth) = revisited_dataset.ParseEasyMediumHardGroundTruth(
ground_truth)
metrics_easy = revisited_dataset.ComputeMetrics(sorted_index_ids,
easy_ground_truth,
desired_pr_ranks)
metrics_medium = revisited_dataset.ComputeMetrics(sorted_index_ids,
medium_ground_truth,
desired_pr_ranks)
metrics_hard = revisited_dataset.ComputeMetrics(sorted_index_ids,
hard_ground_truth,
desired_pr_ranks)
debug_and_log(
'>> {}: mAP E: {}, M: {}, H: {}'.format(
dataset_name, np.around(metrics_easy[0] * 100, decimals=2),
np.around(metrics_medium[0] * 100, decimals=2),
np.around(metrics_hard[0] * 100, decimals=2)),
log=log)
debug_and_log(
'>> {}: mP@k{} E: {}, M: {}, H: {}'.format(
dataset_name, desired_pr_ranks,
np.around(metrics_easy[1] * 100, decimals=2),
np.around(metrics_medium[1] * 100, decimals=2),
np.around(metrics_hard[1] * 100, decimals=2)),
log=log)
return metrics_easy, metrics_medium, metrics_hard
def htime(time_difference):
"""Time formatting function.
Depending on the value of `time_difference` outputs time in an appropriate
time format.
Args:
time_difference: Float, time difference between the two events.
Returns:
time: String representing time in an appropriate time format.
"""
time_difference = round(time_difference)
days = time_difference // 86400
hours = time_difference // 3600 % 24
minutes = time_difference // 60 % 60
seconds = time_difference % 60
if days > 0:
return '{:d}d {:d}h {:d}m {:d}s'.format(days, hours, minutes, seconds)
if hours > 0:
return '{:d}h {:d}m {:d}s'.format(hours, minutes, seconds)
if minutes > 0:
return '{:d}m {:d}s'.format(minutes, seconds)
return '{:d}s'.format(seconds)
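# Illustrative examples of the formatting above: htime(42) -> '42s',
# htime(3750) -> '1h 2m 30s', htime(90061) -> '1d 1h 1m 1s'.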
def debug_and_log(msg, debug=True, log=True, debug_on_the_same_line=False):
"""Outputs `msg` to both stdout (if in the debug mode) and the log file.
Args:
msg: String, message to be logged.
debug: Bool, if True, will print `msg` to stdout.
log: Bool, if True, will redirect `msg` to the logfile.
debug_on_the_same_line: Bool, if True, will print `msg` to stdout without a
new line. When using this mode, logging to a logfile is disabled.
"""
if debug_on_the_same_line:
print(msg, end='')
return
if debug:
print(msg)
if log:
logging.info(msg)
def get_standard_keras_models():
"""Gets the standard keras model names.
Returns:
model_names: List, names of the standard keras models.
"""
model_names = sorted(
name for name in tf.keras.applications.__dict__
if not name.startswith('__') and
callable(tf.keras.applications.__dict__[name]))
return model_names
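# Illustrative note: the returned list depends on the installed TensorFlow
# version; it typically includes names such as 'ResNet50', 'ResNet101' and
# 'VGG16', any of which can be passed as the model architecture name.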
def create_model_directory(training_dataset, arch, pool, whitening, pretrained,
loss, loss_margin, optimizer, lr, weight_decay,
neg_num, query_size, pool_size, batch_size,
update_every, image_size, directory):
"""Based on the model parameters, creates the model directory.
If the model directory does not exist, the directory is created.
Args:
training_dataset: String, training dataset name.
arch: String, model architecture.
pool: String, pooling option.
whitening: Bool, whether the model is trained with global whitening.
pretrained: Bool, whether the model is initialized with the precomputed
weights.
loss: String, training loss type.
loss_margin: Float, loss margin.
    optimizer: String, used optimizer.
lr: Float, initial learning rate.
weight_decay: Float, weight decay.
neg_num: Integer, Number of negative images per train/val tuple.
query_size: Integer, number of queries per one training epoch.
pool_size: Integer, size of the pool for hard negative mining.
batch_size: Integer, batch size.
update_every: Integer, frequency of the model weights update.
image_size: Integer, maximum size of longer image side used for training.
directory: String, destination where trained network should be saved.
Returns:
folder: String, path to the model folder.
"""
folder = '{}_{}_{}'.format(training_dataset, arch, pool)
if whitening:
folder += '_whiten'
if not pretrained:
folder += '_notpretrained'
folder += ('_{}_m{:.2f}_{}_lr{:.1e}_wd{:.1e}_nnum{}_qsize{}_psize{}_bsize{}'
'_uevery{}_imsize{}').format(loss, loss_margin, optimizer, lr,
weight_decay, neg_num, query_size,
pool_size, batch_size, update_every,
image_size)
folder = os.path.join(directory, folder)
debug_and_log(
      '>> Creating directory if it does not exist:\n>> \'{}\''.format(folder))
if not os.path.exists(folder):
os.makedirs(folder)
return folder
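# Illustrative example (assumed parameter values): training on
# 'retrieval-SfM-120k' with arch 'ResNet101', pool 'gem' and whitening enabled
# yields a folder name of the form
# 'retrieval-SfM-120k_ResNet101_gem_whiten_<loss>_m<margin>_<optimizer>_...',
# with the remaining hyperparameters appended as in the format string above.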
| 8,790 | 38.599099 | 80 | py |
models | models-master/research/delf/delf/python/training/train.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for DELF/G on Google Landmarks Dataset.
Uses classification loss, with MirroredStrategy, to support running on multiple
GPUs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_probability as tfp
# Placeholder for internal import. Do not remove this line.
from delf.python.datasets.google_landmarks_dataset import googlelandmarks as gld
from delf.python.training.model import delf_model
from delf.python.training.model import delg_model
FLAGS = flags.FLAGS
flags.DEFINE_boolean('debug', False, 'Debug mode.')
flags.DEFINE_string('logdir', '/tmp/delf', 'WithTensorBoard logdir.')
flags.DEFINE_string('train_file_pattern', '/tmp/data/train*',
'File pattern of training dataset files.')
flags.DEFINE_string('validation_file_pattern', '/tmp/data/validation*',
'File pattern of validation dataset files.')
flags.DEFINE_enum(
'dataset_version', 'gld_v1', ['gld_v1', 'gld_v2', 'gld_v2_clean'],
'Google Landmarks dataset version, used to determine the number of '
'classes.')
flags.DEFINE_integer('seed', 0, 'Seed to training dataset.')
flags.DEFINE_float('initial_lr', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('batch_size', 32, 'Global batch size.')
flags.DEFINE_integer('max_iters', 500000, 'Maximum iterations.')
flags.DEFINE_boolean('block3_strides', True, 'Whether to use block3_strides.')
flags.DEFINE_boolean('use_augmentation', True,
'Whether to use ImageNet style augmentation.')
flags.DEFINE_string(
'imagenet_checkpoint', None,
'ImageNet checkpoint for ResNet backbone. If None, no checkpoint is used.')
flags.DEFINE_float(
'attention_loss_weight', 1.0,
'Weight to apply to the attention loss when calculating the '
'total loss of the model.')
flags.DEFINE_boolean('delg_global_features', False,
'Whether to train a DELG model.')
flags.DEFINE_float(
'delg_gem_power', 3.0, 'Power for Generalized Mean pooling. Used only if '
'delg_global_features=True.')
flags.DEFINE_integer(
'delg_embedding_layer_dim', 2048,
    'Size of the FC whitening layer (embedding layer). Used only if '
    'delg_global_features=True.')
flags.DEFINE_float(
'delg_scale_factor_init', 45.25,
'Initial value of the scaling factor of the cosine logits. The default '
'value is sqrt(2048). Used only if delg_global_features=True.')
flags.DEFINE_float('delg_arcface_margin', 0.1,
'ArcFace margin. Used only if delg_global_features=True.')
flags.DEFINE_integer('image_size', 321, 'Size of each image side to use.')
flags.DEFINE_boolean('use_autoencoder', True,
'Whether to train an autoencoder.')
flags.DEFINE_float(
'reconstruction_loss_weight', 10.0,
    'Weight to apply to the reconstruction loss from the autoencoder when '
'calculating total loss of the model. Used only if use_autoencoder=True.')
flags.DEFINE_float(
'autoencoder_dimensions', 128,
    'Number of dimensions of the autoencoder. Used only if '
'use_autoencoder=True.')
flags.DEFINE_float(
'local_feature_map_channels', 1024,
'Number of channels at backbone layer used for local feature extraction. '
    'Default value 1024 is the number of channels of block3. Used only if '
'use_autoencoder=True.')
def _record_accuracy(metric, logits, labels):
"""Record accuracy given predicted logits and ground-truth labels."""
softmax_probabilities = tf.keras.layers.Softmax()(logits)
metric.update_state(labels, softmax_probabilities)
def _attention_summaries(scores, global_step):
"""Record statistics of the attention score."""
tf.summary.image(
'batch_attention',
scores / tf.reduce_max(scores + 1e-3),
step=global_step)
tf.summary.scalar('attention/max', tf.reduce_max(scores), step=global_step)
tf.summary.scalar('attention/min', tf.reduce_min(scores), step=global_step)
tf.summary.scalar('attention/mean', tf.reduce_mean(scores), step=global_step)
tf.summary.scalar(
'attention/percent_25',
tfp.stats.percentile(scores, 25.0),
step=global_step)
tf.summary.scalar(
'attention/percent_50',
tfp.stats.percentile(scores, 50.0),
step=global_step)
tf.summary.scalar(
'attention/percent_75',
tfp.stats.percentile(scores, 75.0),
step=global_step)
def create_model(num_classes):
"""Define DELF model, and initialize classifiers."""
if FLAGS.delg_global_features:
model = delg_model.Delg(
block3_strides=FLAGS.block3_strides,
name='DELG',
gem_power=FLAGS.delg_gem_power,
embedding_layer_dim=FLAGS.delg_embedding_layer_dim,
scale_factor_init=FLAGS.delg_scale_factor_init,
arcface_margin=FLAGS.delg_arcface_margin,
use_dim_reduction=FLAGS.use_autoencoder,
reduced_dimension=FLAGS.autoencoder_dimensions,
dim_expand_channels=FLAGS.local_feature_map_channels)
else:
model = delf_model.Delf(
block3_strides=FLAGS.block3_strides,
name='DELF',
use_dim_reduction=FLAGS.use_autoencoder,
reduced_dimension=FLAGS.autoencoder_dimensions,
dim_expand_channels=FLAGS.local_feature_map_channels)
model.init_classifiers(num_classes)
return model
def _learning_rate_schedule(global_step_value, max_iters, initial_lr):
"""Calculates learning_rate with linear decay.
Args:
global_step_value: int, global step.
max_iters: int, maximum iterations.
initial_lr: float, initial learning rate.
Returns:
lr: float, learning rate.
"""
lr = initial_lr * (1.0 - global_step_value / max_iters)
return lr
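# Illustrative values (assuming initial_lr=0.01 and max_iters=500000):
#   _learning_rate_schedule(0, 500000, 0.01)      -> 0.01
#   _learning_rate_schedule(250000, 500000, 0.01) -> 0.005
#   _learning_rate_schedule(500000, 500000, 0.01) -> 0.0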
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
#-------------------------------------------------------------
# Log flags used.
logging.info('Running training script with\n')
logging.info('logdir= %s', FLAGS.logdir)
logging.info('initial_lr= %f', FLAGS.initial_lr)
logging.info('block3_strides= %s', str(FLAGS.block3_strides))
# ------------------------------------------------------------
# Create the strategy.
strategy = tf.distribute.MirroredStrategy()
logging.info('Number of devices: %d', strategy.num_replicas_in_sync)
if FLAGS.debug:
print('Number of devices:', strategy.num_replicas_in_sync)
max_iters = FLAGS.max_iters
global_batch_size = FLAGS.batch_size
image_size = FLAGS.image_size
num_eval_batches = int(50000 / global_batch_size)
report_interval = 100
eval_interval = 1000
save_interval = 1000
initial_lr = FLAGS.initial_lr
clip_val = tf.constant(10.0)
if FLAGS.debug:
tf.config.run_functions_eagerly(True)
global_batch_size = 4
max_iters = 100
num_eval_batches = 1
save_interval = 1
report_interval = 10
# Determine the number of classes based on the version of the dataset.
gld_info = gld.GoogleLandmarksInfo()
num_classes = gld_info.num_classes[FLAGS.dataset_version]
# ------------------------------------------------------------
# Create the distributed train/validation sets.
train_dataset = gld.CreateDataset(
file_pattern=FLAGS.train_file_pattern,
batch_size=global_batch_size,
image_size=image_size,
augmentation=FLAGS.use_augmentation,
seed=FLAGS.seed)
validation_dataset = gld.CreateDataset(
file_pattern=FLAGS.validation_file_pattern,
batch_size=global_batch_size,
image_size=image_size,
augmentation=False,
seed=FLAGS.seed)
train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
validation_dist_dataset = strategy.experimental_distribute_dataset(
validation_dataset)
train_iter = iter(train_dist_dataset)
validation_iter = iter(validation_dist_dataset)
# Create a checkpoint directory to store the checkpoints.
checkpoint_prefix = os.path.join(FLAGS.logdir, 'delf_tf2-ckpt')
# ------------------------------------------------------------
# Finally, we do everything in distributed scope.
with strategy.scope():
# Compute loss.
# Set reduction to `none` so we can do the reduction afterwards and divide
# by global batch size.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
def compute_loss(labels, predictions):
per_example_loss = loss_object(labels, predictions)
return tf.nn.compute_average_loss(
per_example_loss, global_batch_size=global_batch_size)
# Set up metrics.
desc_validation_loss = tf.keras.metrics.Mean(name='desc_validation_loss')
attn_validation_loss = tf.keras.metrics.Mean(name='attn_validation_loss')
desc_train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name='desc_train_accuracy')
attn_train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name='attn_train_accuracy')
desc_validation_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name='desc_validation_accuracy')
attn_validation_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name='attn_validation_accuracy')
# ------------------------------------------------------------
# Setup DELF model and optimizer.
model = create_model(num_classes)
logging.info('Model, datasets loaded.\nnum_classes= %d', num_classes)
optimizer = tf.keras.optimizers.SGD(learning_rate=initial_lr, momentum=0.9)
# Setup summary writer.
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.logdir, 'train_logs'), flush_millis=10000)
# Setup checkpoint directory.
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
manager = tf.train.CheckpointManager(
checkpoint,
checkpoint_prefix,
max_to_keep=10,
keep_checkpoint_every_n_hours=3)
# Restores the checkpoint, if existing.
checkpoint.restore(manager.latest_checkpoint)
# ------------------------------------------------------------
# Train step to run on one GPU.
def train_step(inputs):
"""Train one batch."""
images, labels = inputs
# Temporary workaround to avoid some corrupted labels.
labels = tf.clip_by_value(labels, 0, model.num_classes)
def _backprop_loss(tape, loss, weights):
"""Backpropogate losses using clipped gradients.
Args:
tape: gradient tape.
loss: scalar Tensor, loss value.
weights: keras model weights.
"""
gradients = tape.gradient(loss, weights)
clipped, _ = tf.clip_by_global_norm(gradients, clip_norm=clip_val)
optimizer.apply_gradients(zip(clipped, weights))
# Record gradients and loss through backbone.
with tf.GradientTape() as gradient_tape:
# Make a forward pass to calculate prelogits.
(desc_prelogits, attn_prelogits, attn_scores, backbone_blocks,
dim_expanded_features, _) = model.global_and_local_forward_pass(images)
# Calculate global loss by applying the descriptor classifier.
if FLAGS.delg_global_features:
desc_logits = model.desc_classification(desc_prelogits, labels)
else:
desc_logits = model.desc_classification(desc_prelogits)
desc_loss = compute_loss(labels, desc_logits)
# Calculate attention loss by applying the attention block classifier.
attn_logits = model.attn_classification(attn_prelogits)
attn_loss = compute_loss(labels, attn_logits)
# Calculate reconstruction loss between the attention prelogits and the
# backbone.
if FLAGS.use_autoencoder:
block3 = tf.stop_gradient(backbone_blocks['block3'])
reconstruction_loss = tf.math.reduce_mean(
tf.keras.losses.MSE(block3, dim_expanded_features))
else:
reconstruction_loss = 0
# Cumulate global loss, attention loss and reconstruction loss.
total_loss = (
desc_loss + FLAGS.attention_loss_weight * attn_loss +
FLAGS.reconstruction_loss_weight * reconstruction_loss)
# Perform backpropagation through the descriptor and attention layers
# together. Note that this will increment the number of iterations of
# "optimizer".
_backprop_loss(gradient_tape, total_loss, model.trainable_weights)
# Step number, for summary purposes.
global_step = optimizer.iterations
# Input image-related summaries.
tf.summary.image('batch_images', (images + 1.0) / 2.0, step=global_step)
tf.summary.scalar(
'image_range/max', tf.reduce_max(images), step=global_step)
tf.summary.scalar(
'image_range/min', tf.reduce_min(images), step=global_step)
# Attention and sparsity summaries.
_attention_summaries(attn_scores, global_step)
activations_zero_fractions = {
'sparsity/%s' % k: tf.nn.zero_fraction(v)
for k, v in backbone_blocks.items()
}
for k, v in activations_zero_fractions.items():
tf.summary.scalar(k, v, step=global_step)
# Scaling factor summary for cosine logits for a DELG model.
if FLAGS.delg_global_features:
tf.summary.scalar(
'desc/scale_factor', model.scale_factor, step=global_step)
# Record train accuracies.
_record_accuracy(desc_train_accuracy, desc_logits, labels)
_record_accuracy(attn_train_accuracy, attn_logits, labels)
return desc_loss, attn_loss, reconstruction_loss
# ------------------------------------------------------------
def validation_step(inputs):
"""Validate one batch."""
images, labels = inputs
labels = tf.clip_by_value(labels, 0, model.num_classes)
# Get descriptor predictions.
blocks = {}
prelogits = model.backbone(
images, intermediates_dict=blocks, training=False)
if FLAGS.delg_global_features:
logits = model.desc_classification(prelogits, labels, training=False)
else:
logits = model.desc_classification(prelogits, training=False)
softmax_probabilities = tf.keras.layers.Softmax()(logits)
validation_loss = loss_object(labels, logits)
desc_validation_loss.update_state(validation_loss)
desc_validation_accuracy.update_state(labels, softmax_probabilities)
# Get attention predictions.
block3 = blocks['block3'] # pytype: disable=key-error
prelogits, _, _ = model.attention(block3, training=False)
logits = model.attn_classification(prelogits, training=False)
softmax_probabilities = tf.keras.layers.Softmax()(logits)
validation_loss = loss_object(labels, logits)
attn_validation_loss.update_state(validation_loss)
attn_validation_accuracy.update_state(labels, softmax_probabilities)
return desc_validation_accuracy.result(), attn_validation_accuracy.result(
)
# `run` replicates the provided computation and runs it
# with the distributed input.
@tf.function
def distributed_train_step(dataset_inputs):
"""Get the actual losses."""
      # Per-replica losses: descriptor, attention and reconstruction.
desc_per_replica_loss, attn_per_replica_loss, recon_per_replica_loss = (
strategy.run(train_step, args=(dataset_inputs,)))
# Reduce over the replicas.
desc_global_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, desc_per_replica_loss, axis=None)
attn_global_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, attn_per_replica_loss, axis=None)
recon_global_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, recon_per_replica_loss, axis=None)
return desc_global_loss, attn_global_loss, recon_global_loss
@tf.function
def distributed_validation_step(dataset_inputs):
return strategy.run(validation_step, args=(dataset_inputs,))
# ------------------------------------------------------------
# *** TRAIN LOOP ***
with summary_writer.as_default():
record_cond = lambda: tf.equal(optimizer.iterations % report_interval, 0)
with tf.summary.record_if(record_cond):
global_step_value = optimizer.iterations.numpy()
# TODO(dananghel): try to load pretrained weights at backbone creation.
# Load pretrained weights for ResNet50 trained on ImageNet.
if (FLAGS.imagenet_checkpoint is not None) and (not global_step_value):
logging.info('Attempting to load ImageNet pretrained weights.')
input_batch = next(train_iter)
_, _, _ = distributed_train_step(input_batch)
model.backbone.restore_weights(FLAGS.imagenet_checkpoint)
logging.info('Done.')
else:
logging.info('Skip loading ImageNet pretrained weights.')
if FLAGS.debug:
model.backbone.log_weights()
last_summary_step_value = None
last_summary_time = None
while global_step_value < max_iters:
# input_batch : images(b, h, w, c), labels(b,).
try:
input_batch = next(train_iter)
except tf.errors.OutOfRangeError:
# Break if we run out of data in the dataset.
logging.info('Stopping training at global step %d, no more data',
global_step_value)
break
# Set learning rate and run the training step over num_gpu gpus.
optimizer.learning_rate = _learning_rate_schedule(
optimizer.iterations.numpy(), max_iters, initial_lr)
desc_dist_loss, attn_dist_loss, recon_dist_loss = (
distributed_train_step(input_batch))
# Step number, to be used for summary/logging.
global_step = optimizer.iterations
global_step_value = global_step.numpy()
# LR, losses and accuracies summaries.
tf.summary.scalar(
'learning_rate', optimizer.learning_rate, step=global_step)
tf.summary.scalar(
'loss/desc/crossentropy', desc_dist_loss, step=global_step)
tf.summary.scalar(
'loss/attn/crossentropy', attn_dist_loss, step=global_step)
if FLAGS.use_autoencoder:
tf.summary.scalar(
'loss/recon/mse', recon_dist_loss, step=global_step)
tf.summary.scalar(
'train_accuracy/desc',
desc_train_accuracy.result(),
step=global_step)
tf.summary.scalar(
'train_accuracy/attn',
attn_train_accuracy.result(),
step=global_step)
# Summary for number of global steps taken per second.
current_time = time.time()
if (last_summary_step_value is not None and
last_summary_time is not None):
tf.summary.scalar(
'global_steps_per_sec',
(global_step_value - last_summary_step_value) /
(current_time - last_summary_time),
step=global_step)
if tf.summary.should_record_summaries().numpy():
last_summary_step_value = global_step_value
last_summary_time = current_time
# Print to console if running locally.
if FLAGS.debug:
if global_step_value % report_interval == 0:
print(global_step.numpy())
print('desc:', desc_dist_loss.numpy())
print('attn:', attn_dist_loss.numpy())
# Validate once in {eval_interval*n, n \in N} steps.
if global_step_value % eval_interval == 0:
for i in range(num_eval_batches):
try:
validation_batch = next(validation_iter)
desc_validation_result, attn_validation_result = (
distributed_validation_step(validation_batch))
except tf.errors.OutOfRangeError:
logging.info('Stopping eval at batch %d, no more data', i)
break
# Log validation results to tensorboard.
tf.summary.scalar(
'validation/desc', desc_validation_result, step=global_step)
tf.summary.scalar(
'validation/attn', attn_validation_result, step=global_step)
logging.info('\nValidation(%f)\n', global_step_value)
logging.info(': desc: %f\n', desc_validation_result.numpy())
logging.info(': attn: %f\n', attn_validation_result.numpy())
# Print to console.
if FLAGS.debug:
print('Validation: desc:', desc_validation_result.numpy())
print(' : attn:', attn_validation_result.numpy())
# Save checkpoint once (each save_interval*n, n \in N) steps, or if
# this is the last iteration.
# TODO(andrearaujo): save only in one of the two ways. They are
# identical, the only difference is that the manager adds some extra
# prefixes and variables (eg, optimizer variables).
if (global_step_value % save_interval
== 0) or (global_step_value >= max_iters):
save_path = manager.save(checkpoint_number=global_step_value)
logging.info('Saved (%d) at %s', global_step_value, save_path)
file_path = '%s/delf_weights' % FLAGS.logdir
model.save_weights(file_path, save_format='tf')
logging.info('Saved weights (%d) at %s', global_step_value,
file_path)
# Reset metrics for next step.
desc_train_accuracy.reset_states()
attn_train_accuracy.reset_states()
desc_validation_loss.reset_states()
attn_validation_loss.reset_states()
desc_validation_accuracy.reset_states()
attn_validation_accuracy.reset_states()
logging.info('Finished training for %d steps.', max_iters)
if __name__ == '__main__':
app.run(main)
| 22,754 | 39.706619 | 80 | py |
models | models-master/research/delf/delf/python/training/global_features/train_utils.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training utilities for Global Features model."""
import os
import pickle
import time
import numpy as np
import tensorflow as tf
from delf.python import whiten
from delf.python.datasets.revisited_op import dataset as test_dataset
from delf.python.datasets.sfm120k import sfm120k
from delf.python.training import global_features_utils
from delf.python.training.model import global_model
def _compute_loss_and_gradient(criterion, model, input, target, neg_num=5):
"""Records gradients and loss through the network.
Args:
criterion: Loss function.
model: Network for the gradient computation.
input: Tuple of query, positive and negative images.
target: List of indexes to specify queries (-1), positives(1), negatives(0).
    neg_num: Integer, number of negatives per tuple.
Returns:
loss: Loss for the training step.
gradients: Computed gradients for the network trainable variables.
"""
# Record gradients and loss through the network.
with tf.GradientTape() as tape:
descriptors = tf.zeros(shape=(0, model.meta['outputdim']), dtype=tf.float32)
for img in input:
# Compute descriptor vector for each image.
o = model(tf.expand_dims(img, axis=0), training=True)
descriptors = tf.concat([descriptors, o], 0)
queries = descriptors[target == -1]
positives = descriptors[target == 1]
negatives = descriptors[target == 0]
negatives = tf.reshape(negatives, [tf.shape(queries)[0], neg_num,
model.meta['outputdim']])
# Loss calculation.
loss = criterion(queries, positives, negatives)
return loss, tape.gradient(loss, model.trainable_variables)
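# Illustrative note (assumed tuple layout): for a single training tuple with
# neg_num == 5, `input` holds 7 images (query, positive, 5 negatives) and
# `target` is [-1, 1, 0, 0, 0, 0, 0], so the masks above select one query row,
# one positive row and a [1, 5, outputdim] block of negative descriptors.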
def train_val_one_epoch(
loader, model, criterion, optimizer, epoch, train=True, batch_size=5,
query_size=2000, neg_num=5, update_every=1, debug=False):
"""Executes either training or validation step based on `train` value.
Args:
loader: Training/validation iterable dataset.
model: Network to train/validate.
criterion: Loss function.
optimizer: Network optimizer.
epoch: Integer, epoch number.
train: Bool, specifies training or validation phase.
batch_size: Integer, number of (q,p,n1,...,nN) tuples in a mini-batch.
query_size: Integer, number of queries randomly drawn per one training
epoch.
    neg_num: Integer, number of negatives per tuple.
    update_every: Integer, update model weights every N batches; used to
      handle relatively large batches. The effective batch size becomes
      update_every x batch_size.
debug: Bool, whether debug mode is used.
Returns:
average_epoch_loss: Average epoch loss.
"""
batch_time = global_features_utils.AverageMeter()
data_time = global_features_utils.AverageMeter()
losses = global_features_utils.AverageMeter()
# Retrieve all trainable variables we defined in the graph.
tvs = model.trainable_variables
accum_grads = [tf.zeros_like(tv.read_value()) for tv in tvs]
end = time.time()
batch_num = 0
print_frequency = 10
all_batch_num = query_size // batch_size
state = 'Train' if train else 'Val'
global_features_utils.debug_and_log('>> {} step:'.format(state))
# For every batch in the dataset; Stops when all batches in the dataset have
# been processed.
while True:
data_time.update(time.time() - end)
if train:
try:
# Train on one batch.
# Each image in the batch is loaded into memory consecutively.
for _ in range(batch_size):
# Because the images are not necessarily of the same size, we can't
# set the batch size with .batch().
batch = loader.get_next()
input_tuple = batch[0:-1]
target_tuple = batch[-1]
loss_value, grads = _compute_loss_and_gradient(
criterion, model, input_tuple, target_tuple, neg_num)
losses.update(loss_value)
# Accumulate gradients.
accum_grads += grads
# Perform weight update if required.
if (batch_num + 1) % update_every == 0 or (
batch_num + 1) == all_batch_num:
# Do one step for multiple batches. Accumulated gradients are
# used.
optimizer.apply_gradients(
zip(accum_grads, model.trainable_variables))
accum_grads = [tf.zeros_like(tv.read_value()) for tv in tvs]
# We break when we run out of range, i.e., we exhausted all dataset
# images.
except tf.errors.OutOfRangeError:
break
else:
# Validate one batch.
# We load full batch into memory.
input = []
target = []
try:
for _ in range(batch_size):
# Because the images are not necessarily of the same size, we can't
# set the batch size with .batch().
batch = loader.get_next()
input.append(batch[0:-1])
target.append(batch[-1])
# We break when we run out of range, i.e., we exhausted all dataset
# images.
except tf.errors.OutOfRangeError:
break
descriptors = tf.zeros(shape=(0, model.meta['outputdim']),
dtype=tf.float32)
for input_tuple in input:
for img in input_tuple:
# Compute the global descriptor vector.
model_out = model(tf.expand_dims(img, axis=0), training=False)
descriptors = tf.concat([descriptors, model_out], 0)
# No need to reduce memory consumption (no backward pass):
# Compute loss for the full batch.
queries = descriptors[target == -1]
positives = descriptors[target == 1]
negatives = descriptors[target == 0]
negatives = tf.reshape(negatives, [tf.shape(queries)[0], neg_num,
model.meta['outputdim']])
loss = criterion(queries, positives, negatives)
# Record loss.
losses.update(loss / batch_size, batch_size)
# Measure elapsed time.
batch_time.update(time.time() - end)
end = time.time()
# Record immediate loss and elapsed time.
if debug and ((batch_num + 1) % print_frequency == 0 or
batch_num == 0 or (batch_num + 1) == all_batch_num):
global_features_utils.debug_and_log(
          '>> {0}: [{1} epoch][{2}/{3} batch]\t Time val: '
          '{batch_time.val:.3f} (Batch Time avg: {batch_time.avg:.3f})\t '
          'Data {data_time.val:.3f} (Time avg: {data_time.avg:.3f})\t '
          'Immediate loss value: {loss.val:.4f} '
          '(Loss avg: {loss.avg:.4f})'.format(
state, epoch, batch_num + 1, all_batch_num,
batch_time=batch_time,
data_time=data_time, loss=losses), debug=True, log=False)
batch_num += 1
return losses.avg
def test_retrieval(datasets, net, epoch, writer=None, model_directory=None,
precompute_whitening=None, data_root='data', multiscale=[1.],
test_image_size=1024):
"""Testing step.
Evaluates the network on the provided test datasets by computing single-scale
mAP for easy/medium/hard cases. If `writer` is specified, saves the mAP
values in a tensorboard supported format.
Args:
datasets: List of dataset names for model testing (from
`_TEST_DATASET_NAMES`).
net: Network to evaluate.
epoch: Integer, epoch number.
writer: Tensorboard writer.
model_directory: String, path to the model directory.
precompute_whitening: Dataset used to learn whitening. If no
precomputation required, then `None`. Only 'retrieval-SfM-30k' and
'retrieval-SfM-120k' datasets are supported for whitening pre-computation.
data_root: Absolute path to the data folder.
multiscale: List of scales for multiscale testing.
test_image_size: Integer, maximum size of the test images.
"""
global_features_utils.debug_and_log(">> Testing step:")
global_features_utils.debug_and_log(
'>> Evaluating network on test datasets...')
# Precompute whitening.
if precompute_whitening is not None:
# If whitening already precomputed, load it and skip the computations.
filename = os.path.join(
model_directory, 'learned_whitening_mP_{}_epoch.pkl'.format(epoch))
filename_layer = os.path.join(
model_directory,
'learned_whitening_layer_config_{}_epoch.pkl'.format(
epoch))
if tf.io.gfile.exists(filename):
global_features_utils.debug_and_log(
'>> {}: Whitening for this epoch is already precomputed. '
'Loading...'.format(precompute_whitening))
with tf.io.gfile.GFile(filename, 'rb') as learned_whitening_file:
learned_whitening = pickle.load(learned_whitening_file)
else:
start = time.time()
global_features_utils.debug_and_log(
'>> {}: Learning whitening...'.format(precompute_whitening))
# Loading db.
db_root = os.path.join(data_root, 'train', precompute_whitening)
ims_root = os.path.join(db_root, 'ims')
db_filename = os.path.join(db_root,
'{}-whiten.pkl'.format(precompute_whitening))
with tf.io.gfile.GFile(db_filename, 'rb') as f:
db = pickle.load(f)
images = [sfm120k.id2filename(db['cids'][i], ims_root) for i in
range(len(db['cids']))]
# Extract whitening vectors.
global_features_utils.debug_and_log(
'>> {}: Extracting...'.format(precompute_whitening))
wvecs = global_model.extract_global_descriptors_from_list(net, images,
test_image_size)
# Learning whitening.
global_features_utils.debug_and_log(
'>> {}: Learning...'.format(precompute_whitening))
wvecs = wvecs.numpy()
mean_vector, projection_matrix = whiten.whitenlearn(wvecs, db['qidxs'],
db['pidxs'])
learned_whitening = {'m': mean_vector, 'P': projection_matrix}
global_features_utils.debug_and_log(
'>> {}: Elapsed time: {}'.format(precompute_whitening,
global_features_utils.htime(
time.time() - start)))
# Save learned_whitening parameters for a later use.
with tf.io.gfile.GFile(filename, 'wb') as learned_whitening_file:
pickle.dump(learned_whitening, learned_whitening_file)
# Saving whitening as a layer.
bias = -np.dot(mean_vector.T, projection_matrix.T)
whitening_layer = tf.keras.layers.Dense(
net.meta['outputdim'],
activation=None,
use_bias=True,
kernel_initializer=tf.keras.initializers.Constant(
projection_matrix.T),
bias_initializer=tf.keras.initializers.Constant(bias)
)
with tf.io.gfile.GFile(filename_layer, 'wb') as learned_whitening_file:
pickle.dump(whitening_layer.get_config(), learned_whitening_file)
else:
learned_whitening = None
# Evaluate on test datasets.
for dataset in datasets:
start = time.time()
# Prepare config structure for the test dataset.
cfg = test_dataset.CreateConfigForTestDataset(dataset,
os.path.join(data_root))
images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
bounding_boxes = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
# Extract database and query vectors.
global_features_utils.debug_and_log(
'>> {}: Extracting database images...'.format(dataset))
vecs = global_model.extract_global_descriptors_from_list(
net, images, test_image_size, scales=multiscale)
global_features_utils.debug_and_log(
'>> {}: Extracting query images...'.format(dataset))
qvecs = global_model.extract_global_descriptors_from_list(
net, qimages, test_image_size, bounding_boxes,
scales=multiscale)
global_features_utils.debug_and_log('>> {}: Evaluating...'.format(dataset))
# Convert the obtained descriptors to numpy.
vecs = vecs.numpy()
qvecs = qvecs.numpy()
# Search, rank and print test set metrics.
_calculate_metrics_and_export_to_tensorboard(vecs, qvecs, dataset, cfg,
writer, epoch, whiten=False)
if learned_whitening is not None:
# Whiten the vectors.
mean_vector = learned_whitening['m']
projection_matrix = learned_whitening['P']
vecs_lw = whiten.whitenapply(vecs, mean_vector, projection_matrix)
qvecs_lw = whiten.whitenapply(qvecs, mean_vector, projection_matrix)
# Search, rank, and print.
_calculate_metrics_and_export_to_tensorboard(
vecs_lw, qvecs_lw, dataset, cfg, writer, epoch, whiten=True)
global_features_utils.debug_and_log(
'>> {}: Elapsed time: {}'.format(
dataset, global_features_utils.htime(time.time() - start)))
def _calculate_metrics_and_export_to_tensorboard(vecs, qvecs, dataset, cfg,
writer, epoch, whiten=False):
"""
Calculates metrics and exports them to tensorboard.
Args:
vecs: Numpy array dataset global descriptors.
qvecs: Numpy array query global descriptors.
dataset: String, one of `_TEST_DATASET_NAMES`.
cfg: Dataset configuration.
writer: Tensorboard writer.
epoch: Integer, epoch number.
whiten: Boolean, whether the metrics are with for whitening used as a
post-processing step. Affects the name of the extracted TensorBoard
metrics.
"""
# Search, rank and print test set metrics.
scores = np.dot(vecs.T, qvecs)
ranks = np.transpose(np.argsort(-scores, axis=0))
metrics = global_features_utils.compute_metrics_and_print(dataset, ranks,
cfg['gnd'])
# Save calculated metrics in a tensorboard format.
if writer:
if whiten:
metric_names = ['test_accuracy_whiten_{}_E'.format(dataset),
'test_accuracy_whiten_{}_M'.format(dataset),
'test_accuracy_whiten_{}_H'.format(dataset)]
else:
metric_names = ['test_accuracy_{}_E'.format(dataset),
'test_accuracy_{}_M'.format(dataset),
'test_accuracy_{}_H'.format(dataset)]
tf.summary.scalar(metric_names[0], metrics[0][0], step=epoch)
tf.summary.scalar(metric_names[1], metrics[1][0], step=epoch)
tf.summary.scalar(metric_names[2], metrics[2][0], step=epoch)
writer.flush()
return None
| 15,531 | 39.553525 | 80 | py |
models | models-master/research/delf/delf/python/training/global_features/__init__.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global model training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| 827 | 40.4 | 80 | py |
models | models-master/research/delf/delf/python/training/global_features/train.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for Global Features model."""
import math
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from delf.python.datasets.sfm120k import dataset_download
from delf.python.datasets.sfm120k import sfm120k
from delf.python.training import global_features_utils
from delf.python.training import tensorboard_utils
from delf.python.training.global_features import train_utils
from delf.python.training.losses import ranking_losses
from delf.python.training.model import global_model
_LOSS_NAMES = ['contrastive', 'triplet']
_MODEL_NAMES = global_features_utils.get_standard_keras_models()
_OPTIMIZER_NAMES = ['sgd', 'adam']
_POOL_NAMES = ['mac', 'spoc', 'gem']
_PRECOMPUTE_WHITEN_NAMES = ['retrieval-SfM-30k', 'retrieval-SfM-120k']
_TEST_DATASET_NAMES = ['roxford5k', 'rparis6k']
_TRAINING_DATASET_NAMES = ['retrieval-SfM-120k']
_VALIDATION_TYPES = ['standard', 'eccv2020']
FLAGS = flags.FLAGS
flags.DEFINE_boolean('debug', False, 'Debug mode.')
# Export directory, training and val datasets, test datasets.
flags.DEFINE_string('data_root', "data",
'Absolute path to the folder containing training data.')
flags.DEFINE_string('directory', "data",
'Destination where trained network should be saved.')
flags.DEFINE_enum('training_dataset', 'retrieval-SfM-120k',
_TRAINING_DATASET_NAMES, 'Training dataset: ' +
' | '.join(_TRAINING_DATASET_NAMES) + '.')
flags.DEFINE_enum('validation_type', None, _VALIDATION_TYPES,
'Type of the evaluation to use. Either `None`, `standard` '
'or `eccv2020`.')
flags.DEFINE_list('test_datasets', 'roxford5k,rparis6k',
'Comma separated list of test datasets: ' +
' | '.join(_TEST_DATASET_NAMES) + '.')
flags.DEFINE_enum('precompute_whitening', None, _PRECOMPUTE_WHITEN_NAMES,
'Dataset used to learn whitening: ' +
' | '.join(_PRECOMPUTE_WHITEN_NAMES) + '.')
flags.DEFINE_integer('test_freq', 5,
'Run test evaluation every N epochs.')
flags.DEFINE_list('multiscale', [1.],
                  'Use multiscale vectors for testing, '
                  'examples: 1 | 1,1/2**(1/2),1/2 | 1,2**(1/2),1/2**(1/2). '
'Pass as a string of comma separated values.')
# Network architecture and initialization options.
flags.DEFINE_enum('arch', 'ResNet101', _MODEL_NAMES,
'Model architecture: ' + ' | '.join(_MODEL_NAMES) + '.')
flags.DEFINE_enum('pool', 'gem', _POOL_NAMES,
'Pooling options: ' + ' | '.join(_POOL_NAMES) + '.')
flags.DEFINE_bool('whitening', False,
'Whether to train model with learnable whitening ('
'linear layer) after the pooling.')
flags.DEFINE_bool('pretrained', True,
'Whether to initialize model with random weights ('
'default: pretrained on imagenet).')
flags.DEFINE_enum('loss', 'contrastive', _LOSS_NAMES,
'Training loss options: ' + ' | '.join(_LOSS_NAMES) + '.')
flags.DEFINE_float('loss_margin', 0.7, 'Loss margin.')
# train/val options specific for image retrieval learning.
flags.DEFINE_integer('image_size', 1024,
'Maximum size of longer image side used for training.')
flags.DEFINE_integer('neg_num', 5, 'Number of negative images per train/val '
'tuple.')
flags.DEFINE_integer('query_size', 2000,
'Number of queries randomly drawn per one training epoch.')
flags.DEFINE_integer('pool_size', 20000,
'Size of the pool for hard negative mining.')
# Standard training/validation options.
flags.DEFINE_string('gpu_id', '0', 'GPU id used for training.')
flags.DEFINE_integer('epochs', 100, 'Number of total epochs to run.')
flags.DEFINE_integer('batch_size', 5,
'Number of (q,p,n1,...,nN) tuples in a mini-batch.')
flags.DEFINE_integer('update_every', 1,
'Update model weights every N batches, used to handle '
                     'relatively large batches; batch_size effectively '
'becomes update_every `x` batch_size.')
flags.DEFINE_enum('optimizer', 'adam', _OPTIMIZER_NAMES,
'Optimizer options: ' + ' | '.join(_OPTIMIZER_NAMES) + '.')
flags.DEFINE_float('lr', 1e-6, 'Initial learning rate.')
flags.DEFINE_float('momentum', 0.9, 'Momentum.')
flags.DEFINE_float('weight_decay', 1e-6, 'Weight decay.')
flags.DEFINE_bool('resume', False,
'Whether to start from the latest checkpoint in the logdir.')
flags.DEFINE_bool('launch_tensorboard', False, 'Whether to launch tensorboard.')
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Manually check if there are unknown test datasets and if the dataset
# ground truth files are downloaded.
for dataset in FLAGS.test_datasets:
if dataset not in _TEST_DATASET_NAMES:
raise ValueError('Unsupported or unknown test dataset: {}.'.format(
dataset))
test_data_config = os.path.join(FLAGS.data_root,
'gnd_{}.pkl'.format(dataset))
if not tf.io.gfile.exists(test_data_config):
raise ValueError(
'{} ground truth file at {} not found. Please download it '
'according to '
'the DELG instructions.'.format(dataset, FLAGS.data_root))
# Check if train dataset is downloaded and download it if not found.
dataset_download.download_train(FLAGS.data_root)
# Creating model export directory if it does not exist.
model_directory = global_features_utils.create_model_directory(
FLAGS.training_dataset, FLAGS.arch, FLAGS.pool, FLAGS.whitening,
FLAGS.pretrained, FLAGS.loss, FLAGS.loss_margin, FLAGS.optimizer,
FLAGS.lr, FLAGS.weight_decay, FLAGS.neg_num, FLAGS.query_size,
FLAGS.pool_size, FLAGS.batch_size, FLAGS.update_every,
FLAGS.image_size, FLAGS.directory)
# Setting up logging directory, same as where the model is stored.
logging.get_absl_handler().use_absl_log_file('absl_logging', model_directory)
# Set cuda visible device.
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_id
global_features_utils.debug_and_log('>> Num GPUs Available: {}'.format(
len(tf.config.experimental.list_physical_devices('GPU'))),
FLAGS.debug)
# Set random seeds.
tf.random.set_seed(0)
np.random.seed(0)
# Initialize the model.
if FLAGS.pretrained:
global_features_utils.debug_and_log(
'>> Using pre-trained model \'{}\''.format(FLAGS.arch))
else:
global_features_utils.debug_and_log(
'>> Using model from scratch (random weights) \'{}\'.'.format(
FLAGS.arch))
model_params = {'architecture': FLAGS.arch, 'pooling': FLAGS.pool,
'whitening': FLAGS.whitening, 'pretrained': FLAGS.pretrained,
'data_root': FLAGS.data_root}
model = global_model.GlobalFeatureNet(**model_params)
  # Freeze running mean and std in batch normalization layers.
  # Training is done one image at a time to reduce the memory requirements of
  # the network, so the statistics computed over such "batches" would not be
  # meaningful. Instead, we freeze all batch norm layers in the network by
  # setting them to non-trainable (i.e., keeping the original ImageNet
  # statistics).
for layer in model.feature_extractor.layers:
if isinstance(layer, tf.keras.layers.BatchNormalization):
layer.trainable = False
global_features_utils.debug_and_log('>> Network initialized.')
global_features_utils.debug_and_log('>> Loss: {}.'.format(FLAGS.loss))
# Define the loss function.
if FLAGS.loss == 'contrastive':
criterion = ranking_losses.ContrastiveLoss(margin=FLAGS.loss_margin)
elif FLAGS.loss == 'triplet':
criterion = ranking_losses.TripletLoss(margin=FLAGS.loss_margin)
else:
raise ValueError('Loss {} not available.'.format(FLAGS.loss))
# Defining parameters for the training.
  # When pre-computing whitening, an evaluation pass is run before the network
  # training and is logged as epoch 0; training itself always starts from
  # epoch 1.
start_epoch = 1
exp_decay = math.exp(-0.01)
decay_steps = FLAGS.query_size / FLAGS.batch_size
# Define learning rate decay schedule.
lr_scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=FLAGS.lr,
decay_steps=decay_steps,
decay_rate=exp_decay)
# Define the optimizer.
if FLAGS.optimizer == 'sgd':
opt = tfa.optimizers.extend_with_decoupled_weight_decay(
tf.keras.optimizers.SGD)
optimizer = opt(weight_decay=FLAGS.weight_decay,
learning_rate=lr_scheduler, momentum=FLAGS.momentum)
elif FLAGS.optimizer == 'adam':
opt = tfa.optimizers.extend_with_decoupled_weight_decay(
tf.keras.optimizers.Adam)
optimizer = opt(weight_decay=FLAGS.weight_decay, learning_rate=lr_scheduler)
else:
raise ValueError('Optimizer {} not available.'.format(FLAGS.optimizer))
# Initializing logging.
writer = tf.summary.create_file_writer(model_directory)
tf.summary.experimental.set_step(1)
# Setting up the checkpoint manager.
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
manager = tf.train.CheckpointManager(
checkpoint,
model_directory,
max_to_keep=10,
keep_checkpoint_every_n_hours=3)
if FLAGS.resume:
# Restores the checkpoint, if existing.
global_features_utils.debug_and_log('>> Continuing from a checkpoint.')
checkpoint.restore(manager.latest_checkpoint)
# Launching tensorboard if required.
if FLAGS.launch_tensorboard:
tensorboard = tf.keras.callbacks.TensorBoard(model_directory)
tensorboard.set_model(model=model)
tensorboard_utils.launch_tensorboard(log_dir=model_directory)
# Log flags used.
global_features_utils.debug_and_log('>> Running training script with:')
global_features_utils.debug_and_log('>> logdir = {}'.format(model_directory))
if FLAGS.training_dataset.startswith('retrieval-SfM-120k'):
train_dataset = sfm120k.CreateDataset(
data_root=FLAGS.data_root,
mode='train',
imsize=FLAGS.image_size,
num_negatives=FLAGS.neg_num,
num_queries=FLAGS.query_size,
pool_size=FLAGS.pool_size
)
if FLAGS.validation_type is not None:
val_dataset = sfm120k.CreateDataset(
data_root=FLAGS.data_root,
mode='val',
imsize=FLAGS.image_size,
num_negatives=FLAGS.neg_num,
num_queries=float('Inf'),
pool_size=float('Inf'),
eccv2020=True if FLAGS.validation_type == 'eccv2020' else False
)
train_dataset_output_types = [tf.float32 for i in range(2 + FLAGS.neg_num)]
train_dataset_output_types.append(tf.int32)
global_features_utils.debug_and_log(
'>> Training the {} network'.format(model_directory))
global_features_utils.debug_and_log('>> GPU ids: {}'.format(FLAGS.gpu_id))
with writer.as_default():
# Precompute whitening if needed.
if FLAGS.precompute_whitening is not None:
epoch = 0
train_utils.test_retrieval(
FLAGS.test_datasets, model, writer=writer,
epoch=epoch, model_directory=model_directory,
precompute_whitening=FLAGS.precompute_whitening,
data_root=FLAGS.data_root,
multiscale=FLAGS.multiscale)
for epoch in range(start_epoch, FLAGS.epochs + 1):
# Set manual seeds per epoch.
np.random.seed(epoch)
tf.random.set_seed(epoch)
# Find hard-negatives.
      # Hard-positive examples do not depend on the network and are randomly
      # re-drawn every epoch, while hard-negatives depend on the current CNN
      # parameters and are re-mined once per epoch.
avg_neg_distance = train_dataset.create_epoch_tuples(model)
def _train_gen():
return (inst for inst in train_dataset)
train_loader = tf.data.Dataset.from_generator(
_train_gen,
output_types=tuple(train_dataset_output_types))
loss = train_utils.train_val_one_epoch(
loader=iter(train_loader), model=model,
criterion=criterion, optimizer=optimizer, epoch=epoch,
batch_size=FLAGS.batch_size, query_size=FLAGS.query_size,
neg_num=FLAGS.neg_num, update_every=FLAGS.update_every,
debug=FLAGS.debug)
# Write a scalar summary.
tf.summary.scalar('train_epoch_loss', loss, step=epoch)
# Forces summary writer to send any buffered data to storage.
writer.flush()
# Evaluate on validation set.
if FLAGS.validation_type is not None and (epoch % FLAGS.test_freq == 0 or
epoch == 1):
avg_neg_distance = val_dataset.create_epoch_tuples(model,
model_directory)
def _val_gen():
return (inst for inst in val_dataset)
val_loader = tf.data.Dataset.from_generator(
_val_gen, output_types=tuple(train_dataset_output_types))
loss = train_utils.train_val_one_epoch(
loader=iter(val_loader), model=model,
criterion=criterion, optimizer=None,
epoch=epoch, train=False, batch_size=FLAGS.batch_size,
query_size=FLAGS.query_size, neg_num=FLAGS.neg_num,
update_every=FLAGS.update_every, debug=FLAGS.debug)
tf.summary.scalar('val_epoch_loss', loss, step=epoch)
writer.flush()
# Evaluate on test datasets every test_freq epochs.
if epoch == 1 or epoch % FLAGS.test_freq == 0:
train_utils.test_retrieval(
FLAGS.test_datasets, model, writer=writer, epoch=epoch,
model_directory=model_directory,
precompute_whitening=FLAGS.precompute_whitening,
data_root=FLAGS.data_root, multiscale=FLAGS.multiscale)
# Saving checkpoints and model weights.
try:
save_path = manager.save(checkpoint_number=epoch)
global_features_utils.debug_and_log(
'Saved ({}) at {}'.format(epoch, save_path))
filename = os.path.join(model_directory,
'checkpoint_epoch_{}.h5'.format(epoch))
model.save_weights(filename, save_format='h5')
global_features_utils.debug_and_log(
'Saved weights ({}) at {}'.format(epoch, filename))
except Exception as ex:
global_features_utils.debug_and_log(
'Could not save checkpoint: {}'.format(ex))
if __name__ == '__main__':
app.run(main)
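# Example training invocation (an illustrative sketch only: the script name,
# data_root layout and flag values below are assumptions and should be adapted
# to your setup):
#
#   python3 train.py \
#     --data_root=/tmp/sfm120k \
#     --arch=ResNet101 --pool=gem --loss=contrastive --loss_margin=0.7 \
#     --optimizer=adam --lr=1e-6 --weight_decay=1e-6 \
#     --epochs=100 --batch_size=5 --update_every=1 \
#     --image_size=1024 --neg_num=5 --query_size=2000 --pool_size=20000 \
#     --test_datasets=roxford5k,rparis6k --test_freq=5 \
#     --multiscale=1,0.7071,1.4142 --launch_tensorboard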
| 15,702 | 42.258953 | 80 | py |
models | models-master/research/delf/delf/python/training/model/export_global_model.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export global feature tensorflow inference model.
The exported model may leverage image pyramids for multi-scale processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
from delf.python.training.model import delf_model
from delf.python.training.model import delg_model
from delf.python.training.model import export_model_utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ckpt_path', '/tmp/delf-logdir/delf-weights',
'Path to saved checkpoint.')
flags.DEFINE_string('export_path', None, 'Path where model will be exported.')
flags.DEFINE_list(
'input_scales_list', None,
'Optional input image scales to use. If None (default), an input end-point '
'"input_scales" is added for the exported model. If not None, the '
'specified list of floats will be hard-coded as the desired input scales.')
flags.DEFINE_enum(
'multi_scale_pool_type', 'None', ['None', 'average', 'sum'],
"If 'None' (default), the model is exported with an output end-point "
"'global_descriptors', where the global descriptor for each scale is "
"returned separately. If not 'None', the global descriptor of each scale is"
' pooled and a 1D global descriptor is returned, with output end-point '
"'global_descriptor'.")
flags.DEFINE_boolean('normalize_global_descriptor', False,
'If True, L2-normalizes global descriptor.')
flags.DEFINE_boolean('delg_global_features', False,
'Whether the model uses a DELG-like global feature head.')
flags.DEFINE_float(
'delg_gem_power', 3.0,
    'Power for Generalized Mean pooling. Used only if --delg_global_features '
'is present.')
flags.DEFINE_integer(
'delg_embedding_layer_dim', 2048,
    'Size of the FC whitening layer (embedding layer). Used only if '
'--delg_global_features is present.')
class _ExtractModule(tf.Module):
"""Helper module to build and save global feature model."""
def __init__(self,
multi_scale_pool_type='None',
normalize_global_descriptor=False,
input_scales_tensor=None,
delg_global_features=False,
delg_gem_power=3.0,
delg_embedding_layer_dim=2048):
"""Initialization of global feature model.
Args:
multi_scale_pool_type: Type of multi-scale pooling to perform.
normalize_global_descriptor: Whether to L2-normalize global descriptor.
input_scales_tensor: If None, the exported function to be used should be
ExtractFeatures, where an input end-point "input_scales" is added for
the exported model. If not None, the specified 1D tensor of floats will
be hard-coded as the desired input scales, in conjunction with
ExtractFeaturesFixedScales.
delg_global_features: Whether the model uses a DELG-like global feature
head.
delg_gem_power: Power for Generalized Mean pooling in the DELG model. Used
only if 'delg_global_features' is True.
delg_embedding_layer_dim: Size of the FC whitening layer (embedding
layer). Used only if 'delg_global_features' is True.
"""
self._multi_scale_pool_type = multi_scale_pool_type
self._normalize_global_descriptor = normalize_global_descriptor
if input_scales_tensor is None:
self._input_scales_tensor = []
else:
self._input_scales_tensor = input_scales_tensor
# Setup the DELF model for extraction.
if delg_global_features:
self._model = delg_model.Delg(
block3_strides=False,
name='DELG',
gem_power=delg_gem_power,
embedding_layer_dim=delg_embedding_layer_dim)
else:
self._model = delf_model.Delf(block3_strides=False, name='DELF')
def LoadWeights(self, checkpoint_path):
self._model.load_weights(checkpoint_path)
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image'),
tf.TensorSpec(shape=[None], dtype=tf.float32, name='input_scales'),
tf.TensorSpec(
shape=[None], dtype=tf.int32, name='input_global_scales_ind')
])
def ExtractFeatures(self, input_image, input_scales, input_global_scales_ind):
extracted_features = export_model_utils.ExtractGlobalFeatures(
input_image,
input_scales,
input_global_scales_ind,
lambda x: self._model.backbone.build_call(x, training=False),
multi_scale_pool_type=self._multi_scale_pool_type,
normalize_global_descriptor=self._normalize_global_descriptor)
named_output_tensors = {}
if self._multi_scale_pool_type == 'None':
named_output_tensors['global_descriptors'] = tf.identity(
extracted_features, name='global_descriptors')
else:
named_output_tensors['global_descriptor'] = tf.identity(
extracted_features, name='global_descriptor')
return named_output_tensors
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image')
])
def ExtractFeaturesFixedScales(self, input_image):
return self.ExtractFeatures(input_image, self._input_scales_tensor,
tf.range(tf.size(self._input_scales_tensor)))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
export_path = FLAGS.export_path
if os.path.exists(export_path):
raise ValueError('export_path %s already exists.' % export_path)
if FLAGS.input_scales_list is None:
input_scales_tensor = None
else:
input_scales_tensor = tf.constant(
[float(s) for s in FLAGS.input_scales_list],
dtype=tf.float32,
shape=[len(FLAGS.input_scales_list)],
name='input_scales')
module = _ExtractModule(FLAGS.multi_scale_pool_type,
FLAGS.normalize_global_descriptor,
input_scales_tensor, FLAGS.delg_global_features,
FLAGS.delg_gem_power, FLAGS.delg_embedding_layer_dim)
# Load the weights.
checkpoint_path = FLAGS.ckpt_path
module.LoadWeights(checkpoint_path)
print('Checkpoint loaded from ', checkpoint_path)
# Save the module
if FLAGS.input_scales_list is None:
served_function = module.ExtractFeatures
else:
served_function = module.ExtractFeaturesFixedScales
tf.saved_model.save(
module, export_path, signatures={'serving_default': served_function})
if __name__ == '__main__':
app.run(main)
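# Loading the exported model back is roughly as follows (illustrative sketch;
# the exact inputs depend on the export flags used above):
#
#   loaded = tf.saved_model.load('/tmp/global_model')
#   extractor = loaded.signatures['serving_default']
#   # When exported without --input_scales_list, scales are runtime inputs:
#   outputs = extractor(input_image=image_uint8,
#                       input_scales=tf.constant([1.0]),
#                       input_global_scales_ind=tf.constant([0]))
#   descriptors = outputs['global_descriptors']  # or 'global_descriptor'.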
| 7,294 | 38.646739 | 80 | py |
models | models-master/research/delf/delf/python/training/model/delf_model_test.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the DELF model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import tensorflow as tf
from delf.python.training.model import delf_model
class DelfTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('block3_stridesTrue', True),
('block3_stridesFalse', False),
)
def test_build_model(self, block3_strides):
image_size = 321
num_classes = 1000
batch_size = 2
input_shape = (batch_size, image_size, image_size, 3)
model = delf_model.Delf(block3_strides=block3_strides, name='DELF')
model.init_classifiers(num_classes)
images = tf.random.uniform(input_shape, minval=-1.0, maxval=1.0, seed=0)
blocks = {}
# Get global feature by pooling block4 features.
desc_prelogits = model.backbone(
images, intermediates_dict=blocks, training=False)
desc_logits = model.desc_classification(desc_prelogits)
self.assertAllEqual(desc_prelogits.shape, (batch_size, 2048))
self.assertAllEqual(desc_logits.shape, (batch_size, num_classes))
features = blocks['block3']
attn_prelogits, _, _ = model.attention(features)
attn_logits = model.attn_classification(attn_prelogits)
self.assertAllEqual(attn_prelogits.shape, (batch_size, 1024))
self.assertAllEqual(attn_logits.shape, (batch_size, num_classes))
@parameterized.named_parameters(
('block3_stridesTrue', True),
('block3_stridesFalse', False),
)
def test_train_step(self, block3_strides):
image_size = 321
num_classes = 1000
batch_size = 2
clip_val = 10.0
input_shape = (batch_size, image_size, image_size, 3)
model = delf_model.Delf(block3_strides=block3_strides, name='DELF')
model.init_classifiers(num_classes)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)
images = tf.random.uniform(input_shape, minval=0.0, maxval=1.0, seed=0)
labels = tf.random.uniform((batch_size,),
minval=0,
maxval=model.num_classes - 1,
dtype=tf.int64)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
def compute_loss(labels, predictions):
per_example_loss = loss_object(labels, predictions)
return tf.nn.compute_average_loss(
per_example_loss, global_batch_size=batch_size)
with tf.GradientTape() as gradient_tape:
(desc_prelogits, attn_prelogits, _, _, _,
_) = model.global_and_local_forward_pass(images)
# Calculate global loss by applying the descriptor classifier.
desc_logits = model.desc_classification(desc_prelogits)
desc_loss = compute_loss(labels, desc_logits)
# Calculate attention loss by applying the attention block classifier.
attn_logits = model.attn_classification(attn_prelogits)
attn_loss = compute_loss(labels, attn_logits)
# Cumulate global loss and attention loss and backpropagate through the
# descriptor layer and attention layer together.
total_loss = desc_loss + attn_loss
gradients = gradient_tape.gradient(total_loss, model.trainable_weights)
clipped, _ = tf.clip_by_global_norm(gradients, clip_norm=clip_val)
optimizer.apply_gradients(zip(clipped, model.trainable_weights))
if __name__ == '__main__':
tf.test.main()
| 4,198 | 37.522936 | 80 | py |
models | models-master/research/delf/delf/python/training/model/delf_model.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DELF model implementation based on the following paper.
Large-Scale Image Retrieval with Attentive Deep Local Features
https://arxiv.org/abs/1612.06321
"""
import tensorflow as tf
from delf.python.training.model import resnet50 as resnet
layers = tf.keras.layers
reg = tf.keras.regularizers
_DECAY = 0.0001
class AttentionModel(tf.keras.Model):
"""Instantiates attention model.
Uses two [kernel_size x kernel_size] convolutions and softplus as activation
to compute an attention map with the same resolution as the featuremap.
  Features are l2-normalized and aggregated using attention probabilities as
  weights.
The features (targets) to be aggregated can be the input featuremap, or a
different one with the same resolution.
"""
def __init__(self, kernel_size=1, decay=_DECAY, name='attention'):
"""Initialization of attention model.
Args:
kernel_size: int, kernel size of convolutions.
decay: float, decay for l2 regularization of kernel weights.
name: str, name to identify model.
"""
super(AttentionModel, self).__init__(name=name)
# First convolutional layer (called with relu activation).
self.conv1 = layers.Conv2D(
512,
kernel_size,
kernel_regularizer=reg.l2(decay),
padding='same',
name='attn_conv1')
self.bn_conv1 = layers.BatchNormalization(axis=3, name='bn_conv1')
# Second convolutional layer, with softplus activation.
self.conv2 = layers.Conv2D(
1,
kernel_size,
kernel_regularizer=reg.l2(decay),
padding='same',
name='attn_conv2')
self.activation_layer = layers.Activation('softplus')
def call(self, inputs, targets=None, training=True):
x = self.conv1(inputs)
x = self.bn_conv1(x, training=training)
x = tf.nn.relu(x)
score = self.conv2(x)
prob = self.activation_layer(score)
# Aggregate inputs if targets is None.
if targets is None:
targets = inputs
# L2-normalize the featuremap before pooling.
targets = tf.nn.l2_normalize(targets, axis=-1)
feat = tf.reduce_mean(tf.multiply(targets, prob), [1, 2], keepdims=False)
return feat, prob, score
class AutoencoderModel(tf.keras.Model):
"""Instantiates the Keras Autoencoder model."""
def __init__(self, reduced_dimension, expand_dimension, kernel_size=1,
name='autoencoder'):
"""Initialization of Autoencoder model.
Args:
reduced_dimension: int, the output dimension of the autoencoder layer.
expand_dimension: int, the input dimension of the autoencoder layer.
kernel_size: int or tuple, height and width of the 2D convolution window.
name: str, name to identify model.
"""
super(AutoencoderModel, self).__init__(name=name)
self.conv1 = layers.Conv2D(
reduced_dimension,
kernel_size,
padding='same',
name='autoenc_conv1')
self.conv2 = layers.Conv2D(
expand_dimension,
kernel_size,
activation=tf.keras.activations.relu,
padding='same',
name='autoenc_conv2')
def call(self, inputs):
dim_reduced_features = self.conv1(inputs)
dim_expanded_features = self.conv2(dim_reduced_features)
return dim_expanded_features, dim_reduced_features
class Delf(tf.keras.Model):
"""Instantiates Keras DELF model using ResNet50 as backbone.
This class implements the [DELF](https://arxiv.org/abs/1612.06321) model for
extracting local features from images. The backbone is a ResNet50 network
that extracts featuremaps from both conv_4 and conv_5 layers. Activations
from conv_4 are used to compute an attention map of the same resolution.
"""
def __init__(self,
block3_strides=True,
name='DELF',
pooling='avg',
gem_power=3.0,
embedding_layer=False,
embedding_layer_dim=2048,
use_dim_reduction=False,
reduced_dimension=128,
dim_expand_channels=1024):
"""Initialization of DELF model.
Args:
block3_strides: bool, whether to add strides to the output of block3.
name: str, name to identify model.
pooling: str, pooling mode for global feature extraction; possible values
are 'None', 'avg', 'max', 'gem.'
gem_power: float, GeM power for GeM pooling. Only used if pooling ==
'gem'.
embedding_layer: bool, whether to create an embedding layer (FC whitening
layer).
embedding_layer_dim: int, size of the embedding layer.
use_dim_reduction: Whether to integrate dimensionality reduction layers.
If True, extra layers are added to reduce the dimensionality of the
extracted features.
reduced_dimension: int, only used if use_dim_reduction is True. The output
dimension of the autoencoder layer.
dim_expand_channels: int, only used if use_dim_reduction is True. The
number of channels of the backbone block used. Default value 1024 is the
number of channels of backbone block 'block3'.
"""
super(Delf, self).__init__(name=name)
# Backbone using Keras ResNet50.
self.backbone = resnet.ResNet50(
'channels_last',
name='backbone',
include_top=False,
pooling=pooling,
block3_strides=block3_strides,
average_pooling=False,
gem_power=gem_power,
embedding_layer=embedding_layer,
embedding_layer_dim=embedding_layer_dim)
# Attention model.
self.attention = AttentionModel(name='attention')
# Autoencoder model.
self._use_dim_reduction = use_dim_reduction
if self._use_dim_reduction:
self.autoencoder = AutoencoderModel(reduced_dimension,
dim_expand_channels,
name='autoencoder')
def init_classifiers(self, num_classes, desc_classification=None):
"""Define classifiers for training backbone and attention models."""
self.num_classes = num_classes
if desc_classification is None:
self.desc_classification = layers.Dense(
num_classes, activation=None, kernel_regularizer=None, name='desc_fc')
else:
self.desc_classification = desc_classification
self.attn_classification = layers.Dense(
num_classes, activation=None, kernel_regularizer=None, name='att_fc')
def global_and_local_forward_pass(self, images, training=True):
"""Run a forward to calculate global descriptor and attention prelogits.
Args:
images: Tensor containing the dataset on which to run the forward pass.
      training: Indicator of whether the forward pass is running in training
        mode or not.
Returns:
Global descriptor prelogits, attention prelogits, attention scores,
backbone weights.
"""
backbone_blocks = {}
desc_prelogits = self.backbone.build_call(
images, intermediates_dict=backbone_blocks, training=training)
# Prevent gradients from propagating into the backbone. See DELG paper:
# https://arxiv.org/abs/2001.05027.
block3 = backbone_blocks['block3'] # pytype: disable=key-error
block3 = tf.stop_gradient(block3)
if self._use_dim_reduction:
(dim_expanded_features, dim_reduced_features) = self.autoencoder(block3)
attn_prelogits, attn_scores, _ = self.attention(
block3,
targets=dim_expanded_features,
training=training)
else:
attn_prelogits, attn_scores, _ = self.attention(block3, training=training)
dim_expanded_features = None
dim_reduced_features = None
return (desc_prelogits, attn_prelogits, attn_scores, backbone_blocks,
dim_expanded_features, dim_reduced_features)
def build_call(self, input_image, training=True):
(global_feature, _, attn_scores, backbone_blocks, _,
dim_reduced_features) = self.global_and_local_forward_pass(input_image,
training)
if self._use_dim_reduction:
features = dim_reduced_features
else:
features = backbone_blocks['block3'] # pytype: disable=key-error
return global_feature, attn_scores, features
def call(self, input_image, training=True):
_, probs, features = self.build_call(input_image, training=training)
return probs, features
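# Minimal usage sketch (illustrative; mirrors the shapes used in
# delf_model_test.py above):
#
#   model = Delf(block3_strides=True, name='DELF')
#   model.init_classifiers(num_classes=1000)
#   images = tf.random.uniform((2, 321, 321, 3), minval=-1.0, maxval=1.0)
#   probs, features = model(images, training=False)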
| 9,075 | 36.659751 | 80 | py |
models | models-master/research/delf/delf/python/training/model/export_local_and_global_model.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export DELG tensorflow inference model.
The exported model can be used to jointly extract local and global features. It
may use an image pyramid for multi-scale processing, and will include receptive
field calculation and keypoint selection for the local feature head.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
from delf.python.training.model import delf_model
from delf.python.training.model import delg_model
from delf.python.training.model import export_model_utils
FLAGS = flags.FLAGS
flags.DEFINE_string(
'ckpt_path', '/tmp/delf-logdir/delf-weights', 'Path to saved checkpoint.')
flags.DEFINE_string('export_path', None, 'Path where model will be exported.')
flags.DEFINE_boolean(
'delg_global_features', True,
'Whether the model uses a DELG-like global feature head.')
flags.DEFINE_float(
'delg_gem_power', 3.0,
    'Power for Generalized Mean pooling. Used only if --delg_global_features '
'is present.')
flags.DEFINE_integer(
'delg_embedding_layer_dim', 2048,
    'Size of the FC whitening layer (embedding layer). Used only if '
'--delg_global_features is present.')
flags.DEFINE_boolean(
'block3_strides', True,
'Whether to apply strides after block3, used for local feature head.')
flags.DEFINE_float(
'iou', 1.0, 'IOU for non-max suppression used in local feature head.')
flags.DEFINE_boolean(
'use_autoencoder', True,
'Whether the exported model should use an autoencoder.')
flags.DEFINE_float(
'autoencoder_dimensions', 128,
    'Number of dimensions of the autoencoder. Used only if '
'use_autoencoder=True.')
flags.DEFINE_float(
'local_feature_map_channels', 1024,
'Number of channels at backbone layer used for local feature extraction. '
    'Default value 1024 is the number of channels of block3. Used only if '
'use_autoencoder=True.')
class _ExtractModule(tf.Module):
"""Helper module to build and save DELG model."""
def __init__(self,
delg_global_features=True,
delg_gem_power=3.0,
delg_embedding_layer_dim=2048,
block3_strides=True,
iou=1.0):
"""Initialization of DELG model.
Args:
delg_global_features: Whether the model uses a DELG-like global feature
head.
delg_gem_power: Power for Generalized Mean pooling in the DELG model. Used
only if 'delg_global_features' is True.
delg_embedding_layer_dim: Size of the FC whitening layer (embedding
layer). Used only if 'delg_global_features' is True.
block3_strides: bool, whether to add strides to the output of block3.
iou: IOU for non-max suppression.
"""
self._stride_factor = 2.0 if block3_strides else 1.0
self._iou = iou
# Setup the DELG model for extraction.
if delg_global_features:
self._model = delg_model.Delg(
block3_strides=block3_strides,
name='DELG',
gem_power=delg_gem_power,
embedding_layer_dim=delg_embedding_layer_dim,
use_dim_reduction=FLAGS.use_autoencoder,
reduced_dimension=FLAGS.autoencoder_dimensions,
dim_expand_channels=FLAGS.local_feature_map_channels)
else:
self._model = delf_model.Delf(
block3_strides=block3_strides,
name='DELF',
use_dim_reduction=FLAGS.use_autoencoder,
reduced_dimension=FLAGS.autoencoder_dimensions,
dim_expand_channels=FLAGS.local_feature_map_channels)
def LoadWeights(self, checkpoint_path):
self._model.load_weights(checkpoint_path)
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image'),
tf.TensorSpec(shape=[None], dtype=tf.float32, name='input_scales'),
tf.TensorSpec(shape=(), dtype=tf.int32, name='input_max_feature_num'),
tf.TensorSpec(shape=(), dtype=tf.float32, name='input_abs_thres'),
tf.TensorSpec(
shape=[None], dtype=tf.int32, name='input_global_scales_ind')
])
def ExtractFeatures(self, input_image, input_scales, input_max_feature_num,
input_abs_thres, input_global_scales_ind):
extracted_features = export_model_utils.ExtractLocalAndGlobalFeatures(
input_image, input_scales, input_max_feature_num, input_abs_thres,
input_global_scales_ind, self._iou,
lambda x: self._model.build_call(x, training=False),
self._stride_factor)
named_output_tensors = {}
named_output_tensors['boxes'] = tf.identity(
extracted_features[0], name='boxes')
named_output_tensors['features'] = tf.identity(
extracted_features[1], name='features')
named_output_tensors['scales'] = tf.identity(
extracted_features[2], name='scales')
named_output_tensors['scores'] = tf.identity(
extracted_features[3], name='scores')
named_output_tensors['global_descriptors'] = tf.identity(
extracted_features[4], name='global_descriptors')
return named_output_tensors
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
export_path = FLAGS.export_path
if os.path.exists(export_path):
raise ValueError(f'Export_path {export_path} already exists. Please '
'specify a different path or delete the existing one.')
module = _ExtractModule(FLAGS.delg_global_features, FLAGS.delg_gem_power,
FLAGS.delg_embedding_layer_dim, FLAGS.block3_strides,
FLAGS.iou)
# Load the weights.
checkpoint_path = FLAGS.ckpt_path
module.LoadWeights(checkpoint_path)
print('Checkpoint loaded from ', checkpoint_path)
# Save the module
tf.saved_model.save(module, export_path)
if __name__ == '__main__':
app.run(main)
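# The exported module exposes ExtractFeatures directly after loading
# (illustrative sketch; tensor values are placeholders):
#
#   loaded = tf.saved_model.load('/tmp/delg_model')
#   outputs = loaded.ExtractFeatures(
#       input_image=image_uint8,
#       input_scales=tf.constant([1.0]),
#       input_max_feature_num=tf.constant(1000),
#       input_abs_thres=tf.constant(0.0),
#       input_global_scales_ind=tf.constant([0]))
#   boxes, descriptors = outputs['boxes'], outputs['global_descriptors']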
| 6,601 | 37.608187 | 80 | py |
models | models-master/research/delf/delf/python/training/model/delg_model.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DELG model implementation based on the following paper.
Unifying Deep Local and Global Features for Image Search
https://arxiv.org/abs/2001.05027
"""
import functools
import math
from absl import logging
import tensorflow as tf
from delf.python.training.model import delf_model
layers = tf.keras.layers
class Delg(delf_model.Delf):
"""Instantiates Keras DELG model using ResNet50 as backbone.
This class implements the [DELG](https://arxiv.org/abs/2001.05027) model for
extracting local and global features from images. The same attention layer
is trained as in the DELF model. In addition, the extraction of global
features is trained using GeMPooling, a FC whitening layer also called
"embedding layer" and ArcFace loss.
"""
def __init__(self,
block3_strides=True,
name='DELG',
gem_power=3.0,
embedding_layer_dim=2048,
scale_factor_init=45.25, # sqrt(2048)
arcface_margin=0.1,
use_dim_reduction=False,
reduced_dimension=128,
dim_expand_channels=1024):
"""Initialization of DELG model.
Args:
block3_strides: bool, whether to add strides to the output of block3.
name: str, name to identify model.
gem_power: float, GeM power parameter.
      embedding_layer_dim: int, dimension of the embedding layer.
scale_factor_init: float.
arcface_margin: float, ArcFace margin.
use_dim_reduction: Whether to integrate dimensionality reduction layers.
If True, extra layers are added to reduce the dimensionality of the
extracted features.
reduced_dimension: Only used if use_dim_reduction is True, the output
dimension of the dim_reduction layer.
dim_expand_channels: Only used if use_dim_reduction is True, the
number of channels of the backbone block used. Default value 1024 is the
number of channels of backbone block 'block3'.
"""
    logging.info('Creating Delg model, gem_power %f, embedding_layer_dim %d',
                 gem_power, embedding_layer_dim)
super(Delg, self).__init__(block3_strides=block3_strides,
name=name,
pooling='gem',
gem_power=gem_power,
embedding_layer=True,
embedding_layer_dim=embedding_layer_dim,
use_dim_reduction=use_dim_reduction,
reduced_dimension=reduced_dimension,
dim_expand_channels=dim_expand_channels)
self._embedding_layer_dim = embedding_layer_dim
self._scale_factor_init = scale_factor_init
self._arcface_margin = arcface_margin
def init_classifiers(self, num_classes):
"""Define classifiers for training backbone and attention models."""
logging.info('Initializing Delg backbone and attention models classifiers')
backbone_classifier_func = self._create_backbone_classifier(num_classes)
super(Delg, self).init_classifiers(
num_classes,
desc_classification=backbone_classifier_func)
def _create_backbone_classifier(self, num_classes):
"""Define the classifier for training the backbone model."""
logging.info('Creating cosine classifier')
self.cosine_weights = tf.Variable(
initial_value=tf.initializers.GlorotUniform()(
shape=[self._embedding_layer_dim, num_classes]),
name='cosine_weights',
trainable=True)
self.scale_factor = tf.Variable(self._scale_factor_init,
name='scale_factor',
trainable=False)
classifier_func = functools.partial(cosine_classifier_logits,
num_classes=num_classes,
cosine_weights=self.cosine_weights,
scale_factor=self.scale_factor,
arcface_margin=self._arcface_margin)
classifier_func.trainable_weights = [self.cosine_weights]
return classifier_func
def cosine_classifier_logits(prelogits,
labels,
num_classes,
cosine_weights,
scale_factor,
arcface_margin,
training=True):
"""Compute cosine classifier logits using ArFace margin.
Args:
prelogits: float tensor of shape [batch_size, embedding_layer_dim].
labels: int tensor of shape [batch_size].
num_classes: int, number of classes.
cosine_weights: float tensor of shape [embedding_layer_dim, num_classes].
scale_factor: float.
arcface_margin: float. Only used if greater than zero, and training is True.
training: bool, True if training, False if eval.
Returns:
logits: Float tensor [batch_size, num_classes].
"""
# L2-normalize prelogits, then obtain cosine similarity.
normalized_prelogits = tf.math.l2_normalize(prelogits, axis=1)
normalized_weights = tf.math.l2_normalize(cosine_weights, axis=0)
cosine_sim = tf.matmul(normalized_prelogits, normalized_weights)
# Optionally use ArcFace margin.
if training and arcface_margin > 0.0:
# Reshape labels tensor from [batch_size] to [batch_size, num_classes].
one_hot_labels = tf.one_hot(labels, num_classes)
cosine_sim = apply_arcface_margin(cosine_sim,
one_hot_labels,
arcface_margin)
# Apply the scale factor to logits and return.
logits = scale_factor * cosine_sim
return logits
def apply_arcface_margin(cosine_sim, one_hot_labels, arcface_margin):
"""Applies ArcFace margin to cosine similarity inputs.
  For a reference, see https://arxiv.org/pdf/1801.07698.pdf. ArcFace margin is
applied to angles from correct classes (as per the ArcFace paper), and only
if they are <= (pi - margin). Otherwise, applying the margin may actually
improve their cosine similarity.
Args:
cosine_sim: float tensor with shape [batch_size, num_classes].
one_hot_labels: int tensor with shape [batch_size, num_classes].
arcface_margin: float.
Returns:
cosine_sim_with_margin: Float tensor with shape [batch_size, num_classes].
"""
theta = tf.acos(cosine_sim, name='acos')
selected_labels = tf.where(tf.greater(theta, math.pi - arcface_margin),
tf.zeros_like(one_hot_labels),
one_hot_labels,
name='selected_labels')
final_theta = tf.where(tf.cast(selected_labels, dtype=tf.bool),
theta + arcface_margin,
theta,
name='final_theta')
return tf.cos(final_theta, name='cosine_sim_with_margin')
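# Minimal usage sketch for the cosine classifier above (illustrative; the
# shapes and values are assumptions):
#
#   prelogits = tf.random.normal([4, 2048])            # [batch, embedding_dim]
#   labels = tf.constant([0, 1, 2, 3], dtype=tf.int64)
#   weights = tf.Variable(
#       tf.initializers.GlorotUniform()(shape=[2048, 10]))
#   logits = cosine_classifier_logits(
#       prelogits, labels, num_classes=10, cosine_weights=weights,
#       scale_factor=45.25, arcface_margin=0.1, training=True)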
| 7,641 | 41.692737 | 80 | py |
models | models-master/research/delf/delf/python/training/model/export_local_model.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export DELF tensorflow inference model.
The exported model may use an image pyramid for multi-scale processing, with
local feature extraction including receptive field calculation and keypoint
selection.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
from delf.python.training.model import delf_model
from delf.python.training.model import export_model_utils
FLAGS = flags.FLAGS
flags.DEFINE_string(
'ckpt_path', '/tmp/delf-logdir/delf-weights', 'Path to saved checkpoint.')
flags.DEFINE_string('export_path', None, 'Path where model will be exported.')
flags.DEFINE_boolean(
'block3_strides', True, 'Whether to apply strides after block3.')
flags.DEFINE_float('iou', 1.0, 'IOU for non-max suppression.')
flags.DEFINE_boolean(
'use_autoencoder', True,
'Whether the exported model should use an autoencoder.')
flags.DEFINE_float(
'autoencoder_dimensions', 128,
    'Number of dimensions of the autoencoder. Used only if '
'use_autoencoder=True.')
flags.DEFINE_float(
'local_feature_map_channels', 1024,
'Number of channels at backbone layer used for local feature extraction. '
    'Default value 1024 is the number of channels of block3. Used only if '
'use_autoencoder=True.')
class _ExtractModule(tf.Module):
"""Helper module to build and save DELF model."""
def __init__(self, block3_strides, iou):
"""Initialization of DELF model.
Args:
block3_strides: bool, whether to add strides to the output of block3.
iou: IOU for non-max suppression.
"""
self._stride_factor = 2.0 if block3_strides else 1.0
self._iou = iou
# Setup the DELF model for extraction.
self._model = delf_model.Delf(
block3_strides=block3_strides,
name='DELF',
use_dim_reduction=FLAGS.use_autoencoder,
reduced_dimension=FLAGS.autoencoder_dimensions,
dim_expand_channels=FLAGS.local_feature_map_channels)
def LoadWeights(self, checkpoint_path):
self._model.load_weights(checkpoint_path)
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image'),
tf.TensorSpec(shape=[None], dtype=tf.float32, name='input_scales'),
tf.TensorSpec(shape=(), dtype=tf.int32, name='input_max_feature_num'),
tf.TensorSpec(shape=(), dtype=tf.float32, name='input_abs_thres')
])
def ExtractFeatures(self, input_image, input_scales, input_max_feature_num,
input_abs_thres):
extracted_features = export_model_utils.ExtractLocalFeatures(
input_image, input_scales, input_max_feature_num, input_abs_thres,
self._iou, lambda x: self._model(x, training=False),
self._stride_factor)
named_output_tensors = {}
named_output_tensors['boxes'] = tf.identity(
extracted_features[0], name='boxes')
named_output_tensors['features'] = tf.identity(
extracted_features[1], name='features')
named_output_tensors['scales'] = tf.identity(
extracted_features[2], name='scales')
named_output_tensors['scores'] = tf.identity(
extracted_features[3], name='scores')
return named_output_tensors
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
export_path = FLAGS.export_path
if os.path.exists(export_path):
raise ValueError(f'Export_path {export_path} already exists. Please '
'specify a different path or delete the existing one.')
module = _ExtractModule(FLAGS.block3_strides, FLAGS.iou)
# Load the weights.
checkpoint_path = FLAGS.ckpt_path
module.LoadWeights(checkpoint_path)
print('Checkpoint loaded from ', checkpoint_path)
# Save the module
tf.saved_model.save(module, export_path)
if __name__ == '__main__':
app.run(main)
| 4,626 | 34.868217 | 80 | py |
models | models-master/research/delf/delf/python/training/model/delg_model_test.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the DELG model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import tensorflow as tf
from delf.python.training.model import delg_model
class DelgTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('block3_stridesTrue', True),
('block3_stridesFalse', False),
)
def test_forward_pass(self, block3_strides):
image_size = 321
num_classes = 1000
batch_size = 2
input_shape = (batch_size, image_size, image_size, 3)
local_feature_dim = 64
feature_map_size = image_size // 16 # reduction factor for resnet50.
if block3_strides:
feature_map_size //= 2
model = delg_model.Delg(block3_strides=block3_strides,
use_dim_reduction=True,
reduced_dimension=local_feature_dim)
model.init_classifiers(num_classes)
images = tf.random.uniform(input_shape, minval=-1.0, maxval=1.0, seed=0)
# Run a complete forward pass of the model.
global_feature, attn_scores, local_features = model.build_call(images)
self.assertAllEqual(global_feature.shape, (batch_size, 2048))
self.assertAllEqual(
attn_scores.shape,
(batch_size, feature_map_size, feature_map_size, 1))
self.assertAllEqual(
local_features.shape,
(batch_size, feature_map_size, feature_map_size, local_feature_dim))
@parameterized.named_parameters(
('block3_stridesTrue', True),
('block3_stridesFalse', False),
)
def test_build_model(self, block3_strides):
image_size = 321
num_classes = 1000
batch_size = 2
input_shape = (batch_size, image_size, image_size, 3)
model = delg_model.Delg(
block3_strides=block3_strides,
use_dim_reduction=True)
model.init_classifiers(num_classes)
images = tf.random.uniform(input_shape, minval=-1.0, maxval=1.0, seed=0)
labels = tf.random.uniform((batch_size,),
minval=0,
maxval=model.num_classes - 1,
dtype=tf.int64)
blocks = {}
desc_prelogits = model.backbone(
images, intermediates_dict=blocks, training=False)
desc_logits = model.desc_classification(desc_prelogits, labels)
self.assertAllEqual(desc_prelogits.shape, (batch_size, 2048))
self.assertAllEqual(desc_logits.shape, (batch_size, num_classes))
features = blocks['block3']
attn_prelogits, _, _ = model.attention(features)
attn_logits = model.attn_classification(attn_prelogits)
self.assertAllEqual(attn_prelogits.shape, (batch_size, 1024))
self.assertAllEqual(attn_logits.shape, (batch_size, num_classes))
@parameterized.named_parameters(
('block3_stridesTrue', True),
('block3_stridesFalse', False),
)
def test_train_step(self, block3_strides):
image_size = 321
num_classes = 1000
batch_size = 2
clip_val = 10.0
input_shape = (batch_size, image_size, image_size, 3)
model = delg_model.Delg(
block3_strides=block3_strides,
use_dim_reduction=True)
model.init_classifiers(num_classes)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)
images = tf.random.uniform(input_shape, minval=0.0, maxval=1.0, seed=0)
labels = tf.random.uniform((batch_size,),
minval=0,
maxval=model.num_classes - 1,
dtype=tf.int64)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
def compute_loss(labels, predictions):
per_example_loss = loss_object(labels, predictions)
return tf.nn.compute_average_loss(
per_example_loss, global_batch_size=batch_size)
with tf.GradientTape() as gradient_tape:
(desc_prelogits, attn_prelogits, _, backbone_blocks,
dim_expanded_features, _) = model.global_and_local_forward_pass(images)
# Calculate global loss by applying the descriptor classifier.
desc_logits = model.desc_classification(desc_prelogits, labels)
desc_loss = compute_loss(labels, desc_logits)
# Calculate attention loss by applying the attention block classifier.
attn_logits = model.attn_classification(attn_prelogits)
attn_loss = compute_loss(labels, attn_logits)
# Calculate reconstruction loss between the attention prelogits and the
# backbone.
block3 = tf.stop_gradient(backbone_blocks['block3'])
reconstruction_loss = tf.math.reduce_mean(
tf.keras.losses.MSE(block3, dim_expanded_features))
# Cumulate global loss and attention loss and backpropagate through the
# descriptor layer and attention layer together.
total_loss = desc_loss + attn_loss + reconstruction_loss
gradients = gradient_tape.gradient(total_loss, model.trainable_weights)
clipped, _ = tf.clip_by_global_norm(gradients, clip_norm=clip_val)
optimizer.apply_gradients(zip(clipped, model.trainable_weights))
if __name__ == '__main__':
tf.test.main()
| 5,899 | 37.815789 | 80 | py |
models | models-master/research/delf/delf/python/training/model/resnet50.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet50 backbone used in DELF model.
Copied over from tensorflow/python/eager/benchmarks/resnet50/resnet50.py,
because that code does not support dependencies.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tempfile
from absl import logging
import h5py
import tensorflow as tf
from delf.python.pooling_layers import pooling as pooling_layers
layers = tf.keras.layers
class _IdentityBlock(tf.keras.Model):
"""_IdentityBlock is the block that has no conv layer at shortcut.
Args:
kernel_size: the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
data_format: data_format for the input ('channels_first' or
'channels_last').
"""
def __init__(self, kernel_size, filters, stage, block, data_format):
super(_IdentityBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
bn_axis = 1 if data_format == 'channels_first' else 3
self.conv2a = layers.Conv2D(
filters1, (1, 1), name=conv_name_base + '2a', data_format=data_format)
self.bn2a = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2a')
self.conv2b = layers.Conv2D(
filters2,
kernel_size,
padding='same',
data_format=data_format,
name=conv_name_base + '2b')
self.bn2b = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2b')
self.conv2c = layers.Conv2D(
filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format)
self.bn2c = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2c')
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
x += input_tensor
return tf.nn.relu(x)
class _ConvBlock(tf.keras.Model):
"""_ConvBlock is the block that has a conv layer at shortcut.
Args:
kernel_size: the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
data_format: data_format for the input ('channels_first' or
'channels_last').
strides: strides for the convolution. Note that from stage 3, the first
conv layer at main path is with strides=(2,2), and the shortcut should
have strides=(2,2) as well.
"""
def __init__(self,
kernel_size,
filters,
stage,
block,
data_format,
strides=(2, 2)):
super(_ConvBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
bn_axis = 1 if data_format == 'channels_first' else 3
self.conv2a = layers.Conv2D(
filters1, (1, 1),
strides=strides,
name=conv_name_base + '2a',
data_format=data_format)
self.bn2a = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2a')
self.conv2b = layers.Conv2D(
filters2,
kernel_size,
padding='same',
name=conv_name_base + '2b',
data_format=data_format)
self.bn2b = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2b')
self.conv2c = layers.Conv2D(
filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format)
self.bn2c = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2c')
self.conv_shortcut = layers.Conv2D(
filters3, (1, 1),
strides=strides,
name=conv_name_base + '1',
data_format=data_format)
self.bn_shortcut = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '1')
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
shortcut = self.conv_shortcut(input_tensor)
shortcut = self.bn_shortcut(shortcut, training=training)
x += shortcut
return tf.nn.relu(x)
# pylint: disable=not-callable
class ResNet50(tf.keras.Model):
"""Instantiates the ResNet50 architecture.
Args:
data_format: format for the image. Either 'channels_first' or
'channels_last'. 'channels_first' is typically faster on GPUs while
'channels_last' is typically faster on CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
name: Prefix applied to names of variables created in the model.
include_top: whether to include the fully-connected layer at the top of the
network.
pooling: Optional pooling mode for feature extraction when `include_top` is
False. 'None' means that the output of the model will be the 4D tensor
output of the last convolutional layer. 'avg' means that global average
pooling will be applied to the output of the last convolutional layer, and
thus the output of the model will be a 2D tensor. 'max' means that global
max pooling will be applied. 'gem' means GeM pooling will be applied.
block3_strides: whether to add a stride of 2 to block3 to make it compatible
with tf.slim ResNet implementation.
average_pooling: whether to do average pooling of block4 features before
global pooling.
classes: optional number of classes to classify images into, only to be
specified if `include_top` is True.
gem_power: GeM power for GeM pooling. Only used if pooling == 'gem'.
embedding_layer: whether to create an embedding layer (FC whitening layer).
embedding_layer_dim: size of the embedding layer.
Raises:
ValueError: in case of invalid argument for data_format.
"""
def __init__(self,
data_format,
name='',
include_top=True,
pooling=None,
block3_strides=False,
average_pooling=True,
classes=1000,
gem_power=3.0,
embedding_layer=False,
embedding_layer_dim=2048):
super(ResNet50, self).__init__(name=name)
valid_channel_values = ('channels_first', 'channels_last')
if data_format not in valid_channel_values:
raise ValueError('Unknown data_format: %s. Valid values: %s' %
(data_format, valid_channel_values))
self.include_top = include_top
self.block3_strides = block3_strides
self.average_pooling = average_pooling
self.pooling = pooling
def conv_block(filters, stage, block, strides=(2, 2)):
return _ConvBlock(
3,
filters,
stage=stage,
block=block,
data_format=data_format,
strides=strides)
def id_block(filters, stage, block):
return _IdentityBlock(
3, filters, stage=stage, block=block, data_format=data_format)
self.conv1 = layers.Conv2D(
64, (7, 7),
strides=(2, 2),
data_format=data_format,
padding='same',
name='conv1')
bn_axis = 1 if data_format == 'channels_first' else 3
self.bn_conv1 = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')
self.max_pool = layers.MaxPooling2D((3, 3),
strides=(2, 2),
data_format=data_format)
self.l2a = conv_block([64, 64, 256], stage=2, block='a', strides=(1, 1))
self.l2b = id_block([64, 64, 256], stage=2, block='b')
self.l2c = id_block([64, 64, 256], stage=2, block='c')
self.l3a = conv_block([128, 128, 512], stage=3, block='a')
self.l3b = id_block([128, 128, 512], stage=3, block='b')
self.l3c = id_block([128, 128, 512], stage=3, block='c')
self.l3d = id_block([128, 128, 512], stage=3, block='d')
self.l4a = conv_block([256, 256, 1024], stage=4, block='a')
self.l4b = id_block([256, 256, 1024], stage=4, block='b')
self.l4c = id_block([256, 256, 1024], stage=4, block='c')
self.l4d = id_block([256, 256, 1024], stage=4, block='d')
self.l4e = id_block([256, 256, 1024], stage=4, block='e')
self.l4f = id_block([256, 256, 1024], stage=4, block='f')
# Striding layer that can be used on top of block3 to produce feature maps
# with the same resolution as the TF-Slim implementation.
if self.block3_strides:
self.subsampling_layer = layers.MaxPooling2D((1, 1),
strides=(2, 2),
data_format=data_format)
self.l5a = conv_block([512, 512, 2048],
stage=5,
block='a',
strides=(1, 1))
else:
self.l5a = conv_block([512, 512, 2048], stage=5, block='a')
self.l5b = id_block([512, 512, 2048], stage=5, block='b')
self.l5c = id_block([512, 512, 2048], stage=5, block='c')
self.avg_pool = layers.AveragePooling2D((7, 7),
strides=(7, 7),
data_format=data_format)
if self.include_top:
self.flatten = layers.Flatten()
self.fc1000 = layers.Dense(classes, name='fc1000')
else:
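      # Spatial axes to pool over for the global descriptor; their position
      # depends on the chosen data format.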
reduction_indices = [1, 2] if data_format == 'channels_last' else [2, 3]
reduction_indices = tf.constant(reduction_indices)
if pooling == 'avg':
self.global_pooling = functools.partial(
tf.reduce_mean, axis=reduction_indices, keepdims=False)
elif pooling == 'max':
self.global_pooling = functools.partial(
tf.reduce_max, axis=reduction_indices, keepdims=False)
elif pooling == 'gem':
logging.info('Adding GeMPooling layer with power %f', gem_power)
self.global_pooling = functools.partial(
pooling_layers.gem, axis=reduction_indices, power=gem_power)
else:
self.global_pooling = None
if embedding_layer:
logging.info('Adding embedding layer with dimension %d',
embedding_layer_dim)
self.embedding_layer = layers.Dense(
embedding_layer_dim, name='embedding_layer')
else:
self.embedding_layer = None

  def build_call(self, inputs, training=True, intermediates_dict=None):
    """Builds the ResNet50 model.

    Args:
      inputs: Images to compute features for.
      training: Whether model is in training phase.
      intermediates_dict: `None` or dictionary. If not None, accumulate feature
        maps from intermediate blocks into the dictionary.

    Returns:
      Tensor with featuremap.
    """
x = self.conv1(inputs)
x = self.bn_conv1(x, training=training)
x = tf.nn.relu(x)
if intermediates_dict is not None:
intermediates_dict['block0'] = x
x = self.max_pool(x)
if intermediates_dict is not None:
intermediates_dict['block0mp'] = x
# Block 1 (equivalent to "conv2" in Resnet paper).
x = self.l2a(x, training=training)
x = self.l2b(x, training=training)
x = self.l2c(x, training=training)
if intermediates_dict is not None:
intermediates_dict['block1'] = x
# Block 2 (equivalent to "conv3" in Resnet paper).
x = self.l3a(x, training=training)
x = self.l3b(x, training=training)
x = self.l3c(x, training=training)
x = self.l3d(x, training=training)
if intermediates_dict is not None:
intermediates_dict['block2'] = x
# Block 3 (equivalent to "conv4" in Resnet paper).
x = self.l4a(x, training=training)
x = self.l4b(x, training=training)
x = self.l4c(x, training=training)
x = self.l4d(x, training=training)
x = self.l4e(x, training=training)
x = self.l4f(x, training=training)
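    # If `block3_strides` is set, downsample here so block3 features match the
    # resolution of the TF-Slim implementation; the last stage then runs
    # without additional striding.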
if self.block3_strides:
x = self.subsampling_layer(x)
if intermediates_dict is not None:
intermediates_dict['block3'] = x
else:
if intermediates_dict is not None:
intermediates_dict['block3'] = x
x = self.l5a(x, training=training)
x = self.l5b(x, training=training)
x = self.l5c(x, training=training)
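    # Optionally apply 7x7 average pooling to the final feature map before the
    # classification head or global pooling.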
if self.average_pooling:
x = self.avg_pool(x)
if intermediates_dict is not None:
intermediates_dict['block4'] = x
else:
if intermediates_dict is not None:
intermediates_dict['block4'] = x
if self.include_top:
return self.fc1000(self.flatten(x))
elif self.global_pooling:
x = self.global_pooling(x)
if self.embedding_layer:
x = self.embedding_layer(x)
return x
else:
return x

  def call(self, inputs, training=True, intermediates_dict=None):
    """Call the ResNet50 model.

    Args:
      inputs: Images to compute features for.
      training: Whether model is in training phase.
      intermediates_dict: `None` or dictionary. If not None, accumulate feature
        maps from intermediate blocks into the dictionary.

    Returns:
      Tensor with featuremap.
    """
return self.build_call(inputs, training, intermediates_dict)

  def restore_weights(self, filepath):
    """Load pretrained weights.

    This function loads a .h5 file from the filepath with saved model weights
    and assigns them to the model.

    Args:
      filepath: String, path to the .h5 file.

    Raises:
      ValueError: if the file referenced by `filepath` does not exist.
    """
if not tf.io.gfile.exists(filepath):
      raise ValueError('Unable to load weights from %s. You must provide a '
'valid file.' % (filepath))
# Create a local copy of the weights file for h5py to be able to read it.
local_filename = os.path.basename(filepath)
tmp_filename = os.path.join(tempfile.gettempdir(), local_filename)
tf.io.gfile.copy(filepath, tmp_filename, overwrite=True)
# Load the content of the weights file.
f = h5py.File(tmp_filename, mode='r')
saved_layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
try:
# Iterate through all the layers assuming the max `depth` is 2.
for layer in self.layers:
if hasattr(layer, 'layers'):
for inlayer in layer.layers:
# Make sure the weights are in the saved model, and that we are in
# the innermost layer.
if inlayer.name not in saved_layer_names:
              raise ValueError('Layer %s absent from the pretrained weights. '
'Unable to load its weights.' % (inlayer.name))
if hasattr(inlayer, 'layers'):
raise ValueError('Layer %s is not a depth 2 layer. Unable to load'
                               ' its weights.' % (inlayer.name))
# Assign the weights in the current layer.
g = f[inlayer.name]
weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
weight_values = [g[weight_name] for weight_name in weight_names]
logging.info('Setting the weights for layer %s', inlayer.name)
inlayer.set_weights(weight_values)
finally:
# Clean up the temporary file.
tf.io.gfile.remove(tmp_filename)

  def log_weights(self):
"""Log backbone weights."""
logging.info('Logging backbone weights')
logging.info('------------------------')
for layer in self.layers:
if hasattr(layer, 'layers'):
for inlayer in layer.layers:
          logging.info('Weights for layer: %s, inlayer %s', layer.name,
inlayer.name)
weights = inlayer.get_weights()
logging.info(weights)
else:
logging.info('Layer %s does not have inner layers.', layer.name)
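

# A minimal usage sketch (not part of the original module), assuming the model
# is used as a global-feature backbone with GeM pooling and an embedding
# (whitening) layer. The input shape below is an arbitrary placeholder.
if __name__ == '__main__':
  resnet = ResNet50(
      'channels_last',
      name='resnet50_sketch',
      include_top=False,
      pooling='gem',
      gem_power=3.0,
      embedding_layer=True,
      embedding_layer_dim=2048)
  dummy_images = tf.zeros([1, 224, 224, 3], dtype=tf.float32)
  feature_maps = {}
  descriptors = resnet(
      dummy_images, training=False, intermediates_dict=feature_maps)
  logging.info('Descriptor shape: %s, intermediate blocks: %s',
               descriptors.shape, sorted(feature_maps.keys()))
  # Pretrained weights could then be restored from an .h5 checkpoint, e.g.:
  # resnet.restore_weights('/path/to/resnet50_weights.h5')  # hypothetical path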
| 17,089 | 36.071584 | 80 | py |