desc | decl | bodies |
---|---|---|
'Extracts second stage box classifier features.
This function reconstructs the "second half" of the Inception ResNet v2
network after the part defined in `_extract_proposal_features`.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name.
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.'
| def _extract_box_classifier_features(self, proposal_feature_maps, scope):
| with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights):
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(weight_decay=self._weight_decay)):
with slim.arg_scope([slim.batch_norm], is_training=False):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
with tf.variable_scope('Mixed_7a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(proposal_feature_maps, 3, stride=2, padding='VALID', scope='MaxPool_1a_3x3')
net = tf.concat([tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
net = slim.repeat(net, 9, inception_resnet_v2.block8, scale=0.2)
net = inception_resnet_v2.block8(net, activation_fn=None)
proposal_classifier_features = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')
return proposal_classifier_features
|
'Returns callable for loading a checkpoint into the tensorflow graph.
Note that this overrides the default implementation in
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for
InceptionResnetV2 checkpoints.
TODO: revisit whether it\'s possible to force the `Repeat` namescope as
created in `_extract_box_classifier_features` to start counting at 2 (e.g.
`Repeat_2`) so that the default restore_fn can be used.
Args:
checkpoint_path: Path to checkpoint to restore.
first_stage_feature_extractor_scope: A scope name for the first stage
feature extractor.
second_stage_feature_extractor_scope: A scope name for the second stage
feature extractor.
Returns:
a callable which takes a tf.Session as input and loads a checkpoint when
run.'
| def restore_from_classification_checkpoint_fn(self, checkpoint_path, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope):
| variables_to_restore = {}
for variable in tf.global_variables():
if variable.op.name.startswith(first_stage_feature_extractor_scope):
var_name = variable.op.name.replace((first_stage_feature_extractor_scope + '/'), '')
variables_to_restore[var_name] = variable
if variable.op.name.startswith(second_stage_feature_extractor_scope):
var_name = variable.op.name.replace((second_stage_feature_extractor_scope + '/InceptionResnetV2/Repeat'), 'InceptionResnetV2/Repeat_2')
var_name = var_name.replace((second_stage_feature_extractor_scope + '/'), '')
variables_to_restore[var_name] = variable
variables_to_restore = variables_helper.get_variables_available_in_checkpoint(variables_to_restore, checkpoint_path)
saver = tf.train.Saver(variables_to_restore)
def restore(sess):
saver.restore(sess, checkpoint_path)
return restore
|
'Call the loss function.
Args:
prediction_tensor: a tensor representing predicted quantities.
target_tensor: a tensor representing regression or classification targets.
ignore_nan_targets: whether to ignore nan targets in the loss computation.
E.g. can be used if the target tensor is missing groundtruth data that
shouldn\'t be factored into the loss.
scope: Op scope name. Defaults to \'Loss\' if None.
**params: Additional keyword arguments for specific implementations of
the Loss.
Returns:
loss: a tensor representing the value of the loss function.'
| def __call__(self, prediction_tensor, target_tensor, ignore_nan_targets=False, scope=None, **params):
| with tf.name_scope(scope, 'Loss', [prediction_tensor, target_tensor, params]) as scope:
if ignore_nan_targets:
target_tensor = tf.where(tf.is_nan(target_tensor), prediction_tensor, target_tensor)
return self._compute_loss(prediction_tensor, target_tensor, **params)
|
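A minimal NumPy sketch (values are illustrative) of what ignore_nan_targets does above: NaN targets are replaced by the corresponding predictions, so those entries contribute nothing to the loss.

import numpy as np

predictions = np.array([1.0, 2.0, 3.0])
targets = np.array([0.5, np.nan, 2.0])
# Replace NaN targets with the predictions, mirroring the tf.where call above.
targets = np.where(np.isnan(targets), predictions, targets)
loss = 0.5 * np.sum((predictions - targets) ** 2)
print(loss)  # 0.625; the NaN entry contributes zero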
'Method to be overridden by implementations.
Args:
prediction_tensor: a tensor representing predicted quantities
target_tensor: a tensor representing regression or classification targets
**params: Additional keyword arguments for specific implementations of
the Loss.
Returns:
loss: a tensor representing the value of the loss function'
| @abstractmethod
def _compute_loss(self, prediction_tensor, target_tensor, **params):
| pass
|
'Constructor.
Args:
anchorwise_output: Outputs loss per anchor. (default False)'
| def __init__(self, anchorwise_output=False):
| self._anchorwise_output = anchorwise_output
|
'Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a (scalar) tensor representing the value of the loss function
or a float tensor of shape [batch_size, num_anchors]'
| def _compute_loss(self, prediction_tensor, target_tensor, weights):
| weighted_diff = ((prediction_tensor - target_tensor) * tf.expand_dims(weights, 2))
square_diff = (0.5 * tf.square(weighted_diff))
if self._anchorwise_output:
return tf.reduce_sum(square_diff, 2)
return tf.reduce_sum(square_diff)
|
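A small NumPy sketch of the weighted L2 computation above for a single image and anchor with code_size=2 (all values are made up for illustration).

import numpy as np

prediction = np.array([[[1.0, 2.0]]])   # [batch_size, num_anchors, code_size]
target = np.array([[[0.0, 0.0]]])
weights = np.array([[1.0]])             # [batch_size, num_anchors]
# Broadcast the anchorwise weight over the code dimension, as above.
weighted_diff = (prediction - target) * weights[..., np.newaxis]
loss = 0.5 * np.sum(weighted_diff ** 2)
print(loss)  # 0.5 * (1 + 4) = 2.5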
'Constructor.
Args:
anchorwise_output: Outputs loss per anchor. (default False)'
| def __init__(self, anchorwise_output=False):
| self._anchorwise_output = anchorwise_output
|
'Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a (scalar) tensor representing the value of the loss function'
| def _compute_loss(self, prediction_tensor, target_tensor, weights):
| diff = (prediction_tensor - target_tensor)
abs_diff = tf.abs(diff)
abs_diff_lt_1 = tf.less(abs_diff, 1)
anchorwise_smooth_l1norm = (tf.reduce_sum(tf.where(abs_diff_lt_1, (0.5 * tf.square(abs_diff)), (abs_diff - 0.5)), 2) * weights)
if self._anchorwise_output:
return anchorwise_smooth_l1norm
return tf.reduce_sum(anchorwise_smooth_l1norm)
|
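For reference, a sketch of the per-element smooth L1 penalty applied above before the per-anchor sum and weighting (inputs are illustrative).

import numpy as np

def smooth_l1(x):
    # 0.5 * x**2 when |x| < 1, |x| - 0.5 otherwise.
    abs_x = np.abs(x)
    return np.where(abs_x < 1, 0.5 * abs_x ** 2, abs_x - 0.5)

print(smooth_l1(np.array([0.2, 1.5])))  # [0.02, 1.0]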
'Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4]
representing the decoded predicted boxes
target_tensor: A float tensor of shape [batch_size, num_anchors, 4]
representing the decoded target boxes
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a (scalar) tensor representing the value of the loss function'
| def _compute_loss(self, prediction_tensor, target_tensor, weights):
| predicted_boxes = box_list.BoxList(tf.reshape(prediction_tensor, [(-1), 4]))
target_boxes = box_list.BoxList(tf.reshape(target_tensor, [(-1), 4]))
per_anchor_iou_loss = (1.0 - box_list_ops.matched_iou(predicted_boxes, target_boxes))
return tf.reduce_sum((tf.reshape(weights, [(-1)]) * per_anchor_iou_loss))
|
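A worked example of the per-anchor IoU loss above for one matched box pair (coordinates are illustrative).

import numpy as np

pred = np.array([0.0, 0.0, 1.0, 1.0])      # [ymin, xmin, ymax, xmax], area 1.0
target = np.array([0.0, 0.5, 1.0, 1.5])    # area 1.0
inter_h = max(0.0, min(pred[2], target[2]) - max(pred[0], target[0]))
inter_w = max(0.0, min(pred[3], target[3]) - max(pred[1], target[1]))
intersection = inter_h * inter_w            # 0.5
union = 1.0 + 1.0 - intersection            # 1.5
print(1.0 - intersection / union)           # 1 - 1/3 = 0.666...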
'Constructor.
Args:
anchorwise_output: Outputs loss per anchor. (default False)'
| def __init__(self, anchorwise_output=False):
| self._anchorwise_output = anchorwise_output
|
'Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a (scalar) tensor representing the value of the loss function
or a float tensor of shape [batch_size, num_anchors]'
| def _compute_loss(self, prediction_tensor, target_tensor, weights, class_indices=None):
| weights = tf.expand_dims(weights, 2)
if (class_indices is not None):
weights *= tf.reshape(ops.indices_to_dense_vector(class_indices, tf.shape(prediction_tensor)[2]), [1, 1, (-1)])
per_entry_cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(labels=target_tensor, logits=prediction_tensor)
if self._anchorwise_output:
return tf.reduce_sum((per_entry_cross_ent * weights), 2)
return tf.reduce_sum((per_entry_cross_ent * weights))
|
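A NumPy sketch of how class_indices restricts the loss above, assuming indices_to_dense_vector builds a dense 0/1 indicator over the class dimension: multiplying it into the weights zeroes out every class not listed (sizes and values are illustrative).

import numpy as np

num_classes = 4
class_indices = [1, 3]
class_mask = np.zeros(num_classes)
class_mask[class_indices] = 1.0             # [0, 1, 0, 1]
weights = np.ones((1, 2, 1))                # [batch_size, num_anchors, 1]
effective_weights = weights * class_mask.reshape(1, 1, -1)
print(effective_weights[0, 0])              # [0. 1. 0. 1.]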
'Constructor.
Args:
anchorwise_output: Whether to output loss per anchor (default False)'
| def __init__(self, anchorwise_output=False):
| self._anchorwise_output = anchorwise_output
|
'Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a (scalar) tensor representing the value of the loss function'
| def _compute_loss(self, prediction_tensor, target_tensor, weights):
| num_classes = prediction_tensor.get_shape().as_list()[(-1)]
per_row_cross_ent = tf.nn.softmax_cross_entropy_with_logits(labels=tf.reshape(target_tensor, [(-1), num_classes]), logits=tf.reshape(prediction_tensor, [(-1), num_classes]))
if self._anchorwise_output:
return (tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights)
return tf.reduce_sum((per_row_cross_ent * tf.reshape(weights, [(-1)])))
|
'Constructor.
Args:
alpha: a float32 scalar tensor between 0 and 1 representing interpolation
weight
bootstrap_type: set to either \'hard\' or \'soft\' (default)
anchorwise_output: Outputs loss per anchor. (default False)
Raises:
ValueError: if bootstrap_type is not either \'hard\' or \'soft\''
| def __init__(self, alpha, bootstrap_type='soft', anchorwise_output=False):
| if ((bootstrap_type != 'hard') and (bootstrap_type != 'soft')):
raise ValueError("Unrecognized bootstrap_type: must be one of 'hard' or 'soft'.")
self._alpha = alpha
self._bootstrap_type = bootstrap_type
self._anchorwise_output = anchorwise_output
|
'Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a (scalar) tensor representing the value of the loss function
or a float tensor of shape [batch_size, num_anchors]'
| def _compute_loss(self, prediction_tensor, target_tensor, weights):
| if (self._bootstrap_type == 'soft'):
bootstrap_target_tensor = ((self._alpha * target_tensor) + ((1.0 - self._alpha) * tf.sigmoid(prediction_tensor)))
else:
bootstrap_target_tensor = ((self._alpha * target_tensor) + ((1.0 - self._alpha) * tf.cast((tf.sigmoid(prediction_tensor) > 0.5), tf.float32)))
per_entry_cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(labels=bootstrap_target_tensor, logits=prediction_tensor)
if self._anchorwise_output:
return tf.reduce_sum((per_entry_cross_ent * tf.expand_dims(weights, 2)), 2)
return tf.reduce_sum((per_entry_cross_ent * tf.expand_dims(weights, 2)))
|
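A sketch of the two bootstrap targets constructed above: 'soft' blends the label with the predicted probability, while 'hard' blends it with a 0/1 thresholded prediction (alpha and logits are illustrative).

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

alpha = 0.95
target = np.array([1.0, 0.0])
logits = np.array([2.0, -1.0])
soft = alpha * target + (1 - alpha) * sigmoid(logits)
hard = alpha * target + (1 - alpha) * (sigmoid(logits) > 0.5).astype(np.float32)
print(soft)  # approximately [0.994, 0.013]
print(hard)  # [1.0, 0.0]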
'Constructor.
The hard example mining implemented by this class can replicate the behavior
in the two aforementioned papers (Srivastava et al., and Liu et al).
To replicate the A2 paper (Srivastava et al), num_hard_examples is set
to a fixed parameter (64 by default) and iou_threshold is set to .7 for
running non-max-suppression on the predicted boxes prior to hard mining.
In order to replicate the SSD paper (Liu et al), num_hard_examples should
be set to None, max_negatives_per_positive should be 3 and iou_threshold
should be 1.0 (in order to effectively turn off NMS).
Args:
num_hard_examples: maximum number of hard examples to be
selected per image (prior to enforcing max negative to positive ratio
constraint). If set to None, all examples obtained after NMS are
considered.
iou_threshold: minimum intersection over union for an example
to be discarded during NMS.
loss_type: use only classification losses (\'cls\'),
localization losses (\'loc\') or both losses (\'both\', the default).
In the last case, cls_loss_weight and loc_loss_weight are used to
compute weighted sum of the two losses.
cls_loss_weight: weight for classification loss.
loc_loss_weight: weight for location loss.
max_negatives_per_positive: maximum number of negatives to retain for
each positive anchor. By default, max_negatives_per_positive is None,
which means that we do not enforce a prespecified negative:positive
ratio. Note also that max_negatives_per_positive can be a float
(and will be converted to a float even if it is passed in otherwise).
min_negatives_per_image: minimum number of negative anchors to sample for
a given image. Setting this to a positive number allows sampling
negatives even in images without any positive anchors, rather than
biasing the sampler towards images with at least one positive anchor.'
| def __init__(self, num_hard_examples=64, iou_threshold=0.7, loss_type='both', cls_loss_weight=0.05, loc_loss_weight=0.06, max_negatives_per_positive=None, min_negatives_per_image=0):
| self._num_hard_examples = num_hard_examples
self._iou_threshold = iou_threshold
self._loss_type = loss_type
self._cls_loss_weight = cls_loss_weight
self._loc_loss_weight = loc_loss_weight
self._max_negatives_per_positive = max_negatives_per_positive
self._min_negatives_per_image = min_negatives_per_image
if (self._max_negatives_per_positive is not None):
self._max_negatives_per_positive = float(self._max_negatives_per_positive)
self._num_positives_list = None
self._num_negatives_list = None
|
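A hedged usage sketch of the two configurations described in the docstring above; the class name HardExampleMiner is assumed from the surrounding library, and loss_type='cls' for the SSD-style case is illustrative.

# OHEM-style mining: a fixed budget of hard examples, NMS at IoU 0.7.
ohem_miner = HardExampleMiner(num_hard_examples=64, iou_threshold=0.7,
                              loss_type='both')
# SSD-style mining: keep all post-NMS examples, enforce a 3:1 neg:pos ratio.
ssd_miner = HardExampleMiner(num_hard_examples=None, iou_threshold=1.0,
                             loss_type='cls', max_negatives_per_positive=3)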
'Computes localization and classification losses after hard mining.
Args:
location_losses: a float tensor of shape [num_images, num_anchors]
representing anchorwise localization losses.
cls_losses: a float tensor of shape [num_images, num_anchors]
representing anchorwise classification losses.
decoded_boxlist_list: a list of decoded BoxList representing location
predictions for each image.
match_list: an optional list of matcher.Match objects encoding the match
between anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors. Match objects in match_list are
used to reference which anchors are positive, negative or ignored. If
self._max_negatives_per_positive exists, these are then used to enforce
a prespecified negative to positive ratio.
Returns:
mined_location_loss: a float scalar with sum of localization losses from
selected hard examples.
mined_cls_loss: a float scalar with sum of classification losses from
selected hard examples.
Raises:
ValueError: if location_losses, cls_losses and decoded_boxlist_list do
not have compatible shapes (i.e., they must correspond to the same
number of images).
ValueError: if match_list is specified but its length does not match
len(decoded_boxlist_list).'
| def __call__(self, location_losses, cls_losses, decoded_boxlist_list, match_list=None):
| mined_location_losses = []
mined_cls_losses = []
location_losses = tf.unstack(location_losses)
cls_losses = tf.unstack(cls_losses)
num_images = len(decoded_boxlist_list)
if (not match_list):
match_list = (num_images * [None])
if (not (len(location_losses) == len(decoded_boxlist_list) == len(cls_losses))):
raise ValueError('location_losses, cls_losses and decoded_boxlist_list do not have compatible shapes.')
if (not isinstance(match_list, list)):
raise ValueError('match_list must be a list.')
if (len(match_list) != len(decoded_boxlist_list)):
raise ValueError('match_list must either be None or have length=len(decoded_boxlist_list).')
num_positives_list = []
num_negatives_list = []
for (ind, detection_boxlist) in enumerate(decoded_boxlist_list):
box_locations = detection_boxlist.get()
match = match_list[ind]
image_losses = cls_losses[ind]
if (self._loss_type == 'loc'):
image_losses = location_losses[ind]
elif (self._loss_type == 'both'):
image_losses *= self._cls_loss_weight
image_losses += (location_losses[ind] * self._loc_loss_weight)
if (self._num_hard_examples is not None):
num_hard_examples = self._num_hard_examples
else:
num_hard_examples = detection_boxlist.num_boxes()
selected_indices = tf.image.non_max_suppression(box_locations, image_losses, num_hard_examples, self._iou_threshold)
if ((self._max_negatives_per_positive is not None) and match):
(selected_indices, num_positives, num_negatives) = self._subsample_selection_to_desired_neg_pos_ratio(selected_indices, match, self._max_negatives_per_positive, self._min_negatives_per_image)
num_positives_list.append(num_positives)
num_negatives_list.append(num_negatives)
mined_location_losses.append(tf.reduce_sum(tf.gather(location_losses[ind], selected_indices)))
mined_cls_losses.append(tf.reduce_sum(tf.gather(cls_losses[ind], selected_indices)))
location_loss = tf.reduce_sum(tf.stack(mined_location_losses))
cls_loss = tf.reduce_sum(tf.stack(mined_cls_losses))
if (match and self._max_negatives_per_positive):
self._num_positives_list = num_positives_list
self._num_negatives_list = num_negatives_list
return (location_loss, cls_loss)
|
'Summarize the number of positives and negatives after mining.'
| def summarize(self):
| if (self._num_positives_list and self._num_negatives_list):
avg_num_positives = tf.reduce_mean(tf.to_float(self._num_positives_list))
avg_num_negatives = tf.reduce_mean(tf.to_float(self._num_negatives_list))
tf.summary.scalar('HardExampleMiner/NumPositives', avg_num_positives)
tf.summary.scalar('HardExampleMiner/NumNegatives', avg_num_negatives)
|
'Subsample a collection of selected indices to a desired neg:pos ratio.
This function takes a subset of M indices (indexing into a large anchor
collection of N anchors where M<N) which are labeled as positive/negative
via a Match object (matched indices are positive, unmatched indices
are negative). It returns a subset of the provided indices retaining all
positives as well as up to the first K negatives, where:
K = floor(max_negatives_per_positive * num_positives).
For example, if indices=[2, 4, 5, 7, 9, 10] (indexing into 12 anchors),
with positives=[2, 5] and negatives=[4, 7, 9, 10] and
max_negatives_per_positive=1, then the returned subset of indices
is [2, 4, 5, 7].
Args:
indices: An integer tensor of shape [M] representing a collection
of selected anchor indices
match: A matcher.Match object encoding the match between anchors and
groundtruth boxes for a given image, with rows of the Match objects
corresponding to groundtruth boxes and columns corresponding to anchors.
max_negatives_per_positive: (float) maximum number of negatives for
each positive anchor.
min_negatives_per_image: minimum number of negative anchors for a given
image. Allow sampling negatives in image without any positive anchors.
Returns:
selected_indices: An integer tensor of shape [M\'] representing a
collection of selected anchor indices with M\' <= M.
num_positives: An integer tensor representing the number of positive
examples in selected set of indices.
num_negatives: An integer tensor representing the number of negative
examples in selected set of indices.'
| def _subsample_selection_to_desired_neg_pos_ratio(self, indices, match, max_negatives_per_positive, min_negatives_per_image=0):
| positives_indicator = tf.gather(match.matched_column_indicator(), indices)
negatives_indicator = tf.gather(match.unmatched_column_indicator(), indices)
num_positives = tf.reduce_sum(tf.to_int32(positives_indicator))
max_negatives = tf.maximum(min_negatives_per_image, tf.to_int32((max_negatives_per_positive * tf.to_float(num_positives))))
topk_negatives_indicator = tf.less_equal(tf.cumsum(tf.to_int32(negatives_indicator)), max_negatives)
subsampled_selection_indices = tf.where(tf.logical_or(positives_indicator, topk_negatives_indicator))
num_negatives = (tf.size(subsampled_selection_indices) - num_positives)
return (tf.reshape(tf.gather(indices, subsampled_selection_indices), [(-1)]), num_positives, num_negatives)
|
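A NumPy walk-through of the selection above, using the worked example from the docstring (max_negatives_per_positive=1, min_negatives_per_image=0).

import numpy as np

indices = np.array([2, 4, 5, 7, 9, 10])
positives_indicator = np.array([True, False, True, False, False, False])
negatives_indicator = ~positives_indicator
num_positives = positives_indicator.sum()                 # 2
max_negatives = max(0, int(1.0 * num_positives))          # 2
topk_negatives = np.cumsum(negatives_indicator) <= max_negatives
keep = positives_indicator | topk_negatives
print(indices[keep])  # [2 4 5 7]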
'Computes matrix of pairwise similarity between BoxLists.
This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to \'Compare\' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.'
| def compare(self, boxlist1, boxlist2, scope=None):
| with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:
return self._compare(boxlist1, boxlist2)
|
'Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.'
| def _compare(self, boxlist1, boxlist2):
| return box_list_ops.iou(boxlist1, boxlist2)
|
'Compute matrix of (negated) squared distances.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing negated pairwise squared distance.'
| def _compare(self, boxlist1, boxlist2):
| return ((-1) * box_list_ops.sq_dist(boxlist1, boxlist2))
|
'Compute pairwise IOA similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise IOA scores.'
| def _compare(self, boxlist1, boxlist2):
| return box_list_ops.ioa(boxlist1, boxlist2)
|
'Constructs a minibatch sampler.
Args:
positive_fraction: desired fraction of positive examples (scalar in [0,1])
Raises:
ValueError: if positive_fraction < 0, or positive_fraction > 1'
| def __init__(self, positive_fraction=0.5):
| if ((positive_fraction < 0) or (positive_fraction > 1)):
raise ValueError(('positive_fraction should be in range [0,1]. Received: %s.' % positive_fraction))
self._positive_fraction = positive_fraction
|
'Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples.
Returns:
is_sampled: boolean tensor of shape [N], True for entries which are
sampled.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.'
| def subsample(self, indicator, batch_size, labels):
| if (len(indicator.get_shape().as_list()) != 1):
raise ValueError(('indicator must be 1 dimensional, got a tensor of shape %s' % indicator.get_shape()))
if (len(labels.get_shape().as_list()) != 1):
raise ValueError(('labels must be 1 dimensional, got a tensor of shape %s' % labels.get_shape()))
if (labels.dtype != tf.bool):
raise ValueError(('labels should be of type bool. Received: %s' % labels.dtype))
if (indicator.dtype != tf.bool):
raise ValueError(('indicator should be of type bool. Received: %s' % indicator.dtype))
negative_idx = tf.logical_not(labels)
positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)
max_num_pos = int((self._positive_fraction * batch_size))
sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos)
max_num_neg = (batch_size - tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32)))
sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg)
sampled_idx = tf.logical_or(sampled_pos_idx, sampled_neg_idx)
return sampled_idx
|
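A worked example of the sampling budget computed above: the positive quota is positive_fraction * batch_size, and however many positives are actually sampled, the remaining slots go to negatives (numbers are illustrative).

batch_size = 64
positive_fraction = 0.25
max_num_pos = int(positive_fraction * batch_size)   # 16 positive slots
num_sampled_pos = 9                                  # e.g. only 9 positives exist
max_num_neg = batch_size - num_sampled_pos           # 55 negative slots
print(max_num_pos, max_num_neg)                      # 16 55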
'Tests that named constructor gives working target assigners.
TODO: Make this test more general.'
| def test_create_target_assigner(self):
| corners = [[0.0, 0.0, 1.0, 1.0]]
groundtruth = box_list.BoxList(tf.constant(corners))
priors = box_list.BoxList(tf.constant(corners))
prior_stddevs = tf.constant([[1.0, 1.0, 1.0, 1.0]])
priors.add_field('stddev', prior_stddevs)
multibox_ta = targetassigner.create_target_assigner('Multibox', stage='proposal')
multibox_ta.assign(priors, groundtruth)
anchors = box_list.BoxList(tf.constant(corners))
faster_rcnn_proposals_ta = targetassigner.create_target_assigner('FasterRCNN', stage='proposal')
faster_rcnn_proposals_ta.assign(anchors, groundtruth)
fast_rcnn_ta = targetassigner.create_target_assigner('FastRCNN')
fast_rcnn_ta.assign(anchors, groundtruth)
faster_rcnn_detection_ta = targetassigner.create_target_assigner('FasterRCNN', stage='detection')
faster_rcnn_detection_ta.assign(anchors, groundtruth)
with self.assertRaises(ValueError):
targetassigner.create_target_assigner('InvalidDetector', stage='invalid_stage')
|
'Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in
float32 format.'
| def __init__(self, boxes):
| if ((len(boxes.get_shape()) != 2) or (boxes.get_shape()[(-1)] != 4)):
raise ValueError('Invalid dimensions for box data.')
if (boxes.dtype != tf.float32):
raise ValueError('Invalid tensor type: should be tf.float32')
self.data = {'boxes': boxes}
|
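A hedged usage sketch of the constructor above; the import path object_detection.core.box_list is assumed.

import tensorflow as tf
from object_detection.core import box_list  # import path assumed

boxes = tf.constant([[0.0, 0.0, 0.5, 0.5],
                     [0.25, 0.25, 1.0, 1.0]], dtype=tf.float32)
boxlist = box_list.BoxList(boxes)
boxlist.add_field('scores', tf.constant([0.9, 0.75]))  # extra per-box data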
'Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.'
| def num_boxes(self):
| return tf.shape(self.data['boxes'])[0]
|
'Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferrable at graph construction time.'
| def num_boxes_static(self):
| return self.data['boxes'].get_shape()[0].value
|
'Returns all fields.'
| def get_all_fields(self):
| return self.data.keys()
|
'Returns all non-box fields (i.e., everything not named \'boxes\').'
| def get_extra_fields(self):
| return [k for k in self.data.keys() if (k != 'boxes')]
|
'Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList'
| def add_field(self, field, field_data):
| self.data[field] = field_data
|
'Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.'
| def get(self):
| return self.get_field('boxes')
|
'Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data'
| def set(self, boxes):
| if ((len(boxes.get_shape()) != 2) or (boxes.get_shape()[(-1)] != 4)):
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
|
'Accesses data stored in the box collection under a given field name.
Args:
field: a string specifying the field to be accessed.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field'
| def get_field(self, field):
| if (not self.has_field(field)):
raise ValueError((('field ' + str(field)) + ' does not exist'))
return self.data[field]
|
'Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.'
| def set_field(self, field, value):
| if (not self.has_field(field)):
raise ValueError(('field %s does not exist' % field))
self.data[field] = value
|
'Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].'
| def get_center_coordinates_and_sizes(self, scope=None):
| with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):
box_corners = self.get()
(ymin, xmin, ymax, xmax) = tf.unstack(tf.transpose(box_corners))
width = (xmax - xmin)
height = (ymax - ymin)
ycenter = (ymin + (height / 2.0))
xcenter = (xmin + (width / 2.0))
return [ycenter, xcenter, height, width]
|
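A worked numeric example of the corner-to-center conversion above (coordinates are illustrative).

ymin, xmin, ymax, xmax = 0.0, 0.0, 0.5, 1.0
height, width = ymax - ymin, xmax - xmin
ycenter, xcenter = ymin + height / 2.0, xmin + width / 2.0
print(ycenter, xcenter, height, width)  # 0.25 0.5 0.5 1.0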
'Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.'
| def transpose_coordinates(self, scope=None):
| with tf.name_scope(scope, 'transpose_coordinates'):
(y_min, x_min, y_max, x_max) = tf.split(value=self.get(), num_or_size_splits=4, axis=1)
self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
|
'Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.'
| def as_tensor_dict(self, fields=None):
| tensor_dict = {}
if (fields is None):
fields = self.get_all_fields()
for field in fields:
if (not self.has_field(field)):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
|
'Tests image resizing, checking output sizes.'
| def testResizeToRange(self):
| in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
min_dim = 50
max_dim = 100
expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]
for (in_shape, expected_shape) in zip(in_shape_list, expected_shape_list):
in_image = tf.random_uniform(in_shape)
out_image = preprocessor.resize_to_range(in_image, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
with self.test_session() as sess:
out_image_shape = sess.run(out_image_shape)
self.assertAllEqual(out_image_shape, expected_shape)
|
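A small sketch of the resizing rule the expected shapes above assume: scale so the shorter side reaches min_dimension, unless that would push the longer side past max_dimension, in which case pin the longer side instead.

def expected_resize(height, width, min_dim=50, max_dim=100):
    scale = min_dim / float(min(height, width))
    if scale * max(height, width) > max_dim:
        scale = max_dim / float(max(height, width))
    return int(round(scale * height)), int(round(scale * width))

print(expected_resize(60, 40))  # (75, 50)
print(expected_resize(15, 30))  # (50, 100)
print(expected_resize(15, 50))  # (30, 100)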
'Tests image resizing, checking output sizes.'
| def testResizeToRangeWithMasks(self):
| in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
(out_image, out_masks) = preprocessor.resize_to_range(in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
(out_image_shape, out_masks_shape) = sess.run([out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
|
'Tests image resizing, checking output sizes.'
| def testResizeToRangeWithNoInstanceMask(self):
| in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
(out_image, out_masks) = preprocessor.resize_to_range(in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
(out_image_shape, out_masks_shape) = sess.run([out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
|
'Tests image resizing, checking output sizes.'
| def testResizeImageWithMasks(self):
| in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
height = 50
width = 100
expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
(out_image, out_masks) = preprocessor.resize_image(in_image, in_masks, new_height=height, new_width=width)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
(out_image_shape, out_masks_shape) = sess.run([out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
|
'Tests image resizing, checking output sizes.'
| def testResizeImageWithNoInstanceMask(self):
| in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
height = 50
width = 100
expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
expected_masks_shape_list = [[0, 50, 100], [0, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
(out_image, out_masks) = preprocessor.resize_image(in_image, in_masks, new_height=height, new_width=width)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
(out_image_shape, out_masks_shape) = sess.run([out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
|
'Tests image resizing, checking output sizes.'
| def testResizeToRangeSameMinMax(self):
| in_shape_list = [[312, 312, 3], [299, 299, 3]]
min_dim = 320
max_dim = 320
expected_shape_list = [[320, 320, 3], [320, 320, 3]]
for (in_shape, expected_shape) in zip(in_shape_list, expected_shape_list):
in_image = tf.random_uniform(in_shape)
out_image = preprocessor.resize_to_range(in_image, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
with self.test_session() as sess:
out_image_shape = sess.run(out_image_shape)
self.assertAllEqual(out_image_shape, expected_shape)
|
'Tests box scaling, checking scaled values.'
| def testScaleBoxesToPixelCoordinates(self):
| in_shape = [60, 40, 3]
in_boxes = [[0.1, 0.2, 0.4, 0.6], [0.5, 0.3, 0.9, 0.7]]
expected_boxes = [[6.0, 8.0, 24.0, 24.0], [30.0, 12.0, 54.0, 28.0]]
in_image = tf.random_uniform(in_shape)
in_boxes = tf.constant(in_boxes)
(_, out_boxes) = preprocessor.scale_boxes_to_pixel_coordinates(in_image, boxes=in_boxes)
with self.test_session() as sess:
out_boxes = sess.run(out_boxes)
self.assertAllClose(out_boxes, expected_boxes)
|
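A NumPy check of the expected pixel boxes above: normalized [ymin, xmin, ymax, xmax] coordinates are scaled by the image height for y and the image width for x.

import numpy as np

height, width = 60, 40
boxes = np.array([[0.1, 0.2, 0.4, 0.6], [0.5, 0.3, 0.9, 0.7]])
pixel_boxes = boxes * np.array([height, width, height, width])
print(pixel_boxes)  # [[ 6.  8. 24. 24.], [30. 12. 54. 28.]]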
'Tests box and keypoint scaling, checking scaled values.'
| def testScaleBoxesToPixelCoordinatesWithKeypoints(self):
| in_shape = [60, 40, 3]
in_boxes = self.createTestBoxes()
in_keypoints = self.createTestKeypoints()
expected_boxes = [[0.0, 10.0, 45.0, 40.0], [15.0, 20.0, 45.0, 40.0]]
expected_keypoints = [[[6.0, 4.0], [12.0, 8.0], [18.0, 12.0]], [[24.0, 16.0], [30.0, 20.0], [36.0, 24.0]]]
in_image = tf.random_uniform(in_shape)
(_, out_boxes, out_keypoints) = preprocessor.scale_boxes_to_pixel_coordinates(in_image, boxes=in_boxes, keypoints=in_keypoints)
with self.test_session() as sess:
(out_boxes_, out_keypoints_) = sess.run([out_boxes, out_keypoints])
self.assertAllClose(out_boxes_, expected_boxes)
self.assertAllClose(out_keypoints_, expected_keypoints)
|
'Tests whether channel means have been subtracted.'
| def testSubtractChannelMean(self):
| with self.test_session():
image = tf.zeros((240, 320, 3))
means = [1, 2, 3]
actual = preprocessor.subtract_channel_mean(image, means=means)
actual = actual.eval()
self.assertTrue((actual[:, :, 0] == (-1)).all())
self.assertTrue((actual[:, :, 1] == (-2)).all())
self.assertTrue((actual[:, :, 2] == (-3)).all())
|
'Tests one hot encoding of multiclass labels.'
| def testOneHotEncoding(self):
| with self.test_session():
labels = tf.constant([1, 4, 2], dtype=tf.int32)
one_hot = preprocessor.one_hot_encoding(labels, num_classes=5)
one_hot = one_hot.eval()
self.assertAllEqual([0, 1, 1, 0, 1], one_hot)
|
'Constructor.
Args:
num_classes: number of classes. Note that num_classes *does not* include
background categories that might be implicitly predicted in various
implementations.'
| def __init__(self, num_classes):
| self._num_classes = num_classes
self._groundtruth_lists = {}
|
'Access list of groundtruth tensors.
Args:
field: a string key, options are
fields.BoxListFields.{boxes,classes,masks,keypoints}
Returns:
a list of tensors holding groundtruth information (see also
provide_groundtruth function below), with one entry for each image in the
batch.
Raises:
RuntimeError: if the field has not been provided via provide_groundtruth.'
| def groundtruth_lists(self, field):
| if (field not in self._groundtruth_lists):
raise RuntimeError('Groundtruth tensor %s has not been provided' % field)
return self._groundtruth_lists[field]
|
'Input preprocessing.
To be overridden by implementations.
This function is responsible for any scaling/shifting of input values that
is necessary prior to running the detector on an input image.
It is also responsible for any resizing that might be necessary as images
are assumed to arrive in arbitrary sizes. While this function could
conceivably be part of the predict method (below), it is often convenient
to keep these separate --- for example, we may want to preprocess on one
device, place onto a queue, and let another device (e.g., the GPU) handle
prediction.
A few important notes about the preprocess function:
+ We assume that this operation does not have any trainable variables nor
does it affect the groundtruth annotations in any way (thus data
augmentation operations such as random cropping should be performed
externally).
+ There is no assumption that the batch size in this function is the same as
the batch size in the predict function. In fact, we recommend calling the
preprocess function prior to calling any batching operations (which should
happen outside of the model) and thus assuming that batch sizes are equal
to 1 in the preprocess function.
+ There is also no explicit assumption that the output resolutions
must be fixed across inputs --- this is to support "fully convolutional"
settings in which input images can have different shapes/resolutions.
Args:
inputs: a [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, height_out, width_out, channels] float32
tensor representing a batch of images.'
| @abstractmethod
def preprocess(self, inputs):
| pass
|
'Predict prediction tensors from inputs tensor.
Outputs of this function can be passed to loss or postprocess functions.
Args:
preprocessed_inputs: a [batch, height, width, channels] float32 tensor
representing a batch of images.
Returns:
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.'
| @abstractmethod
def predict(self, preprocessed_inputs):
| pass
|
'Convert predicted output tensors to final detections.
Outputs adhere to the following conventions:
* Classes are integers in [0, num_classes); background classes are removed
and the first non-background class is mapped to 0.
* Boxes are to be interpreted as being in [y_min, x_min, y_max, x_max]
format and normalized relative to the image window.
* `num_detections` is provided for settings where detections are padded to a
fixed number of boxes.
* We do not specifically assume any kind of probabilistic interpretation
of the scores --- the only important thing is their relative ordering.
Thus implementations of the postprocess function are free to output
logits, probabilities, calibrated probabilities, or anything else.
Args:
prediction_dict: a dictionary holding prediction tensors.
**params: Additional keyword arguments for specific implementations of
DetectionModel.
Returns:
detections: a dictionary containing the following fields
detection_boxes: [batch, max_detections, 4]
detection_scores: [batch, max_detections]
detection_classes: [batch, max_detections]
instance_masks: [batch, max_detections, image_height, image_width]
(optional)
keypoints: [batch, max_detections, num_keypoints, 2] (optional)
num_detections: [batch]'
| @abstractmethod
def postprocess(self, prediction_dict, **params):
| pass
|
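A hedged sketch of the inference-time flow implied by the three abstract methods above; `model` is assumed to be a concrete DetectionModel instance.

import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[None, None, None, 3])
preprocessed_inputs = model.preprocess(images)         # scale/resize inputs
prediction_dict = model.predict(preprocessed_inputs)   # raw prediction tensors
detections = model.postprocess(prediction_dict)        # final, decoded boxes
# detections['detection_boxes'], detections['detection_scores'], ...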
'Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding predicted tensors
Returns:
a dictionary mapping strings (loss names) to scalar tensors representing
loss values.'
| @abstractmethod
def loss(self, prediction_dict):
| pass
|
'Provide groundtruth tensors.
Args:
groundtruth_boxes_list: a list of 2-D tf.float32 tensors of shape
[num_boxes, 4] containing coordinates of the groundtruth boxes.
Groundtruth boxes are provided in [y_min, x_min, y_max, x_max]
format and assumed to be normalized and clipped
relative to the image window with y_min <= y_max and x_min <= x_max.
groundtruth_classes_list: a list of 2-D tf.float32 one-hot (or k-hot)
tensors of shape [num_boxes, num_classes] containing the class targets
with the 0th index assumed to map to the first non-background class.
groundtruth_masks_list: a list of 3-D tf.float32 tensors of
shape [max_detections, height_in, width_in] containing instance
masks with values in {0, 1}. If None, no masks are provided.
Mask resolution `height_in`x`width_in` must agree with the resolution
of the input image tensor provided to the `preprocess` function.
groundtruth_keypoints_list: a list of 2-D tf.float32 tensors of
shape [batch, max_detections, num_keypoints, 2] containing keypoints.
Keypoints are assumed to be provided in normalized coordinates and
missing keypoints should be encoded as NaN.'
| def provide_groundtruth(self, groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list=None, groundtruth_keypoints_list=None):
| self._groundtruth_lists[fields.BoxListFields.boxes] = groundtruth_boxes_list
self._groundtruth_lists[fields.BoxListFields.classes] = groundtruth_classes_list
if groundtruth_masks_list:
self._groundtruth_lists[fields.BoxListFields.masks] = groundtruth_masks_list
if groundtruth_keypoints_list:
self._groundtruth_lists[fields.BoxListFields.keypoints] = groundtruth_keypoints_list
|
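A hedged sketch of providing groundtruth for a batch of two images with two classes; `model` is again assumed to be a concrete DetectionModel.

import tensorflow as tf

groundtruth_boxes_list = [
    tf.constant([[0.1, 0.1, 0.5, 0.5]], tf.float32),
    tf.constant([[0.2, 0.2, 0.9, 0.9], [0.0, 0.0, 0.3, 0.3]], tf.float32)]
groundtruth_classes_list = [
    tf.constant([[1.0, 0.0]], tf.float32),
    tf.constant([[0.0, 1.0], [1.0, 0.0]], tf.float32)]
model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list)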
'Return callable for loading a foreign checkpoint into tensorflow graph.
Loads variables from a different tensorflow graph (typically feature
extractor variables). This enables the model to initialize based on weights
from another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
should have the same parameters as this detection model with exception of
the num_classes parameter.
Args:
checkpoint_path: path to checkpoint to restore.
from_detection_checkpoint: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Returns:
a callable which takes a tf.Session as input and loads a checkpoint when
run.'
| @abstractmethod
def restore_fn(self, checkpoint_path, from_detection_checkpoint=True):
| pass
|
'Return the size of each code.
This number is a constant and should agree with the output of the `encode`
op (e.g. if rel_codes is the output of self.encode(...), then it should have
shape [N, code_size()]). This abstractproperty should be overridden by
implementations.
Returns:
an integer constant'
| @abstractproperty
def code_size(self):
| pass
|
'Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes'
| def encode(self, boxes, anchors):
| with tf.name_scope('Encode'):
return self._encode(boxes, anchors)
|
'Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)'
| def decode(self, rel_codes, anchors):
| with tf.name_scope('Decode'):
return self._decode(rel_codes, anchors)
|
'Method to be overridden by implementations.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes'
| @abstractmethod
def _encode(self, boxes, anchors):
| pass
|
'Method to be overridden by implementations.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)'
| @abstractmethod
def _decode(self, rel_codes, anchors):
| pass
|
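For intuition, a NumPy sketch of what one common concrete coder does (a Faster R-CNN style center/size offset code, shown here without the optional scale factors); this is an illustrative assumption, not necessarily the coder used elsewhere in this file.

import numpy as np

def encode_center_size(box, anchor):
    # box and anchor are given as (ycenter, xcenter, height, width).
    ycenter, xcenter, h, w = box
    ycenter_a, xcenter_a, ha, wa = anchor
    return np.array([(ycenter - ycenter_a) / ha, (xcenter - xcenter_a) / wa,
                     np.log(h / ha), np.log(w / wa)])

# code_size is 4 for this coder.
print(encode_center_size((0.5, 0.5, 0.2, 0.2), (0.5, 0.5, 0.4, 0.4)))
# [ 0.  0. -0.693... -0.693...]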
'Construct Multibox Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: an object_detection.core.Matcher used to match groundtruth to
anchors.
box_coder: an object_detection.core.BoxCoder used to encode matching
groundtruth boxes with respect to anchors.
positive_class_weight: classification weight to be associated to positive
anchors (default: 1.0)
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0)
unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder'
| def __init__(self, similarity_calc, matcher, box_coder, positive_class_weight=1.0, negative_class_weight=1.0, unmatched_cls_target=None):
| if (not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator)):
raise ValueError('similarity_calc must be a RegionSimilarityCalculator')
if (not isinstance(matcher, mat.Matcher)):
raise ValueError('matcher must be a Matcher')
if (not isinstance(box_coder, bcoder.BoxCoder)):
raise ValueError('box_coder must be a BoxCoder')
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._positive_class_weight = positive_class_weight
self._negative_class_weight = negative_class_weight
if (unmatched_cls_target is None):
self._unmatched_cls_target = tf.constant([0], tf.float32)
else:
self._unmatched_cls_target = unmatched_cls_target
|
'Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList'
| def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None, **params):
| if (not isinstance(anchors, box_list.BoxList)):
raise ValueError('anchors must be a BoxList')
if (not isinstance(groundtruth_boxes, box_list.BoxList)):
raise ValueError('groundtruth_boxes must be a BoxList')
if (groundtruth_labels is None):
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), 0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, (-1))
shape_assert = tf.assert_equal(tf.shape(groundtruth_labels)[1:], tf.shape(self._unmatched_cls_target))
with tf.control_dependencies([shape_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors)
match = self._matcher.match(match_quality_matrix, **params)
reg_targets = self._create_regression_targets(anchors, groundtruth_boxes, match)
cls_targets = self._create_classification_targets(groundtruth_labels, match)
reg_weights = self._create_regression_weights(match)
cls_weights = self._create_classification_weights(match, self._positive_class_weight, self._negative_class_weight)
num_anchors = anchors.num_boxes_static()
if (num_anchors is not None):
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return (cls_targets, cls_weights, reg_targets, reg_weights, match)
|
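A hedged usage sketch, reusing the named factory exercised in the test earlier in this file; anchors and groundtruth_boxes are assumed to be BoxLists.

target_assigner = targetassigner.create_target_assigner('FasterRCNN',
                                                         stage='proposal')
(cls_targets, cls_weights, reg_targets, reg_weights,
 match) = target_assigner.assign(anchors, groundtruth_boxes,
                                 groundtruth_labels=None)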
'Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target\'s
first dimension.
Returns:
A tensor with the shape info filled in.'
| def _reset_target_shape(self, target, num_anchors):
| target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
|
'Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]'
| def _create_regression_targets(self, anchors, groundtruth_boxes, match):
| matched_anchor_indices = match.matched_column_indices()
unmatched_ignored_anchor_indices = match.unmatched_or_ignored_column_indices()
matched_gt_indices = match.matched_row_indices()
matched_anchors = box_list_ops.gather(anchors, matched_anchor_indices)
matched_gt_boxes = box_list_ops.gather(groundtruth_boxes, matched_gt_indices)
matched_reg_targets = self._box_coder.encode(matched_gt_boxes, matched_anchors)
unmatched_ignored_reg_targets = tf.tile(self._default_regression_target(), tf.stack([tf.size(unmatched_ignored_anchor_indices), 1]))
reg_targets = tf.dynamic_stitch([matched_anchor_indices, unmatched_ignored_anchor_indices], [matched_reg_targets, unmatched_ignored_reg_targets])
return reg_targets
|
'Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]'
| def _default_regression_target(self):
| return tf.constant([(self._box_coder.code_size * [0])], tf.float32)
|
'Create classification targets for each anchor.
Assign a classification target for each anchor to the matching
groundtruth label that is provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].'
| def _create_classification_targets(self, groundtruth_labels, match):
| matched_anchor_indices = match.matched_column_indices()
unmatched_ignored_anchor_indices = match.unmatched_or_ignored_column_indices()
matched_gt_indices = match.matched_row_indices()
matched_cls_targets = tf.gather(groundtruth_labels, matched_gt_indices)
ones = (self._unmatched_cls_target.shape.ndims * [1])
unmatched_ignored_cls_targets = tf.tile(tf.expand_dims(self._unmatched_cls_target, 0), tf.stack(([tf.size(unmatched_ignored_anchor_indices)] + ones)))
cls_targets = tf.dynamic_stitch([matched_anchor_indices, unmatched_ignored_anchor_indices], [matched_cls_targets, unmatched_ignored_cls_targets])
return cls_targets
|
'Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
reg_weights: a float32 tensor with shape [num_anchors] representing
regression weights'
| def _create_regression_weights(self, match):
| reg_weights = tf.cast(match.matched_column_indicator(), tf.float32)
return reg_weights
|
'Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
positive_class_weight: weight to be associated to positive anchors
negative_class_weight: weight to be associated to negative anchors
Returns:
cls_weights: a float32 tensor with shape [num_anchors] representing
classification weights.'
| def _create_classification_weights(self, match, positive_class_weight=1.0, negative_class_weight=1.0):
| matched_indicator = tf.cast(match.matched_column_indicator(), tf.float32)
ignore_indicator = tf.cast(match.ignored_column_indicator(), tf.float32)
unmatched_indicator = ((1.0 - matched_indicator) - ignore_indicator)
cls_weights = ((positive_class_weight * matched_indicator) + (negative_class_weight * unmatched_indicator))
return cls_weights
|
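A worked example of the weight arithmetic above: matched, ignored, and unmatched anchors receive positive_class_weight, 0, and negative_class_weight respectively (indicator values are illustrative).

import numpy as np

matched = np.array([1.0, 0.0, 0.0])
ignored = np.array([0.0, 1.0, 0.0])
unmatched = 1.0 - matched - ignored
cls_weights = 1.0 * matched + 1.0 * unmatched   # both weights default to 1.0
print(cls_weights)  # [1. 0. 1.]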
'Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder: BoxCoder object.'
| def get_box_coder(self):
| return self._box_coder
|
'Constructs a batch queue holding tensor_dict.
Args:
tensor_dict: dictionary of tensors to batch.
batch_size: batch size.
batch_queue_capacity: max capacity of the queue from which the tensors are
batched.
num_batch_queue_threads: number of threads to use for batching.
prefetch_queue_capacity: max capacity of the queue used to prefetch
assembled batches.'
| def __init__(self, tensor_dict, batch_size, batch_queue_capacity, num_batch_queue_threads, prefetch_queue_capacity):
| static_shapes = collections.OrderedDict({key: tensor.get_shape() for (key, tensor) in tensor_dict.iteritems()})
runtime_shapes = collections.OrderedDict({(key, 'runtime_shapes'): tf.shape(tensor) for (key, tensor) in tensor_dict.iteritems()})
all_tensors = tensor_dict
all_tensors.update(runtime_shapes)
batched_tensors = tf.train.batch(all_tensors, capacity=batch_queue_capacity, batch_size=batch_size, dynamic_pad=True, num_threads=num_batch_queue_threads)
self._queue = prefetcher.prefetch(batched_tensors, prefetch_queue_capacity)
self._static_shapes = static_shapes
self._batch_size = batch_size
|
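A short sketch (hypothetical tensor_dict built from TF1 placeholders) of the key layout assembled above; the runtime-shape entries travel through tf.train.batch alongside the padded data and are what dequeue later uses to undo the padding.

import collections
import tensorflow as tf

tensor_dict = {
    'image': tf.placeholder(tf.float32, [None, None, 3]),
    'groundtruth_boxes': tf.placeholder(tf.float32, [None, 4]),
}
static_shapes = collections.OrderedDict(
    {key: tensor.get_shape() for key, tensor in tensor_dict.items()})
runtime_shapes = collections.OrderedDict(
    {(key, 'runtime_shapes'): tf.shape(tensor)
     for key, tensor in tensor_dict.items()})
all_tensors = dict(tensor_dict)
all_tensors.update(runtime_shapes)
# all_tensors now carries both the data tensors and their pre-padding shapes,
# keyed by 'image', 'groundtruth_boxes', ('image', 'runtime_shapes') and
# ('groundtruth_boxes', 'runtime_shapes').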
'Dequeues a batch of tensor_dict from the BatchQueue.
TODO: use allow_smaller_final_batch to allow running over the whole eval set
Returns:
A list of tensor_dicts of the requested batch_size.'
| def dequeue(self):
| batched_tensors = self._queue.dequeue()
tensors = {}
shapes = {}
for (key, batched_tensor) in batched_tensors.iteritems():
unbatched_tensor_list = tf.unstack(batched_tensor)
for (i, unbatched_tensor) in enumerate(unbatched_tensor_list):
if (isinstance(key, tuple) and (key[1] == 'runtime_shapes')):
shapes[(key[0], i)] = unbatched_tensor
else:
tensors[(key, i)] = unbatched_tensor
tensor_dict_list = []
batch_size = self._batch_size
for batch_id in range(batch_size):
tensor_dict = {}
for key in self._static_shapes:
tensor_dict[key] = tf.slice(tensors[(key, batch_id)], tf.zeros_like(shapes[(key, batch_id)]), shapes[(key, batch_id)])
tensor_dict[key].set_shape(self._static_shapes[key])
tensor_dict_list.append(tensor_dict)
return tensor_dict_list
|
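A hedged usage sketch of how the queue is typically built and consumed; `create_single_example_dict` is an assumed helper standing in for the input pipeline, and the capacities are hypothetical.

# Assumed helper returning a dict of unbatched tensors for one example.
tensor_dict = create_single_example_dict()
batch_queue = BatchQueue(
    tensor_dict,
    batch_size=8,
    batch_queue_capacity=150,
    num_batch_queue_threads=8,
    prefetch_queue_capacity=10)
tensor_dict_list = batch_queue.dequeue()
# tensor_dict_list holds 8 per-example dicts; each tensor has been sliced back
# to its original (pre-padding) runtime shape and re-annotated with the static
# shape recorded at construction time.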
'Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).'
| def __init__(self, is_training, num_classes):
| self._is_training = is_training
self._num_classes = num_classes
|
'Computes encoded object locations and corresponding confidences.
Takes a high level image feature map as input and produces two predictions,
(1) a tensor encoding box locations, and
(2) a tensor encoding class scores for each corresponding box.
In this interface, we only assume that two tensors are returned as output
and do not assume anything about their shapes.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feature map.
scope: Variable and Op scope name.
**params: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A float tensor of shape
[batch_size, num_anchors, q, code_size] representing the location of
the objects, where q is 1 or the number of classes.
class_predictions_with_background: A float tensor of shape
[batch_size, num_anchors, num_classes + 1] representing the class
predictions for the proposals.'
| def predict(self, image_features, num_predictions_per_location, scope, **params):
| with tf.variable_scope(scope):
return self._predict(image_features, num_predictions_per_location, **params)
|
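An illustrative subclass skeleton (not part of the library) showing the contract _predict has to satisfy; it assumes the BOX_ENCODINGS and CLASS_PREDICTIONS_WITH_BACKGROUND keys and the num_classes property used by the concrete predictors below.

class ZeroBoxPredictor(BoxPredictor):
  """Toy predictor that emits zeros, purely to illustrate the interface."""

  def _predict(self, image_features, num_predictions_per_location):
    feature_shape = tf.shape(image_features)
    batch_size = feature_shape[0]
    num_anchors = (feature_shape[1] * feature_shape[2] *
                   num_predictions_per_location)
    box_encodings = tf.zeros([batch_size, num_anchors, 1, 4])
    class_predictions_with_background = tf.zeros(
        [batch_size, num_anchors, self.num_classes + 1])
    return {BOX_ENCODINGS: box_encodings,
            CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background}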
'Implementations must override this method.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feature map.
**params: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A float tensor of shape
[batch_size, num_anchors, q, code_size] representing the location of
the objects, where q is 1 or the number of classes.
class_predictions_with_background: A float tensor of shape
[batch_size, num_anchors, num_classes + 1] representing the class
predictions for the proposals.'
| @abstractmethod
def _predict(self, image_features, num_predictions_per_location, **params):
| pass
|
'Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams: Slim arg_scope with hyperparameters for convolutional
layers.
num_spatial_bins: A list of two integers `[spatial_bins_y,
spatial_bins_x]`.
depth: Target depth to reduce the input feature maps to.
crop_size: A list of two integers `[crop_height, crop_width]`.
box_code_size: Size of encoding for each box.'
| def __init__(self, is_training, num_classes, conv_hyperparams, num_spatial_bins, depth, crop_size, box_code_size):
| super(RfcnBoxPredictor, self).__init__(is_training, num_classes)
self._conv_hyperparams = conv_hyperparams
self._num_spatial_bins = num_spatial_bins
self._depth = depth
self._crop_size = crop_size
self._box_code_size = box_code_size
|
'Computes encoded object locations and corresponding confidences.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feature map.
Currently, this must be set to 1, or an error will be raised.
proposal_boxes: A float tensor of shape [batch_size, num_proposals,
box_code_size].
Returns:
box_encodings: A float tensor of shape
[batch_size, 1, num_classes, code_size] representing the
location of the objects.
class_predictions_with_background: A float tensor of shape
[batch_size, 1, num_classes + 1] representing the class
predictions for the proposals.
Raises:
ValueError: if num_predictions_per_location is not 1.'
| def _predict(self, image_features, num_predictions_per_location, proposal_boxes):
| if (num_predictions_per_location != 1):
raise ValueError('Currently RfcnBoxPredictor only supports predicting a single box per class per location.')
batch_size = tf.shape(proposal_boxes)[0]
num_boxes = tf.shape(proposal_boxes)[1]
def get_box_indices(proposals):
proposals_shape = proposals.get_shape().as_list()
if any(((dim is None) for dim in proposals_shape)):
proposals_shape = tf.shape(proposals)
ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
multiplier = tf.expand_dims(tf.range(start=0, limit=proposals_shape[0]), 1)
return tf.reshape((ones_mat * multiplier), [(-1)])
net = image_features
with slim.arg_scope(self._conv_hyperparams):
net = slim.conv2d(net, self._depth, [1, 1], scope='reduce_depth')
location_feature_map_depth = (((self._num_spatial_bins[0] * self._num_spatial_bins[1]) * self.num_classes) * self._box_code_size)
location_feature_map = slim.conv2d(net, location_feature_map_depth, [1, 1], activation_fn=None, scope='refined_locations')
box_encodings = ops.position_sensitive_crop_regions(location_feature_map, boxes=tf.reshape(proposal_boxes, [(-1), self._box_code_size]), box_ind=get_box_indices(proposal_boxes), crop_size=self._crop_size, num_spatial_bins=self._num_spatial_bins, global_pool=True)
box_encodings = tf.squeeze(box_encodings, squeeze_dims=[1, 2])
box_encodings = tf.reshape(box_encodings, [(batch_size * num_boxes), 1, self.num_classes, self._box_code_size])
total_classes = (self.num_classes + 1)
class_feature_map_depth = ((self._num_spatial_bins[0] * self._num_spatial_bins[1]) * total_classes)
class_feature_map = slim.conv2d(net, class_feature_map_depth, [1, 1], activation_fn=None, scope='class_predictions')
class_predictions_with_background = ops.position_sensitive_crop_regions(class_feature_map, boxes=tf.reshape(proposal_boxes, [(-1), self._box_code_size]), box_ind=get_box_indices(proposal_boxes), crop_size=self._crop_size, num_spatial_bins=self._num_spatial_bins, global_pool=True)
class_predictions_with_background = tf.squeeze(class_predictions_with_background, squeeze_dims=[1, 2])
class_predictions_with_background = tf.reshape(class_predictions_with_background, [(batch_size * num_boxes), 1, total_classes])
return {BOX_ENCODINGS: box_encodings, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background}
|
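A small runnable sketch of what the get_box_indices helper produces: it flattens a [batch, num_proposals] grid of image indices so that each proposal in the flattened box list knows which image it came from (the box_ind argument of the position-sensitive crop).

import tensorflow as tf

def get_box_indices(proposals):
    # Same logic as the helper above.
    proposals_shape = tf.shape(proposals)
    ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
    multiplier = tf.expand_dims(tf.range(start=0, limit=proposals_shape[0]), 1)
    return tf.reshape(ones_mat * multiplier, [-1])

proposal_boxes = tf.zeros([2, 3, 4])  # 2 images, 3 proposals each (dummy boxes)
with tf.Session() as sess:
    print(sess.run(get_box_indices(proposal_boxes)))  # [0 0 0 1 1 1]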
'Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
fc_hyperparams: Slim arg_scope with hyperparameters for fully
connected ops.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
box_code_size: Size of encoding for each box.
conv_hyperparams: Slim arg_scope with hyperparameters for convolution
ops.
predict_instance_masks: Whether to predict object masks inside detection
boxes.
mask_prediction_conv_depth: The depth for the first conv2d_transpose op
applied to the image_features in the mask prediction branch.
predict_keypoints: Whether to predict keypoints inside detection boxes.
Raises:
ValueError: If predict_keypoints is true, or if predict_instance_masks is true and conv_hyperparams is not provided.'
| def __init__(self, is_training, num_classes, fc_hyperparams, use_dropout, dropout_keep_prob, box_code_size, conv_hyperparams=None, predict_instance_masks=False, mask_prediction_conv_depth=256, predict_keypoints=False):
| super(MaskRCNNBoxPredictor, self).__init__(is_training, num_classes)
self._fc_hyperparams = fc_hyperparams
self._use_dropout = use_dropout
self._box_code_size = box_code_size
self._dropout_keep_prob = dropout_keep_prob
self._conv_hyperparams = conv_hyperparams
self._predict_instance_masks = predict_instance_masks
self._mask_prediction_conv_depth = mask_prediction_conv_depth
self._predict_keypoints = predict_keypoints
if self._predict_keypoints:
raise ValueError('Keypoint prediction is unimplemented.')
if ((self._predict_instance_masks or self._predict_keypoints) and (self._conv_hyperparams is None)):
raise ValueError('`conv_hyperparams` must be provided when predicting masks.')
|
'Computes encoded object locations and corresponding confidences.
Flattens image_features and applies fully connected ops (with no
non-linearity) to predict box encodings and class predictions. In this
setting, anchors are not spatially arranged in any way and are assumed to
have been folded into the batch dimension. Thus we output 1 for the
anchors dimension.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feature map.
Currently, this must be set to 1, or an error will be raised.
Returns:
A dictionary containing the following tensors.
box_encodings: A float tensor of shape
[batch_size, 1, num_classes, code_size] representing the
location of the objects.
class_predictions_with_background: A float tensor of shape
[batch_size, 1, num_classes + 1] representing the class
predictions for the proposals.
If predict_masks is True the dictionary also contains:
instance_masks: A float tensor of shape
[batch_size, 1, num_classes, image_height, image_width]
If predict_keypoints is True the dictionary also contains:
keypoints: [batch_size, 1, num_keypoints, 2]
Raises:
ValueError: if num_predictions_per_location is not 1.'
| def _predict(self, image_features, num_predictions_per_location):
| if (num_predictions_per_location != 1):
raise ValueError('Currently FullyConnectedBoxPredictor only supports predicting a single box per class per location.')
spatial_averaged_image_features = tf.reduce_mean(image_features, [1, 2], keep_dims=True, name='AvgPool')
flattened_image_features = slim.flatten(spatial_averaged_image_features)
if self._use_dropout:
flattened_image_features = slim.dropout(flattened_image_features, keep_prob=self._dropout_keep_prob, is_training=self._is_training)
with slim.arg_scope(self._fc_hyperparams):
box_encodings = slim.fully_connected(flattened_image_features, (self._num_classes * self._box_code_size), activation_fn=None, scope='BoxEncodingPredictor')
class_predictions_with_background = slim.fully_connected(flattened_image_features, (self._num_classes + 1), activation_fn=None, scope='ClassPredictor')
box_encodings = tf.reshape(box_encodings, [(-1), 1, self._num_classes, self._box_code_size])
class_predictions_with_background = tf.reshape(class_predictions_with_background, [(-1), 1, (self._num_classes + 1)])
predictions_dict = {BOX_ENCODINGS: box_encodings, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background}
if self._predict_instance_masks:
with slim.arg_scope(self._conv_hyperparams):
upsampled_features = slim.conv2d_transpose(image_features, num_outputs=self._mask_prediction_conv_depth, kernel_size=[2, 2], stride=2)
mask_predictions = slim.conv2d(upsampled_features, num_outputs=self.num_classes, activation_fn=None, kernel_size=[1, 1])
instance_masks = tf.expand_dims(tf.transpose(mask_predictions, perm=[0, 3, 1, 2]), axis=1, name='MaskPredictor')
predictions_dict[MASK_PREDICTIONS] = instance_masks
return predictions_dict
|
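A hedged usage sketch tracing the output shapes for 90 classes; the arg_scope (fc_scope) and feature tensor (image_features) are assumed to come from elsewhere in the pipeline, and the numbers are hypothetical.

# fc_scope is assumed to be a slim arg_scope built by a hyperparams builder.
box_predictor = MaskRCNNBoxPredictor(
    is_training=True,
    num_classes=90,
    fc_hyperparams=fc_scope,
    use_dropout=True,
    dropout_keep_prob=0.5,
    box_code_size=4)
# image_features: [batch_size * num_proposals, 14, 14, depth] cropped features.
predictions = box_predictor.predict(
    image_features, num_predictions_per_location=1,
    scope='SecondStageBoxPredictor')
# predictions[BOX_ENCODINGS]:                      [batch_size * num_proposals, 1, 90, 4]
# predictions[CLASS_PREDICTIONS_WITH_BACKGROUND]:  [batch_size * num_proposals, 1, 91]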
'Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams: Slim arg_scope with hyperparameters for convolution ops.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
use_dropout: Option to use dropout for class prediction or not.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
box_code_size: Size of encoding for each box.
apply_sigmoid_to_scores: if True, apply the sigmoid on the output
class_predictions.
Raises:
ValueError: if min_depth > max_depth.'
| def __init__(self, is_training, num_classes, conv_hyperparams, min_depth, max_depth, num_layers_before_predictor, use_dropout, dropout_keep_prob, kernel_size, box_code_size, apply_sigmoid_to_scores=False):
| super(ConvolutionalBoxPredictor, self).__init__(is_training, num_classes)
if (min_depth > max_depth):
raise ValueError('min_depth should be less than or equal to max_depth')
self._conv_hyperparams = conv_hyperparams
self._min_depth = min_depth
self._max_depth = max_depth
self._num_layers_before_predictor = num_layers_before_predictor
self._use_dropout = use_dropout
self._kernel_size = kernel_size
self._box_code_size = box_code_size
self._dropout_keep_prob = dropout_keep_prob
self._apply_sigmoid_to_scores = apply_sigmoid_to_scores
|
'Computes encoded object locations and corresponding confidences.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feature map.
Returns:
A dictionary containing the following tensors.
box_encodings: A float tensor of shape [batch_size, num_anchors, 1,
code_size] representing the location of the objects, where
num_anchors = feat_height * feat_width * num_predictions_per_location
class_predictions_with_background: A float tensor of shape
[batch_size, num_anchors, num_classes + 1] representing the class
predictions for the proposals.'
| def _predict(self, image_features, num_predictions_per_location):
| features_depth = static_shape.get_depth(image_features.get_shape())
depth = max(min(features_depth, self._max_depth), self._min_depth)
num_class_slots = (self.num_classes + 1)
net = image_features
with slim.arg_scope(self._conv_hyperparams):
with slim.arg_scope([slim.dropout], is_training=self._is_training):
if ((depth > 0) and (self._num_layers_before_predictor > 0)):
for i in range(self._num_layers_before_predictor):
net = slim.conv2d(net, depth, [1, 1], scope=('Conv2d_%d_1x1_%d' % (i, depth)))
with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None, normalizer_params=None):
box_encodings = slim.conv2d(net, (num_predictions_per_location * self._box_code_size), [self._kernel_size, self._kernel_size], scope='BoxEncodingPredictor')
if self._use_dropout:
net = slim.dropout(net, keep_prob=self._dropout_keep_prob)
class_predictions_with_background = slim.conv2d(net, (num_predictions_per_location * num_class_slots), [self._kernel_size, self._kernel_size], scope='ClassPredictor')
if self._apply_sigmoid_to_scores:
class_predictions_with_background = tf.sigmoid(class_predictions_with_background)
batch_size = static_shape.get_batch_size(image_features.get_shape())
if (batch_size is None):
features_height = static_shape.get_height(image_features.get_shape())
features_width = static_shape.get_width(image_features.get_shape())
flattened_predictions_size = ((features_height * features_width) * num_predictions_per_location)
box_encodings = tf.reshape(box_encodings, [(-1), flattened_predictions_size, 1, self._box_code_size])
class_predictions_with_background = tf.reshape(class_predictions_with_background, [(-1), flattened_predictions_size, num_class_slots])
else:
box_encodings = tf.reshape(box_encodings, [batch_size, (-1), 1, self._box_code_size])
class_predictions_with_background = tf.reshape(class_predictions_with_background, [batch_size, (-1), num_class_slots])
return {BOX_ENCODINGS: box_encodings, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background}
|
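The first step above clamps the intermediate feature depth into [min_depth, max_depth]; a quick plain-Python illustration of that clamping with hypothetical values:

def clamp_depth(features_depth, min_depth, max_depth):
    # Mirrors `max(min(features_depth, max_depth), min_depth)` in _predict.
    return max(min(features_depth, max_depth), min_depth)

print(clamp_depth(1024, min_depth=16, max_depth=256))  # 256 (capped at max_depth)
print(clamp_depth(8,    min_depth=16, max_depth=256))  # 16  (raised to min_depth)
print(clamp_depth(128,  min_depth=16, max_depth=256))  # 128 (already in range)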
'Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
Raises:
ValueError: if match_results does not have rank 1 or is not an
int32 tensor.'
| def __init__(self, match_results):
| if (match_results.shape.ndims != 1):
raise ValueError('match_results should have rank 1')
if (match_results.dtype != tf.int32):
raise ValueError('match_results should be an int32 tensor')
self._match_results = match_results
|
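A small numeric sketch (hypothetical values, TF1-style session) of the match_results convention described above:

import tensorflow as tf

# 4 anchors (columns): anchor 0 -> gt row 3, anchor 1 unmatched (-1),
# anchor 2 ignored (-2), anchor 3 -> gt row 0.
match = Match(tf.constant([3, -1, -2, 0], dtype=tf.int32))
with tf.Session() as sess:
    print(sess.run(match.num_matched_columns()))                   # 2
    print(sess.run(match.num_unmatched_columns()))                 # 1
    print(sess.run(match.num_ignored_columns()))                   # 1
    print(sess.run(match.unmatched_or_ignored_column_indices()))   # [1 2]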
'The accessor for match results.
Returns:
the tensor which encodes the match results.'
| @property
def match_results(self):
| return self._match_results
|
'Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.'
| def matched_column_indices(self):
| return self._reshape_and_cast(tf.where(tf.greater(self._match_results, (-1))))
|
'Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.'
| def matched_column_indicator(self):
| return tf.greater_equal(self._match_results, 0)
|
'Returns number (int32 scalar tensor) of matched columns.'
| def num_matched_columns(self):
| return tf.size(self.matched_column_indices())
|
'Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.'
| def unmatched_column_indices(self):
| return self._reshape_and_cast(tf.where(tf.equal(self._match_results, (-1))))
|
'Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.'
| def unmatched_column_indicator(self):
| return tf.equal(self._match_results, (-1))
|
'Returns number (int32 scalar tensor) of unmatched columns.'
| def num_unmatched_columns(self):
| return tf.size(self.unmatched_column_indices())
|
'Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.'
| def ignored_column_indices(self):
| return self._reshape_and_cast(tf.where(self.ignored_column_indicator()))
|
'Returns boolean column indicator where True means the column is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column
indices.'
| def ignored_column_indicator(self):
| return tf.equal(self._match_results, (-2))
|
'Returns number (int32 scalar tensor) of ignored columns.'
| def num_ignored_columns(self):
| return tf.size(self.ignored_column_indices())
|
'Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.'
| def unmatched_or_ignored_column_indices(self):
| return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results)))
|
'Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence
with the output of matched_column_indicator(). For example if
self.matched_column_indicator() is [0,2], and self.matched_row_indices() is
[7, 3], then we know that column 0 was matched to row 7 and column 2 was
matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.'
| def matched_row_indices(self):
| return self._reshape_and_cast(tf.gather(self._match_results, self.matched_column_indices()))
|
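Continuing the hypothetical match above ([3, -1, -2, 0]), the two accessors stay aligned index-for-index:

with tf.Session() as sess:
    cols, rows = sess.run([match.matched_column_indices(),
                           match.matched_row_indices()])
    print(cols)  # [0 3]
    print(rows)  # [3 0] -> column 0 matched row 3, column 3 matched row 0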
'Computes matches among row and column indices and returns the result.
Computes matches among the row and column indices based on the similarity
matrix and optional arguments.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
scope: Op scope name. Defaults to \'Match\' if None.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
A Match object with the results of matching.'
| def match(self, similarity_matrix, scope=None, **params):
| with tf.name_scope(scope, 'Match', [similarity_matrix, params]) as scope:
return Match(self._match(similarity_matrix, **params))
|
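An illustrative toy subclass (not the library's own matcher, which applies similarity thresholds) showing how _match plugs into match(), assuming the abstract Matcher class these methods belong to: every column is simply assigned to its highest-similarity row.

class NaiveArgMaxMatcher(Matcher):
  """Toy matcher: each column is matched to its best row; nothing is ignored."""

  def _match(self, similarity_matrix):
    # argmax over rows (axis 0) gives, for every column, the best row index;
    # cast to int32 because Match requires int32 match_results.
    return tf.cast(tf.argmax(similarity_matrix, axis=0), tf.int32)

similarity = tf.constant([[0.9, 0.1],
                          [0.2, 0.8]])
match = NaiveArgMaxMatcher().match(similarity)
# match.match_results evaluates to [0, 1]: column 0 -> row 0, column 1 -> row 1.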
'Method to be overridden by implementations.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
match_results: Integer tensor of shape [M]: match_results[i]>=0 means
that column i is matched to row match_results[i], match_results[i]=-1
means that the column is not matched. match_results[i]=-2 means that
the column is ignored (usually this happens when there is a very weak
match which one neither wants as positive nor negative example).'
| @abstractmethod
def _match(self, similarity_matrix, **params):
| pass
|
'Return a single image and associated labels.
Args:
data: a string tensor holding a serialized protocol buffer corresponding
to data for a single image.
Returns:
tensor_dict: a dictionary containing tensors. Possible keys are defined in
reader.Fields.'
| @abstractmethod
def Decode(self, data):
| pass
|
'Name scope.
Must be defined by implementations.
Returns:
a string representing the name scope of the anchor generation operation.'
| @abstractmethod
def name_scope(self):
| pass
|
'Whether to dynamically check the number of anchors generated.
Can be overridden by implementations that would like to disable this
behavior.
Returns:
a boolean controlling whether the Generate function should dynamically
check the number of anchors generated against the mathematically
expected number of anchors.'
| @property
def check_num_anchors(self):
| return True
|