repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
models | models-master/research/delf/delf/python/training/model/__init__.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DELF model module, used for training and exporting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from delf.python.training.model import delf_model
from delf.python.training.model import delg_model
from delf.python.training.model import export_model_utils
from delf.python.training.model import resnet50
# pylint: enable=unused-import
| 1,125 | 42.307692 | 80 | py |
models | models-master/research/delf/delf/python/training/model/export_model_utils.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for DELF model exporting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from delf import feature_extractor
from delf.python.datasets.google_landmarks_dataset import googlelandmarks as gld
from object_detection.core import box_list
from object_detection.core import box_list_ops
# TODO(andrearaujo): Rewrite this function to be more similar to
# "ExtractLocalAndGlobalFeatures" below, leveraging autograph to avoid the need
# for a tf.while_loop.
def ExtractLocalFeatures(image, image_scales, max_feature_num, abs_thres, iou,
attention_model_fn, stride_factor):
"""Extract local features for input image.
Args:
image: image tensor of type tf.uint8 with shape [h, w, channels].
image_scales: 1D float tensor which contains float scales used for image
pyramid construction.
max_feature_num: int tensor denoting the maximum selected feature points.
abs_thres: float tensor denoting the score threshold for feature selection.
iou: float scalar denoting the iou threshold for NMS.
attention_model_fn: model function. Follows the signature:
* Args:
* `images`: Image tensor which is re-scaled.
* Returns:
* `attention_prob`: attention map after the non-linearity.
* `feature_map`: feature map after ResNet convolution.
stride_factor: integer accounting for striding after block3.
Returns:
boxes: [N, 4] float tensor which denotes the selected receptive box. N is
the number of final feature points which pass through keypoint selection
and NMS steps.
features: [N, depth] float tensor.
feature_scales: [N] float tensor. It is the inverse of the input image
scales such that larger image scales correspond to larger image regions,
which is compatible with keypoints detected with other techniques, for
example Congas.
scores: [N, 1] float tensor denoting the attention score.
"""
original_image_shape_float = tf.gather(
tf.dtypes.cast(tf.shape(image), tf.float32), [0, 1])
image_tensor = gld.NormalizeImages(
image, pixel_value_offset=128.0, pixel_value_scale=128.0)
image_tensor = tf.expand_dims(image_tensor, 0, name='image/expand_dims')
# Hard code the feature depth and receptive field parameters for now.
# We need to revisit this once we change the architecture and selected
# convolutional blocks to use as local features.
rf, stride, padding = [291.0, 16.0 * stride_factor, 145.0]
feature_depth = 1024
def _ProcessSingleScale(scale_index, boxes, features, scales, scores):
"""Resizes the image and run feature extraction and keypoint selection.
This function will be passed into tf.while_loop() and be called
repeatedly. The input boxes are collected from the previous iteration
[0: scale_index -1]. We get the current scale by
image_scales[scale_index], and run resize image, feature extraction and
keypoint selection. Then we will get a new set of selected_boxes for
current scale. In the end, we concat the previous boxes with current
selected_boxes as the output.
Args:
scale_index: A valid index in the image_scales.
boxes: Box tensor with the shape of [N, 4].
features: Feature tensor with the shape of [N, depth].
scales: Scale tensor with the shape of [N].
scores: Attention score tensor with the shape of [N].
Returns:
scale_index: The next scale index for processing.
boxes: Concatenated box tensor with the shape of [K, 4]. K >= N.
features: Concatenated feature tensor with the shape of [K, depth].
scales: Concatenated scale tensor with the shape of [K].
scores: Concatenated score tensor with the shape of [K].
"""
scale = tf.gather(image_scales, scale_index)
new_image_size = tf.dtypes.cast(
tf.round(original_image_shape_float * scale), tf.int32)
resized_image = tf.image.resize(image_tensor, new_image_size)
attention_prob, feature_map = attention_model_fn(resized_image)
attention_prob = tf.squeeze(attention_prob, axis=[0])
feature_map = tf.squeeze(feature_map, axis=[0])
rf_boxes = feature_extractor.CalculateReceptiveBoxes(
tf.shape(feature_map)[0],
tf.shape(feature_map)[1], rf, stride, padding)
# Re-project back to the original image space.
rf_boxes = tf.divide(rf_boxes, scale)
attention_prob = tf.reshape(attention_prob, [-1])
feature_map = tf.reshape(feature_map, [-1, feature_depth])
# Use attention score to select feature vectors.
indices = tf.reshape(tf.where(attention_prob >= abs_thres), [-1])
selected_boxes = tf.gather(rf_boxes, indices)
selected_features = tf.gather(feature_map, indices)
selected_scores = tf.gather(attention_prob, indices)
selected_scales = tf.ones_like(selected_scores, tf.float32) / scale
# Concat with the previous result from different scales.
boxes = tf.concat([boxes, selected_boxes], 0)
features = tf.concat([features, selected_features], 0)
scales = tf.concat([scales, selected_scales], 0)
scores = tf.concat([scores, selected_scores], 0)
return scale_index + 1, boxes, features, scales, scores
output_boxes = tf.zeros([0, 4], dtype=tf.float32)
output_features = tf.zeros([0, feature_depth], dtype=tf.float32)
output_scales = tf.zeros([0], dtype=tf.float32)
output_scores = tf.zeros([0], dtype=tf.float32)
# Process the first scale separately, the following scales will reuse the
# graph variables.
(_, output_boxes, output_features, output_scales,
output_scores) = _ProcessSingleScale(0, output_boxes, output_features,
output_scales, output_scores)
i = tf.constant(1, dtype=tf.int32)
num_scales = tf.shape(image_scales)[0]
keep_going = lambda j, b, f, scales, scores: tf.less(j, num_scales)
(_, output_boxes, output_features, output_scales,
output_scores) = tf.nest.map_structure(
tf.stop_gradient,
tf.while_loop(
cond=keep_going,
body=_ProcessSingleScale,
loop_vars=[
i, output_boxes, output_features, output_scales, output_scores
],
shape_invariants=[
i.get_shape(),
tf.TensorShape([None, 4]),
tf.TensorShape([None, feature_depth]),
tf.TensorShape([None]),
tf.TensorShape([None])
]))
feature_boxes = box_list.BoxList(output_boxes)
feature_boxes.add_field('features', output_features)
feature_boxes.add_field('scales', output_scales)
feature_boxes.add_field('scores', output_scores)
nms_max_boxes = tf.minimum(max_feature_num, feature_boxes.num_boxes())
final_boxes = box_list_ops.non_max_suppression(feature_boxes, iou,
nms_max_boxes)
return final_boxes.get(), final_boxes.get_field(
'features'), final_boxes.get_field('scales'), tf.expand_dims(
final_boxes.get_field('scores'), 1)
@tf.function
def ExtractGlobalFeatures(image,
image_scales,
global_scales_ind,
model_fn,
multi_scale_pool_type='None',
normalize_global_descriptor=False,
normalization_function=gld.NormalizeImages):
"""Extract global features for input image.
Args:
image: image tensor of type tf.uint8 with shape [h, w, channels].
image_scales: 1D float tensor which contains float scales used for image
pyramid construction.
global_scales_ind: Feature extraction happens only for a subset of
`image_scales`, those with corresponding indices from this tensor.
model_fn: model function. Follows the signature:
* Args:
* `images`: Batched image tensor.
* Returns:
* `global_descriptors`: Global descriptors for input images.
multi_scale_pool_type: If set, the global descriptor of each scale is pooled
and a 1D global descriptor is returned.
normalize_global_descriptor: If True, output global descriptors are
L2-normalized.
normalization_function: Function used for normalization.
Returns:
global_descriptors: If `multi_scale_pool_type` is 'None', returns a [S, D]
float tensor. S is the number of scales, and D the global descriptor
dimensionality. Each D-dimensional entry is a global descriptor, which may
be L2-normalized depending on `normalize_global_descriptor`. If
`multi_scale_pool_type` is not 'None', returns a [D] float tensor with the
pooled global descriptor.
"""
original_image_shape_float = tf.gather(
tf.dtypes.cast(tf.shape(image), tf.float32), [0, 1])
image_tensor = normalization_function(
image, pixel_value_offset=128.0, pixel_value_scale=128.0)
image_tensor = tf.expand_dims(image_tensor, 0, name='image/expand_dims')
def _ResizeAndExtract(scale_index):
"""Helper function to resize image then extract global feature.
Args:
scale_index: A valid index in image_scales.
Returns:
global_descriptor: [1,D] tensor denoting the extracted global descriptor.
"""
scale = tf.gather(image_scales, scale_index)
new_image_size = tf.dtypes.cast(
tf.round(original_image_shape_float * scale), tf.int32)
resized_image = tf.image.resize(image_tensor, new_image_size)
global_descriptor = model_fn(resized_image)
return global_descriptor
# First loop to find initial scale to be used.
num_scales = tf.shape(image_scales)[0]
initial_scale_index = tf.constant(-1, dtype=tf.int32)
for scale_index in tf.range(num_scales):
if tf.reduce_any(tf.equal(global_scales_ind, scale_index)):
initial_scale_index = scale_index
break
output_global = _ResizeAndExtract(initial_scale_index)
# Loop over subsequent scales.
for scale_index in tf.range(initial_scale_index + 1, num_scales):
# Allow an undefined number of global feature scales to be extracted.
tf.autograph.experimental.set_loop_options(
shape_invariants=[(output_global, tf.TensorShape([None, None]))])
if tf.reduce_any(tf.equal(global_scales_ind, scale_index)):
global_descriptor = _ResizeAndExtract(scale_index)
output_global = tf.concat([output_global, global_descriptor], 0)
normalization_axis = 1
if multi_scale_pool_type == 'average':
output_global = tf.reduce_mean(
output_global,
axis=0,
keepdims=False,
name='multi_scale_average_pooling')
normalization_axis = 0
elif multi_scale_pool_type == 'sum':
output_global = tf.reduce_sum(
output_global, axis=0, keepdims=False, name='multi_scale_sum_pooling')
normalization_axis = 0
if normalize_global_descriptor:
output_global = tf.nn.l2_normalize(
output_global, axis=normalization_axis, name='l2_normalization')
return output_global
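# Illustrative usage sketch for `ExtractGlobalFeatures` (not part of the
# original file). The dummy `model_fn`, image size and scale values below are
# assumptions chosen purely for demonstration; a real `model_fn` would be a
# trained global-feature model returning [batch, D] descriptors.
#
#   def _dummy_model_fn(images):
#     # Global-average-pool over spatial dimensions -> [batch, 3] "descriptor".
#     return tf.reduce_mean(images, axis=[1, 2])
#
#   image = tf.zeros([224, 224, 3], dtype=tf.uint8)
#   scales = tf.constant([0.70710677, 1.0, 1.4142135])
#   global_scales_ind = tf.range(tf.size(scales))
#   descriptors = ExtractGlobalFeatures(
#       image, scales, global_scales_ind, _dummy_model_fn,
#       multi_scale_pool_type='average', normalize_global_descriptor=True)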
@tf.function
def ExtractLocalAndGlobalFeatures(image, image_scales, max_feature_num,
abs_thres, global_scales_ind, iou, model_fn,
stride_factor):
"""Extract local+global features for input image.
Args:
image: image tensor of type tf.uint8 with shape [h, w, channels].
image_scales: 1D float tensor which contains float scales used for image
pyramid construction.
max_feature_num: int tensor denoting the maximum selected feature points.
abs_thres: float tensor denoting the score threshold for feature selection.
global_scales_ind: Global feature extraction happens only for a subset of
`image_scales`, those with corresponding indices from this tensor.
iou: float scalar denoting the iou threshold for NMS.
model_fn: model function. Follows the signature:
* Args:
* `images`: Batched image tensor.
* Returns:
* `global_descriptors`: Global descriptors for input images.
* `attention_prob`: Attention map after the non-linearity.
* `feature_map`: Feature map after ResNet convolution.
stride_factor: integer accounting for striding after block3.
Returns:
boxes: [N, 4] float tensor which denotes the selected receptive boxes. N is
the number of final feature points which pass through keypoint selection
and NMS steps.
local_descriptors: [N, depth] float tensor.
feature_scales: [N] float tensor. It is the inverse of the input image
scales such that larger image scales correspond to larger image regions,
which is compatible with keypoints detected with other techniques, for
example Congas.
scores: [N, 1] float tensor denoting the attention score.
global_descriptors: [S, D] float tensor, with the global descriptors for
each scale; S is the number of scales, and D the global descriptor
dimensionality.
"""
original_image_shape_float = tf.gather(
tf.dtypes.cast(tf.shape(image), tf.float32), [0, 1])
image_tensor = gld.NormalizeImages(
image, pixel_value_offset=128.0, pixel_value_scale=128.0)
image_tensor = tf.expand_dims(image_tensor, 0, name='image/expand_dims')
# Hard code the receptive field parameters for now.
# We need to revisit this once we change the architecture and selected
# convolutional blocks to use as local features.
rf, stride, padding = [291.0, 16.0 * stride_factor, 145.0]
def _ResizeAndExtract(scale_index):
"""Helper function to resize image then extract features.
Args:
scale_index: A valid index in image_scales.
Returns:
global_descriptor: [1,D] tensor denoting the extracted global descriptor.
boxes: Box tensor with the shape of [K, 4].
local_descriptors: Local descriptor tensor with the shape of [K, depth].
scales: Scale tensor with the shape of [K].
scores: Score tensor with the shape of [K].
"""
scale = tf.gather(image_scales, scale_index)
new_image_size = tf.dtypes.cast(
tf.round(original_image_shape_float * scale), tf.int32)
resized_image = tf.image.resize(image_tensor, new_image_size)
global_descriptor, attention_prob, feature_map = model_fn(resized_image)
attention_prob = tf.squeeze(attention_prob, axis=[0])
feature_map = tf.squeeze(feature_map, axis=[0])
# Compute RF boxes and re-project them to the original image space.
rf_boxes = feature_extractor.CalculateReceptiveBoxes(
tf.shape(feature_map)[0],
tf.shape(feature_map)[1], rf, stride, padding)
rf_boxes = tf.divide(rf_boxes, scale)
attention_prob = tf.reshape(attention_prob, [-1])
feature_map = tf.reshape(feature_map, [-1, tf.shape(feature_map)[2]])
# Use attention score to select local features.
indices = tf.reshape(tf.where(attention_prob >= abs_thres), [-1])
boxes = tf.gather(rf_boxes, indices)
local_descriptors = tf.gather(feature_map, indices)
scores = tf.gather(attention_prob, indices)
scales = tf.ones_like(scores, tf.float32) / scale
return global_descriptor, boxes, local_descriptors, scales, scores
# TODO(andrearaujo): Currently, a global feature is extracted even for scales
# which are not using it. The obtained result is correct, however feature
# extraction is slower than expected. We should try to fix this in the future.
# Run first scale.
(output_global_descriptors, output_boxes, output_local_descriptors,
output_scales, output_scores) = _ResizeAndExtract(0)
if not tf.reduce_any(tf.equal(global_scales_ind, 0)):
# If global descriptor is not using the first scale, clear it out.
output_global_descriptors = tf.zeros(
[0, tf.shape(output_global_descriptors)[1]])
# Loop over subsequent scales.
num_scales = tf.shape(image_scales)[0]
for scale_index in tf.range(1, num_scales):
# Allow an undefined number of global feature scales to be extracted.
tf.autograph.experimental.set_loop_options(
shape_invariants=[(output_global_descriptors,
tf.TensorShape([None, None]))])
(global_descriptor, boxes, local_descriptors, scales,
scores) = _ResizeAndExtract(scale_index)
output_boxes = tf.concat([output_boxes, boxes], 0)
output_local_descriptors = tf.concat(
[output_local_descriptors, local_descriptors], 0)
output_scales = tf.concat([output_scales, scales], 0)
output_scores = tf.concat([output_scores, scores], 0)
if tf.reduce_any(tf.equal(global_scales_ind, scale_index)):
output_global_descriptors = tf.concat(
[output_global_descriptors, global_descriptor], 0)
feature_boxes = box_list.BoxList(output_boxes)
feature_boxes.add_field('local_descriptors', output_local_descriptors)
feature_boxes.add_field('scales', output_scales)
feature_boxes.add_field('scores', output_scores)
nms_max_boxes = tf.minimum(max_feature_num, feature_boxes.num_boxes())
final_boxes = box_list_ops.non_max_suppression(feature_boxes, iou,
nms_max_boxes)
return (final_boxes.get(), final_boxes.get_field('local_descriptors'),
final_boxes.get_field('scales'),
tf.expand_dims(final_boxes.get_field('scores'),
1), output_global_descriptors)
| 18,074 | 42.978102 | 80 | py |
models | models-master/research/delf/delf/python/training/model/global_model.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CNN Image Retrieval model implementation based on the following papers:
[1] Fine-tuning CNN Image Retrieval with No Human Annotation,
Radenović F., Tolias G., Chum O., TPAMI 2018 [arXiv]
https://arxiv.org/abs/1711.02512
[2] CNN Image Retrieval Learns from BoW: Unsupervised Fine-Tuning with Hard
Examples, Radenović F., Tolias G., Chum O., ECCV 2016 [arXiv]
https://arxiv.org/abs/1604.02426
"""
import os
import pickle
import tensorflow as tf
from delf.python.datasets import generic_dataset
from delf.python.normalization_layers import normalization
from delf.python.pooling_layers import pooling as pooling_layers
from delf.python.training import global_features_utils
# Pre-computed global whitening, for most commonly used architectures.
# Using pre-computed whitening improves the speed of the convergence and the
# performance.
_WHITENING_CONFIG = {
'ResNet50': 'http://cmp.felk.cvut.cz/cnnimageretrieval_tf'
'/SFM120k_ResNet50_gem_learned_whitening_config.pkl',
'ResNet101': 'http://cmp.felk.cvut.cz/cnnimageretrieval_tf'
'/SFM120k_ResNet101_gem_learned_whitening_config.pkl',
'ResNet152': 'http://cmp.felk.cvut.cz/cnnimageretrieval_tf'
'/SFM120k_ResNet152_gem_learned_whitening_config.pkl',
'VGG19': 'http://cmp.felk.cvut.cz/cnnimageretrieval_tf'
'/SFM120k_VGG19_gem_learned_whitening_config.pkl'
}
# Possible global pooling layers.
_POOLING = {
'mac': pooling_layers.MAC,
'spoc': pooling_layers.SPoC,
'gem': pooling_layers.GeM
}
# Output dimensionality for supported architectures.
_OUTPUT_DIM = {
'VGG16': 512,
'VGG19': 512,
'ResNet50': 2048,
'ResNet101': 2048,
'ResNet101V2': 2048,
'ResNet152': 2048,
'DenseNet121': 1024,
'DenseNet169': 1664,
'DenseNet201': 1920,
'EfficientNetB5': 2048,
'EfficientNetB7': 2560
}
class GlobalFeatureNet(tf.keras.Model):
"""Instantiates global model for image retrieval.
This class implements the [GlobalFeatureNet](
https://arxiv.org/abs/1711.02512) for image retrieval. The model uses a
user-defined model as a backbone.
"""
def __init__(self, architecture='ResNet101', pooling='gem',
whitening=False, pretrained=True, data_root=''):
"""GlobalFeatureNet network initialization.
Args:
architecture: Network backbone.
pooling: Pooling method used 'mac'/'spoc'/'gem'.
whitening: Bool, whether to use whitening.
pretrained: Bool, whether to initialize the network with the weights
pretrained on ImageNet.
data_root: String, path to the data folder where the precomputed
whitening is/will be saved in case `whitening` is True.
Raises:
ValueError: If `architecture` is not supported.
"""
if architecture not in _OUTPUT_DIM.keys():
raise ValueError("Architecture {} is not supported.".format(architecture))
super(GlobalFeatureNet, self).__init__()
# Get standard output dimensionality size.
dim = _OUTPUT_DIM[architecture]
if pretrained:
# Initialize with network pretrained on imagenet.
net_in = getattr(tf.keras.applications, architecture)(include_top=False,
weights="imagenet")
else:
# Initialize with random weights.
net_in = getattr(tf.keras.applications, architecture)(include_top=False,
weights=None)
# Initialize `feature_extractor`. Take only convolutions for
# `feature_extractor`, always end with ReLU to make last activations
# non-negative.
if architecture.lower().startswith('densenet'):
tmp_model = tf.keras.Sequential()
tmp_model.add(net_in)
net_in = tmp_model
net_in.add(tf.keras.layers.ReLU())
# Initialize pooling.
self.pool = _POOLING[pooling]()
# Initialize whitening.
if whitening:
if pretrained and architecture in _WHITENING_CONFIG:
# If precomputed whitening for the architecture exists,
# the fully-connected layer is going to be initialized according to
# the precomputed layer configuration.
global_features_utils.debug_and_log(
">> {}: for '{}' custom computed whitening '{}' is used."
.format(os.getcwd(), architecture,
os.path.basename(_WHITENING_CONFIG[architecture])))
# The layer configuration is downloaded to the `data_root` folder.
whiten_dir = os.path.join(data_root, architecture)
path = tf.keras.utils.get_file(fname=whiten_dir,
origin=_WHITENING_CONFIG[architecture])
# Whitening configuration is loaded.
with tf.io.gfile.GFile(path, 'rb') as learned_whitening_file:
whitening_config = pickle.load(learned_whitening_file)
# Whitening layer is initialized according to the configuration.
self.whiten = tf.keras.layers.Dense.from_config(whitening_config)
else:
# In case if no precomputed whitening exists for the chosen
# architecture, the fully-connected whitening layer is initialized
# with the random weights.
self.whiten = tf.keras.layers.Dense(dim, activation=None, use_bias=True)
global_features_utils.debug_and_log(
">> There is either no whitening computed for the "
"used network architecture or pretrained is False,"
" random weights are used.")
else:
self.whiten = None
# Create meta information to be stored in the network.
self.meta = {
'architecture': architecture,
'pooling': pooling,
'whitening': whitening,
'outputdim': dim
}
self.feature_extractor = net_in
self.normalize = normalization.L2Normalization()
def call(self, x, training=False):
"""Invokes the GlobalFeatureNet instance.
Args:
x: [B, H, W, C] Tensor with a batch of images.
training: Indicator of whether the forward pass is running in training
mode or not.
Returns:
out: [B, out_dim] Global descriptor.
"""
# Forward pass through the fully-convolutional backbone.
o = self.feature_extractor(x, training)
# Pooling.
o = self.pool(o)
# Normalization.
o = self.normalize(o)
# If whitening exists: the pooled global descriptor is whitened and
# re-normalized.
if self.whiten is not None:
o = self.whiten(o)
o = self.normalize(o)
return o
def meta_repr(self):
'''Provides high-level information about the network.
Returns:
meta: string with the information about the network (used
architecture, pooling type, whitening, outputdim).
'''
tmpstr = '(meta):\n'
tmpstr += '\tarchitecture: {}\n'.format(self.meta['architecture'])
tmpstr += '\tpooling: {}\n'.format(self.meta['pooling'])
tmpstr += '\twhitening: {}\n'.format(self.meta['whitening'])
tmpstr += '\toutputdim: {}\n'.format(self.meta['outputdim'])
return tmpstr
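# Illustrative usage sketch (not part of the original file): a single forward
# pass through the network. The batch and image sizes below are assumptions
# chosen purely for demonstration.
#
#   net = GlobalFeatureNet(architecture='ResNet50', pooling='gem',
#                          whitening=False, pretrained=False)
#   images = tf.random.uniform([2, 224, 224, 3])
#   descriptors = net(images, training=False)  # Shape: [2, 2048].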
def extract_global_descriptors_from_list(net, images, image_size,
bounding_boxes=None, scales=[1.],
multi_scale_power=1., print_freq=10):
"""Extracting global descriptors from a list of images.
Args:
net: Model object, network for the forward pass.
images: Absolute image paths as strings.
image_size: Integer, defines the maximum size of longer image side.
bounding_boxes: List of (x1,y1,x2,y2) tuples to crop the query images.
scales: List of float scales.
multi_scale_power: Float, multi-scale normalization power parameter.
print_freq: Printing frequency for debugging.
Returns:
descriptors: Global descriptors for the input images.
"""
# Creating dataset loader.
data = generic_dataset.ImagesFromList(root='', image_paths=images,
imsize=image_size,
bounding_boxes=bounding_boxes)
def _data_gen():
return (inst for inst in data)
loader = tf.data.Dataset.from_generator(_data_gen, output_types=(tf.float32))
loader = loader.batch(1)
# Extracting vectors.
descriptors = tf.zeros((0, net.meta['outputdim']))
for i, input in enumerate(loader):
if len(scales) == 1 and scales[0] == 1:
descriptors = tf.concat([descriptors, net(input)], 0)
else:
descriptors = tf.concat(
[descriptors, extract_multi_scale_descriptor(
net, input, scales, multi_scale_power)], 0)
if (i + 1) % print_freq == 0 or (i + 1) == len(images):
global_features_utils.debug_and_log(
'\r>>>> {}/{} done...'.format((i + 1), len(images)),
debug_on_the_same_line=True)
global_features_utils.debug_and_log('', log=False)
descriptors = tf.transpose(descriptors, perm=[1, 0])
return descriptors
def extract_multi_scale_descriptor(net, input, scales, multi_scale_power):
"""Extracts the global descriptor multi scale.
Args:
net: Model object, network for the forward pass.
input: [B, H, W, C] input tensor in channel-last (BHWC) configuration.
scales: List of float scales.
multi_scale_power: Float, multi-scale normalization power parameter.
Returns:
descriptors: Multi-scale global descriptors for the input images.
"""
descriptors = tf.zeros(net.meta['outputdim'])
for s in scales:
if s == 1:
input_t = input
else:
output_shape = s * tf.shape(input)[1:3].numpy()
input_t = tf.image.resize(input, output_shape,
method='bilinear',
preserve_aspect_ratio=True)
descriptors += tf.pow(net(input_t), multi_scale_power)
descriptors /= len(scales)
descriptors = tf.pow(descriptors, 1. / multi_scale_power)
descriptors /= tf.norm(descriptors)
return descriptors
| 10,632 | 36.178322 | 80 | py |
models | models-master/research/delf/delf/python/training/model/export_CNN_global.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export global CNN feature tensorflow inference model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
from delf.python.training.model import global_model
from delf.python.training.model import export_model_utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ckpt_path', None, help='Path to saved checkpoint.')
flags.DEFINE_string('export_path', None,
help='Path where model will be exported.')
flags.DEFINE_list(
'input_scales_list', None,
'Optional input image scales to use. If None (default), an input '
'end-point '
'"input_scales" is added for the exported model. If not None, the '
'specified list of floats will be hard-coded as the desired input '
'scales.')
flags.DEFINE_enum(
'multi_scale_pool_type', 'None', ['None', 'average', 'sum'],
"If 'None' (default), the model is exported with an output end-point "
"'global_descriptors', where the global descriptor for each scale is "
"returned separately. If not 'None', the global descriptor of each "
"scale is"
' pooled and a 1D global descriptor is returned, with output end-point '
"'global_descriptor'.")
flags.DEFINE_boolean('normalize_global_descriptor', False,
'If True, L2-normalizes global descriptor.')
# Network architecture and initialization options.
flags.DEFINE_string('arch', 'ResNet101',
'model architecture (default: ResNet101)')
flags.DEFINE_string('pool', 'gem', 'pooling options (default: gem)')
flags.DEFINE_boolean('whitening', False,
'train model with learnable whitening (linear layer) '
'after the pooling')
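# Illustrative example invocation (not part of the original file); the
# checkpoint and export paths are placeholders:
#
#   python3 export_CNN_global.py \
#     --ckpt_path=/path/to/checkpoint \
#     --export_path=/path/to/export_dir \
#     --input_scales_list=0.70710677,1.0,1.4142135 \
#     --multi_scale_pool_type=average \
#     --normalize_global_descriptor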
def _NormalizeImages(images, *args, **kwargs):
  """Normalize pixel values in image.
  Args:
    images: `Tensor`, images to normalize.
    *args: Additional positional arguments, ignored.
    **kwargs: Additional keyword arguments (e.g. pixel value offset/scale),
      ignored by the 'caffe'-style preprocessing used here.
  Returns:
    normalized_images: `Tensor`, normalized images.
  """
  images = tf.keras.applications.imagenet_utils.preprocess_input(
      images, mode='caffe')
  return images
class _ExtractModule(tf.Module):
"""Helper module to build and save global feature model."""
def __init__(self,
multi_scale_pool_type='None',
normalize_global_descriptor=False,
input_scales_tensor=None):
"""Initialization of global feature model.
Args:
multi_scale_pool_type: Type of multi-scale pooling to perform.
normalize_global_descriptor: Whether to L2-normalize global
descriptor.
input_scales_tensor: If None, the exported function to be used
should be ExtractFeatures, where an input end-point "input_scales" is
added for the exported model. If not None, the specified 1D tensor of
floats will be hard-coded as the desired input scales, in conjunction
with ExtractFeaturesFixedScales.
"""
self._multi_scale_pool_type = multi_scale_pool_type
self._normalize_global_descriptor = normalize_global_descriptor
if input_scales_tensor is None:
self._input_scales_tensor = []
else:
self._input_scales_tensor = input_scales_tensor
self._model = global_model.GlobalFeatureNet(
FLAGS.arch, FLAGS.pool, FLAGS.whitening, pretrained=False)
def LoadWeights(self, checkpoint_path):
self._model.load_weights(checkpoint_path)
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8,
name='input_image'),
tf.TensorSpec(shape=[None], dtype=tf.float32, name='input_scales'),
tf.TensorSpec(shape=[None], dtype=tf.int32,
name='input_global_scales_ind')
])
def ExtractFeatures(self, input_image, input_scales,
input_global_scales_ind):
extracted_features = export_model_utils.ExtractGlobalFeatures(
input_image,
input_scales,
input_global_scales_ind,
lambda x: self._model(x, training=False),
multi_scale_pool_type=self._multi_scale_pool_type,
normalize_global_descriptor=self._normalize_global_descriptor,
        normalization_function=_NormalizeImages)
named_output_tensors = {}
named_output_tensors['global_descriptors'] = tf.identity(
extracted_features, name='global_descriptors')
return named_output_tensors
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image')
])
def ExtractFeaturesFixedScales(self, input_image):
return self.ExtractFeatures(input_image, self._input_scales_tensor,
tf.range(tf.size(self._input_scales_tensor)))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
export_path = FLAGS.export_path
if os.path.exists(export_path):
raise ValueError('export_path %s already exists.' % export_path)
if FLAGS.input_scales_list is None:
input_scales_tensor = None
else:
input_scales_tensor = tf.constant(
[float(s) for s in FLAGS.input_scales_list],
dtype=tf.float32,
shape=[len(FLAGS.input_scales_list)],
name='input_scales')
module = _ExtractModule(FLAGS.multi_scale_pool_type,
FLAGS.normalize_global_descriptor,
input_scales_tensor)
# Load the weights.
checkpoint_path = FLAGS.ckpt_path
module.LoadWeights(checkpoint_path)
print('Checkpoint loaded from ', checkpoint_path)
# Save the module.
if FLAGS.input_scales_list is None:
served_function = module.ExtractFeatures
else:
served_function = module.ExtractFeaturesFixedScales
tf.saved_model.save(
module, export_path, signatures={'serving_default': served_function})
if __name__ == '__main__':
app.run(main)
| 6,604 | 36.95977 | 80 | py |
models | models-master/research/delf/delf/python/training/model/global_model_test.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GlobalFeatureNet backbone."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
from PIL import Image
import tensorflow as tf
from delf.python.training.model import global_model
FLAGS = flags.FLAGS
class GlobalFeatureNetTest(tf.test.TestCase):
"""Tests for the GlobalFeatureNet backbone."""
def testInitModel(self):
"""Testing GlobalFeatureNet initialization."""
# Testing GlobalFeatureNet initialization.
model_params = {'architecture': 'ResNet101', 'pooling': 'gem',
'whitening': False, 'pretrained': True}
model = global_model.GlobalFeatureNet(**model_params)
expected_meta = {'architecture': 'ResNet101', 'pooling': 'gem',
'whitening': False, 'outputdim': 2048}
self.assertEqual(expected_meta, model.meta)
def testExtractVectors(self):
"""Tests extraction of global descriptors from list."""
# Initializing network for testing.
model_params = {'architecture': 'ResNet101', 'pooling': 'gem',
'whitening': False, 'pretrained': True}
model = global_model.GlobalFeatureNet(**model_params)
# Number of images to be created.
n = 2
image_paths = []
# Create `n` dummy images.
for i in range(n):
dummy_image = np.random.rand(1024, 750, 3) * 255
img_out = Image.fromarray(dummy_image.astype('uint8')).convert('RGB')
filename = os.path.join(FLAGS.test_tmpdir, 'test_image_{}.jpg'.format(i))
img_out.save(filename)
image_paths.append(filename)
descriptors = global_model.extract_global_descriptors_from_list(
model, image_paths, image_size=1024, bounding_boxes=None,
scales=[1., 3.], multi_scale_power=2, print_freq=1)
self.assertAllEqual([2048, 2], tf.shape(descriptors))
def testExtractMultiScale(self):
"""Tests multi-scale global descriptor extraction."""
# Initializing network for testing.
model_params = {'architecture': 'ResNet101', 'pooling': 'gem',
'whitening': False, 'pretrained': True}
model = global_model.GlobalFeatureNet(**model_params)
input = tf.random.uniform([2, 1024, 750, 3], dtype=tf.float32, seed=0)
descriptors = global_model.extract_multi_scale_descriptor(
model, input, scales=[1., 3.], multi_scale_power=2)
self.assertAllEqual([2, 2048], tf.shape(descriptors))
if __name__ == '__main__':
tf.test.main()
| 3,230 | 36.137931 | 80 | py |
models | models-master/research/delf/delf/python/training/losses/ranking_losses.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ranking loss definitions."""
import tensorflow as tf
class ContrastiveLoss(tf.keras.losses.Loss):
"""Contrastive Loss layer.
  The Contrastive Loss layer computes the contrastive loss for a batch of
  images. Implementation based on: https://arxiv.org/abs/1604.02426.
"""
def __init__(self, margin=0.7, reduction=tf.keras.losses.Reduction.NONE):
"""Initialization of Contrastive Loss layer.
Args:
margin: Float contrastive loss margin.
reduction: Type of loss reduction.
"""
super(ContrastiveLoss, self).__init__(reduction)
self.margin = margin
# Parameter for numerical stability.
self.eps = 1e-6
def __call__(self, queries, positives, negatives):
"""Invokes the Contrastive Loss instance.
Args:
queries: [batch_size, dim] Anchor input tensor.
positives: [batch_size, dim] Positive sample input tensor.
negatives: [batch_size, num_neg, dim] Negative sample input tensor.
Returns:
loss: Scalar tensor.
"""
return contrastive_loss(
queries, positives, negatives, margin=self.margin, eps=self.eps)
class TripletLoss(tf.keras.losses.Loss):
"""Triplet Loss layer.
Triplet Loss layer computes triplet loss for a batch of images. Triplet
loss tries to keep all queries closer to positives than to any negatives.
Margin is used to specify when a triplet has become too "easy" and we no
longer want to adjust the weights from it. Differently from the Contrastive
Loss, Triplet Loss uses squared distances when computing the loss.
Implementation based on: https://arxiv.org/abs/1511.07247.
"""
def __init__(self, margin=0.1, reduction=tf.keras.losses.Reduction.NONE):
"""Initialization of Triplet Loss layer.
Args:
margin: Triplet loss margin.
reduction: Type of loss reduction.
"""
super(TripletLoss, self).__init__(reduction)
self.margin = margin
def __call__(self, queries, positives, negatives):
"""Invokes the Triplet Loss instance.
Args:
queries: [batch_size, dim] Anchor input tensor.
positives: [batch_size, dim] Positive sample input tensor.
negatives: [batch_size, num_neg, dim] Negative sample input tensor.
Returns:
loss: Scalar tensor.
"""
return triplet_loss(queries, positives, negatives, margin=self.margin)
def contrastive_loss(queries, positives, negatives, margin=0.7, eps=1e-6):
"""Calculates Contrastive Loss.
We expect the `queries`, `positives` and `negatives` to be normalized with
unit length for training stability. The contrastive loss directly
optimizes this distance by encouraging all positive distances to
approach 0, while keeping negative distances above a certain threshold.
Args:
queries: [batch_size, dim] Anchor input tensor.
positives: [batch_size, dim] Positive sample input tensor.
negatives: [batch_size, num_neg, dim] Negative sample input tensor.
    margin: Float contrastive loss margin.
eps: Float parameter for numerical stability.
Returns:
loss: Scalar tensor.
"""
dim = tf.shape(queries)[1]
# Number of `queries`.
batch_size = tf.shape(queries)[0]
# Number of `positives`.
np = tf.shape(positives)[0]
# Number of `negatives`.
num_neg = tf.shape(negatives)[1]
# Preparing negatives.
stacked_negatives = tf.reshape(negatives, [num_neg * batch_size, dim])
# Preparing queries for further loss calculation.
stacked_queries = tf.repeat(queries, num_neg + 1, axis=0)
positives_and_negatives = tf.concat([positives, stacked_negatives], axis=0)
# Calculate an Euclidean norm for each pair of points. For any positive
# pair of data points this distance should be small, and for
# negative pair it should be large.
distances = tf.norm(stacked_queries - positives_and_negatives + eps, axis=1)
positives_part = 0.5 * tf.pow(distances[:np], 2.0)
negatives_part = 0.5 * tf.pow(
tf.math.maximum(margin - distances[np:], 0), 2.0)
# Final contrastive loss calculation.
loss = tf.reduce_sum(tf.concat([positives_part, negatives_part], 0))
return loss
def triplet_loss(queries, positives, negatives, margin=0.1):
"""Calculates Triplet Loss.
Triplet loss tries to keep all queries closer to positives than to any
negatives. Differently from the Contrastive Loss, Triplet Loss uses squared
distances when computing the loss.
Args:
queries: [batch_size, dim] Anchor input tensor.
positives: [batch_size, dim] Positive sample input tensor.
negatives: [batch_size, num_neg, dim] Negative sample input tensor.
    margin: Float triplet loss margin.
Returns:
loss: Scalar tensor.
"""
dim = tf.shape(queries)[1]
# Number of `queries`.
batch_size = tf.shape(queries)[0]
# Number of `negatives`.
num_neg = tf.shape(negatives)[1]
# Preparing negatives.
stacked_negatives = tf.reshape(negatives, [num_neg * batch_size, dim])
# Preparing queries for further loss calculation.
stacked_queries = tf.repeat(queries, num_neg, axis=0)
# Preparing positives for further loss calculation.
stacked_positives = tf.repeat(positives, num_neg, axis=0)
# Computes *squared* distances.
distance_positives = tf.reduce_sum(
tf.square(stacked_queries - stacked_positives), axis=1)
distance_negatives = tf.reduce_sum(
tf.square(stacked_queries - stacked_negatives), axis=1)
# Final triplet loss calculation.
loss = tf.reduce_sum(
tf.maximum(distance_positives - distance_negatives + margin, 0.0))
return loss
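# Illustrative usage sketch (not part of the original file): both losses
# expect L2-normalized descriptors, shaped [batch_size, dim] for queries and
# positives and [batch_size, num_neg, dim] for negatives. The random toy
# tensors below are assumptions chosen purely for demonstration.
#
#   queries = tf.math.l2_normalize(tf.random.normal([4, 128]), axis=1)
#   positives = tf.math.l2_normalize(tf.random.normal([4, 128]), axis=1)
#   negatives = tf.math.l2_normalize(tf.random.normal([4, 5, 128]), axis=2)
#   contrastive = ContrastiveLoss(margin=0.7)(queries, positives, negatives)
#   triplet = TripletLoss(margin=0.1)(queries, positives, negatives)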
| 6,201 | 34.238636 | 80 | py |
models | models-master/research/delf/delf/python/training/losses/ranking_losses_test.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Ranking losses."""
import tensorflow as tf
from delf.python.training.losses import ranking_losses
class RankingLossesTest(tf.test.TestCase):
def testContrastiveLoss(self):
# Testing the correct numeric value.
queries = tf.math.l2_normalize(tf.constant([[1.0, 2.0, -2.0]]))
positives = tf.math.l2_normalize(tf.constant([[-1.0, 2.0, 0.0]]))
negatives = tf.math.l2_normalize(tf.constant([[[-5.0, 0.0, 3.0]]]))
result = ranking_losses.contrastive_loss(queries, positives, negatives,
margin=0.7, eps=1e-6)
exp_output = 0.55278635
self.assertAllClose(exp_output, result)
def testTripletLossZeroLoss(self):
    # Testing the correct numeric value when the query-positive distance is
# smaller than the query-negative distance.
queries = tf.math.l2_normalize(tf.constant([[1.0, 2.0, -2.0]]))
positives = tf.math.l2_normalize(tf.constant([[-1.0, 2.0, 0.0]]))
negatives = tf.math.l2_normalize(tf.constant([[[-5.0, 0.0, 3.0]]]))
result = ranking_losses.triplet_loss(queries, positives, negatives,
margin=0.1)
exp_output = 0.0
self.assertAllClose(exp_output, result)
def testTripletLossNonZeroLoss(self):
    # Testing the correct numeric value when the query-positive distance is
# bigger than the query-negative distance.
queries = tf.math.l2_normalize(tf.constant([[1.0, 2.0, -2.0]]))
positives = tf.math.l2_normalize(tf.constant([[-5.0, 0.0, 3.0]]))
negatives = tf.math.l2_normalize(tf.constant([[[-1.0, 2.0, 0.0]]]))
result = ranking_losses.triplet_loss(queries, positives, negatives,
margin=0.1)
exp_output = 2.2520838
self.assertAllClose(exp_output, result)
if __name__ == '__main__':
tf.test.main()
| 2,530 | 40.491803 | 80 | py |
models | models-master/research/delf/delf/python/training/losses/__init__.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================== | 687 | 48.142857 | 80 | py |
models | models-master/research/delf/delf/python/datasets/generic_dataset.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for generic image dataset creation."""
import os
from delf.python.datasets import utils
class ImagesFromList():
"""A generic data loader that loads images from a list.
Supports images of different sizes.
"""
def __init__(self, root, image_paths, imsize=None, bounding_boxes=None,
loader=utils.default_loader):
"""ImagesFromList object initialization.
Args:
root: String, root directory path.
image_paths: List, relative image paths as strings.
imsize: Integer, defines the maximum size of longer image side.
bounding_boxes: List of (x1,y1,x2,y2) tuples to crop the query images.
loader: Callable, a function to load an image given its path.
Raises:
ValueError: Raised if `image_paths` list is empty.
"""
# List of the full image filenames.
images_filenames = [os.path.join(root, image_path) for image_path in
image_paths]
if not images_filenames:
raise ValueError("Dataset contains 0 images.")
self.root = root
self.images = image_paths
self.imsize = imsize
self.images_filenames = images_filenames
self.bounding_boxes = bounding_boxes
self.loader = loader
def __getitem__(self, index):
"""Called to load an image at the given `index`.
Args:
index: Integer, image index.
Returns:
image: Tensor, loaded image.
"""
path = self.images_filenames[index]
if self.bounding_boxes is not None:
img = self.loader(path, self.imsize, self.bounding_boxes[index])
else:
img = self.loader(path, self.imsize)
return img
def __len__(self):
"""Implements the built-in function len().
Returns:
len: Number of images in the dataset.
"""
return len(self.images_filenames)
| 2,515 | 29.682927 | 80 | py |
models | models-master/research/delf/delf/python/datasets/tuples_dataset.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tuple dataset module.
Based on Radenovic et al. ECCV16: CNN image retrieval learns from BoW.
For more information refer to https://arxiv.org/abs/1604.02426.
"""
import os
import pickle
import numpy as np
import tensorflow as tf
from delf.python.datasets import utils as image_loading_utils
from delf.python.training import global_features_utils
from delf.python.training.model import global_model
class TuplesDataset():
"""Data loader that loads training and validation tuples.
After initialization, the function create_epoch_tuples() should be called to
create the dataset tuples. After that, the dataset can be iterated through
using next() function.
Tuples are based on Radenovic et al. ECCV16 work: CNN image retrieval
learns from BoW. For more information refer to
https://arxiv.org/abs/1604.02426.
"""
def __init__(self, name, mode, data_root, imsize=None, num_negatives=5,
num_queries=2000, pool_size=20000,
loader=image_loading_utils.default_loader, ims_root=None):
"""TuplesDataset object initialization.
Args:
name: String, dataset name. I.e. 'retrieval-sfm-120k'.
mode: 'train' or 'val' for training and validation parts of dataset.
data_root: Path to the root directory of the dataset.
      imsize: Integer, defines the maximum size of the longer image side.
num_negatives: Integer, number of negative images for a query image in a
training tuple.
num_queries: Integer, number of query images to be processed in one epoch.
pool_size: Integer, size of the negative image pool, from where the
hard-negative images are re-mined.
loader: Callable, a function to load an image given its path.
ims_root: String, image root directory.
Raises:
ValueError: If mode is not either 'train' or 'val'.
"""
if mode not in ['train', 'val']:
raise ValueError(
"`mode` argument should be either 'train' or 'val', passed as a "
"String.")
# Loading db.
db_filename = os.path.join(data_root, '{}.pkl'.format(name))
with tf.io.gfile.GFile(db_filename, 'rb') as f:
db = pickle.load(f)[mode]
# Initializing tuples dataset.
self._ims_root = data_root if ims_root is None else ims_root
self._name = name
self._mode = mode
self._imsize = imsize
self._clusters = db['cluster']
self._query_pool = db['qidxs']
self._positive_pool = db['pidxs']
if not hasattr(self, 'images'):
self.images = db['ids']
# Size of training subset for an epoch.
self._num_negatives = num_negatives
self._num_queries = min(num_queries, len(self._query_pool))
self._pool_size = min(pool_size, len(self.images))
self._qidxs = None
self._pidxs = None
self._nidxs = None
self._loader = loader
self._print_freq = 10
# Indexer for the iterator.
self._n = 0
def __iter__(self):
"""Function for making TupleDataset an iterator.
Returns:
iter: The iterator object itself (TupleDataset).
"""
return self
def __next__(self):
"""Function for making TupleDataset an iterator.
Returns:
next: The next item in the sequence (next dataset image tuple).
"""
if self._n < len(self._qidxs):
result = self.__getitem__(self._n)
self._n += 1
return result
else:
raise StopIteration
def _img_names_to_full_path(self, image_list):
"""Converts list of image names to the list of full paths to the images.
Args:
image_list: Image names, either a list or a single image path.
Returns:
image_full_paths: List of full paths to the images.
"""
if not isinstance(image_list, list):
return os.path.join(self._ims_root, image_list)
return [os.path.join(self._ims_root, img_name) for img_name in image_list]
def __getitem__(self, index):
"""Called to load an image tuple at the given `index`.
Args:
index: Integer, index.
Returns:
output: Tuple [q,p,n1,...,nN, target], loaded 'train'/'val' tuple at
index of qidxs. `q` is the query image tensor, `p` is the
corresponding positive image tensor, `n1`,...,`nN` are the negatives
associated with the query. `target` is a tensor (with the shape [2+N])
of integer labels corresponding to the tuple list: query (-1),
positive (1), negative (0).
Raises:
ValueError: Raised if the query indexes list `qidxs` is empty.
"""
if self.__len__() == 0:
raise ValueError(
"List `qidxs` is empty. Run `dataset.create_epoch_tuples(net)` "
"method to create subset for `train`/`val`.")
output = []
# Query image.
output.append(self._loader(
self._img_names_to_full_path(self.images[self._qidxs[index]]),
self._imsize))
# Positive image.
output.append(self._loader(
self._img_names_to_full_path(self.images[self._pidxs[index]]),
self._imsize))
# Negative images.
for nidx in self._nidxs[index]:
output.append(self._loader(
self._img_names_to_full_path(self.images[nidx]),
self._imsize))
# Labels for the query (-1), positive (1), negative (0) images in the tuple.
target = tf.convert_to_tensor([-1, 1] + [0] * self._num_negatives)
output.append(target)
return tuple(output)
def __len__(self):
"""Called to implement the built-in function len().
Returns:
len: Integer, number of query images.
"""
if self._qidxs is None:
return 0
return len(self._qidxs)
def __repr__(self):
"""Metadata for the TupleDataset.
Returns:
meta: String, containing TupleDataset meta.
"""
fmt_str = self.__class__.__name__ + '\n'
fmt_str += '\tName and mode: {} {}\n'.format(self._name, self._mode)
fmt_str += '\tNumber of images: {}\n'.format(len(self.images))
fmt_str += '\tNumber of training tuples: {}\n'.format(len(self._query_pool))
fmt_str += '\tNumber of negatives per tuple: {}\n'.format(
self._num_negatives)
fmt_str += '\tNumber of tuples processed in an epoch: {}\n'.format(
self._num_queries)
fmt_str += '\tPool size for negative remining: {}\n'.format(self._pool_size)
return fmt_str
def create_epoch_tuples(self, net):
"""Creates epoch tuples with the hard-negative re-mining.
Negative examples are selected from clusters different than the cluster
    of the query image, as the clusters are ideally non-overlapping. For
every query image we choose hard-negatives, that is, non-matching images
with the most similar descriptor. Hard-negatives depend on the current
CNN parameters. K-nearest neighbors from all non-matching images are
    selected. Query images are selected randomly. Positive examples are
fixed for the related query image during the whole training process.
Args:
net: Model, network to be used for negative re-mining.
Raises:
ValueError: If the pool_size is smaller than the number of negative
images per tuple.
Returns:
avg_l2: Float, average negative L2-distance.
"""
self._n = 0
    if self._pool_size < self._num_negatives:
raise ValueError("Unable to create epoch tuples. Negative pool_size "
"should be larger than the number of negative images "
"per tuple.")
global_features_utils.debug_and_log(
'>> Creating tuples for an epoch of {}-{}...'.format(self._name,
self._mode),
True)
global_features_utils.debug_and_log(">> Used network: ", True)
global_features_utils.debug_and_log(net.meta_repr(), True)
## Selecting queries.
# Draw `num_queries` random queries for the tuples.
idx_list = np.arange(len(self._query_pool))
np.random.shuffle(idx_list)
idxs2query_pool = idx_list[:self._num_queries]
self._qidxs = [self._query_pool[i] for i in idxs2query_pool]
## Selecting positive pairs.
    # Positive examples are fixed for each query during the whole training
# process.
self._pidxs = [self._positive_pool[i] for i in idxs2query_pool]
## Selecting negative pairs.
# If `num_negatives` = 0 create dummy nidxs.
# Useful when only positives used for training.
if self._num_negatives == 0:
self._nidxs = [[] for _ in range(len(self._qidxs))]
return 0
# Draw pool_size random images for pool of negatives images.
neg_idx_list = np.arange(len(self.images))
np.random.shuffle(neg_idx_list)
neg_images_idxs = neg_idx_list[:self._pool_size]
global_features_utils.debug_and_log(
'>> Extracting descriptors for query images...', debug=True)
img_list = self._img_names_to_full_path([self.images[i] for i in
self._qidxs])
qvecs = global_model.extract_global_descriptors_from_list(
net,
images=img_list,
image_size=self._imsize,
print_freq=self._print_freq)
global_features_utils.debug_and_log(
'>> Extracting descriptors for negative pool...', debug=True)
poolvecs = global_model.extract_global_descriptors_from_list(
net,
images=self._img_names_to_full_path([self.images[i] for i in
neg_images_idxs]),
image_size=self._imsize,
print_freq=self._print_freq)
global_features_utils.debug_and_log('>> Searching for hard negatives...',
debug=True)
# Compute dot product scores and ranks.
scores = tf.linalg.matmul(poolvecs, qvecs, transpose_a=True)
ranks = tf.argsort(scores, axis=0, direction='DESCENDING')
sum_ndist = 0.
n_ndist = 0.
# Selection of negative examples.
self._nidxs = []
for q, qidx in enumerate(self._qidxs):
      # We are not using the query cluster, since those images are potentially
      # positive.
qcluster = self._clusters[qidx]
clusters = [qcluster]
nidxs = []
rank = 0
while len(nidxs) < self._num_negatives:
if rank >= tf.shape(ranks)[0]:
raise ValueError("Unable to create epoch tuples. Number of required "
"negative images is larger than the number of "
"clusters in the dataset.")
potential = neg_images_idxs[ranks[rank, q]]
# Take at most one image from the same cluster.
if not self._clusters[potential] in clusters:
nidxs.append(potential)
clusters.append(self._clusters[potential])
dist = tf.norm(qvecs[:, q] - poolvecs[:, ranks[rank, q]],
axis=0).numpy()
sum_ndist += dist
n_ndist += 1
rank += 1
self._nidxs.append(nidxs)
global_features_utils.debug_and_log(
'>> Average negative l2-distance: {:.2f}'.format(
sum_ndist / n_ndist))
# Return average negative L2-distance.
return sum_ndist / n_ndist
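# Minimal usage sketch, assuming a `TuplesDataset` instance `dataset` and a
# trained global-feature model `net` (hypothetical names): hard negatives are
# re-mined once at the start of every epoch.
#
#   avg_neg_distance = dataset.create_epoch_tuples(net)
#
# After this call, `_qidxs`, `_pidxs` and `_nidxs` hold, per tuple, the query
# index, its fixed positive index and the freshly mined negative indices.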
| 11,887 | 35.133739 | 80 | py |
models | models-master/research/delf/delf/python/datasets/generic_dataset_test.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for generic dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
from PIL import Image
import tensorflow as tf
from delf.python.datasets import generic_dataset
FLAGS = flags.FLAGS
class GenericDatasetTest(tf.test.TestCase):
"""Test functions for generic dataset."""
def testGenericDataset(self):
"""Tests loading dummy images from list."""
# Number of images to be created.
n = 2
image_names = []
# Create and save `n` dummy images.
for i in range(n):
dummy_image = np.random.rand(1024, 750, 3) * 255
img_out = Image.fromarray(dummy_image.astype('uint8')).convert('RGB')
filename = os.path.join(FLAGS.test_tmpdir,
'test_image_{}.jpg'.format(i))
img_out.save(filename)
image_names.append('test_image_{}.jpg'.format(i))
data = generic_dataset.ImagesFromList(root=FLAGS.test_tmpdir,
image_paths=image_names,
imsize=1024)
self.assertLen(data, n)
if __name__ == '__main__':
tf.test.main()
| 1,915 | 30.409836 | 80 | py |
models | models-master/research/delf/delf/python/datasets/utils.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Supporting functions for data loading."""
import numpy as np
from PIL import Image
import tensorflow as tf
from delf import utils as image_loading_utils
def pil_imagenet_loader(path, imsize, bounding_box=None, preprocess=True):
"""Pillow loader for the images.
Args:
path: Path to image to be loaded.
imsize: Integer, defines the maximum size of longer image side.
bounding_box: (x1,y1,x2,y2) tuple to crop the query image.
    preprocess: Bool, whether to preprocess the images with respect to the
      ImageNet dataset.
Returns:
image: `Tensor`, image in ImageNet suitable format.
"""
img = image_loading_utils.RgbLoader(path)
if bounding_box is not None:
imfullsize = max(img.size)
img = img.crop(bounding_box)
imsize = imsize * max(img.size) / imfullsize
# Unlike `resize`, `thumbnail` resizes to the largest size that preserves
# the aspect ratio, making sure that the output image does not exceed the
# original image size and the size specified in the arguments of thumbnail.
img.thumbnail((imsize, imsize), Image.ANTIALIAS)
img = np.array(img)
if preprocess:
# Preprocessing for ImageNet data. Converts the images from RGB to BGR,
# then zero-centers each color channel with respect to the ImageNet
# dataset, without scaling.
    img = tf.keras.applications.imagenet_utils.preprocess_input(
        img, mode='caffe')
return img
def default_loader(path, imsize, bounding_box=None, preprocess=True):
"""Default loader for the images is using Pillow.
Args:
path: Path to image to be loaded.
imsize: Integer, defines the maximum size of longer image side.
bounding_box: (x1,y1,x2,y2) tuple to crop the query image.
    preprocess: Bool, whether to preprocess the images with respect to the
      ImageNet dataset.
Returns:
image: `Tensor`, image in ImageNet suitable format.
"""
img = pil_imagenet_loader(path, imsize, bounding_box, preprocess)
return img
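# Minimal usage sketch with a hypothetical image path: the loader returns an
# array whose longer side is at most `imsize`, optionally cropped to a
# bounding box and zero-centered in BGR ('caffe') style for ImageNet backbones.
#
#   img = default_loader('/tmp/query.jpg', imsize=1024,
#                        bounding_box=(10, 10, 210, 210))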
| 2,662 | 34.506667 | 80 | py |
models | models-master/research/delf/delf/python/datasets/__init__.py | 1 | 0 | 0 | py |
|
models | models-master/research/delf/delf/python/datasets/utils_test.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
from PIL import Image
import tensorflow as tf
from delf.python.datasets import utils as image_loading_utils
FLAGS = flags.FLAGS
class UtilsTest(tf.test.TestCase):
def testDefaultLoader(self):
# Create a dummy image.
dummy_image = np.random.rand(1024, 750, 3) * 255
img_out = Image.fromarray(dummy_image.astype('uint8')).convert('RGB')
filename = os.path.join(FLAGS.test_tmpdir, 'test_image.png')
# Save the dummy image.
img_out.save(filename)
max_img_size = 1024
# Load the saved dummy image.
img = image_loading_utils.default_loader(
filename, imsize=max_img_size, preprocess=False)
# Make sure the values are the same before and after loading.
self.assertAllEqual(np.array(img_out), img)
self.assertAllLessEqual(tf.shape(img), max_img_size)
def testDefaultLoaderWithBoundingBox(self):
# Create a dummy image.
dummy_image = np.random.rand(1024, 750, 3) * 255
img_out = Image.fromarray(dummy_image.astype('uint8')).convert('RGB')
filename = os.path.join(FLAGS.test_tmpdir, 'test_image.png')
# Save the dummy image.
img_out.save(filename)
max_img_size = 1024
# Load the saved dummy image.
expected_size = 400
img = image_loading_utils.default_loader(
filename,
imsize=max_img_size,
bounding_box=[120, 120, 120 + expected_size, 120 + expected_size],
preprocess=False)
# Check that the final shape is as expected.
self.assertAllEqual(tf.shape(img), [expected_size, expected_size, 3])
if __name__ == '__main__':
tf.test.main()
| 2,485 | 31.285714 | 80 | py |
models | models-master/research/delf/delf/python/datasets/tuples_dataset_test.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"Tests for the tuples dataset module."
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
from PIL import Image
import tensorflow as tf
import pickle
from delf.python.datasets import tuples_dataset
from delf.python.training.model import global_model
FLAGS = flags.FLAGS
class TuplesDatasetTest(tf.test.TestCase):
"""Tests for tuples dataset module."""
def testCreateEpochTuples(self):
"""Tests epoch tuple creation."""
# Create a tuples dataset instance.
name = 'test_dataset'
num_queries = 1
pool_size = 5
num_negatives = 2
# Create a ground truth .pkl file.
gnd = {
'train': {'ids': [str(i) + '.png' for i in range(2 * num_queries + pool_size)],
'cluster': [0, 0, 1, 2, 3, 4, 5],
'qidxs': [0], 'pidxs': [1]}}
gnd_name = name + '.pkl'
with tf.io.gfile.GFile(os.path.join(FLAGS.test_tmpdir, gnd_name),
'wb') as gnd_file:
pickle.dump(gnd, gnd_file)
# Create random images for the dataset.
for i in range(2 * num_queries + pool_size):
dummy_image = np.random.rand(1024, 750, 3) * 255
img_out = Image.fromarray(dummy_image.astype('uint8')).convert('RGB')
filename = os.path.join(FLAGS.test_tmpdir, '{}.png'.format(i))
img_out.save(filename)
dataset = tuples_dataset.TuplesDataset(
name=name,
data_root=FLAGS.test_tmpdir,
mode='train',
imsize=1024,
num_negatives=num_negatives,
num_queries=num_queries,
pool_size=pool_size
)
# Assert that initially no negative images are set.
self.assertIsNone(dataset._nidxs)
# Initialize a network for negative re-mining.
model_params = {'architecture': 'ResNet101', 'pooling': 'gem',
'whitening': False, 'pretrained': True}
model = global_model.GlobalFeatureNet(**model_params)
avg_neg_distance = dataset.create_epoch_tuples(model)
# Check that an appropriate number of negative images has been chosen per
# query.
self.assertAllEqual(tf.shape(dataset._nidxs), [num_queries, num_negatives])
if __name__ == '__main__':
tf.test.main()
| 2,953 | 32.191011 | 85 | py |
models | models-master/research/delf/delf/python/datasets/sfm120k/sfm120k.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Structure-from-Motion dataset (Sfm120k) module.
[1] From Single Image Query to Detailed 3D Reconstruction.
Johannes L. Schonberger, Filip Radenovic, Ondrej Chum, Jan-Michael Frahm.
The related paper can be found at: https://ieeexplore.ieee.org/document/7299148.
"""
import os
import pickle
import tensorflow as tf
from delf.python.datasets import tuples_dataset
from delf.python.datasets import utils
def id2filename(image_id, prefix):
"""Creates a training image path out of its id name.
  Used for the image mapping in the Sfm120k dataset.
Args:
image_id: String, image id.
prefix: String, root directory where images are saved.
Returns:
filename: String, full image filename.
"""
if prefix:
return os.path.join(prefix, image_id[-2:], image_id[-4:-2], image_id[-6:-4],
image_id)
else:
return os.path.join(image_id[-2:], image_id[-4:-2], image_id[-6:-4],
image_id)
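# Worked example of the id-to-path mapping: the last six characters of the id
# select three nested directories, e.g.
#
#   id2filename('29fdc243aeb939388cfdf2d081dc080e', 'ims/')
#   # -> 'ims/0e/08/dc/29fdc243aeb939388cfdf2d081dc080e'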
class _Sfm120k(tuples_dataset.TuplesDataset):
"""Structure-from-Motion (Sfm120k) dataset instance.
The dataset contains the image names lists for training and validation,
the cluster ID (3D model ID) for each image and indices forming
query-positive pairs of images. The images are loaded per epoch and resized
on the fly to the desired dimensionality.
"""
def __init__(self, mode, data_root, imsize=None, num_negatives=5,
num_queries=2000, pool_size=20000, loader=utils.default_loader,
eccv2020=False):
"""Structure-from-Motion (Sfm120k) dataset initialization.
Args:
mode: Either 'train' or 'val'.
data_root: Path to the root directory of the dataset.
imsize: Integer, defines the maximum size of longer image side.
num_negatives: Integer, number of negative images per one query.
num_queries: Integer, number of query images.
pool_size: Integer, size of the negative image pool, from where the
hard-negative images are chosen.
loader: Callable, a function to load an image given its path.
eccv2020: Bool, whether to use a new validation dataset used with ECCV
2020 paper (https://arxiv.org/abs/2007.13172).
Raises:
ValueError: Raised if `mode` is not one of 'train' or 'val'.
"""
if mode not in ['train', 'val']:
raise ValueError(
"`mode` argument should be either 'train' or 'val', passed as a "
"String.")
# Setting up the paths for the dataset.
if eccv2020:
name = "retrieval-SfM-120k-val-eccv2020"
else:
name = "retrieval-SfM-120k"
db_root = os.path.join(data_root, 'train/retrieval-SfM-120k')
ims_root = os.path.join(db_root, 'ims/')
# Loading the dataset db file.
db_filename = os.path.join(db_root, '{}.pkl'.format(name))
with tf.io.gfile.GFile(db_filename, 'rb') as f:
db = pickle.load(f)[mode]
# Setting full paths for the dataset images.
self.images = [id2filename(img_name, None) for
img_name in db['cids']]
# Initializing tuples dataset.
super().__init__(name, mode, db_root, imsize, num_negatives, num_queries,
pool_size, loader, ims_root)
def Sfm120kInfo(self):
"""Metadata for the Sfm120k dataset.
    The dataset contains the lists of image names for training and
validation, the cluster ID (3D model ID) for each image and indices
forming query-positive pairs of images. The images are loaded per epoch
and resized on the fly to the desired dimensionality.
Returns:
info: dictionary with the dataset parameters.
"""
info = {'train': {'clusters': 91642, 'pidxs': 181697, 'qidxs': 181697},
'val': {'clusters': 6403, 'pidxs': 1691, 'qidxs': 1691}}
return info
def CreateDataset(mode, data_root, imsize=None, num_negatives=5,
num_queries=2000, pool_size=20000,
loader=utils.default_loader, eccv2020=False):
  """Creates Structure-from-Motion (Sfm120k) dataset.
Args:
mode: String, either 'train' or 'val'.
data_root: Path to the root directory of the dataset.
imsize: Integer, defines the maximum size of longer image side.
num_negatives: Integer, number of negative images per one query.
num_queries: Integer, number of query images.
pool_size: Integer, size of the negative image pool, from where the
hard-negative images are chosen.
loader: Callable, a function to load an image given its path.
eccv2020: Bool, whether to use a new validation dataset used with ECCV
2020 paper (https://arxiv.org/abs/2007.13172).
Returns:
sfm120k: Sfm120k dataset instance.
  """
return _Sfm120k(mode, data_root, imsize, num_negatives, num_queries,
pool_size, loader, eccv2020)
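# Minimal usage sketch, assuming the Sfm120k files were already downloaded
# under a hypothetical `data_root`:
#
#   train_dataset = CreateDataset(mode='train', data_root='/tmp/sfm120k_data',
#                                 imsize=1024)
#   # Hard negatives are then re-mined per epoch with
#   # train_dataset.create_epoch_tuples(net).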
| 5,472 | 37.006944 | 80 | py |
models | models-master/research/delf/delf/python/datasets/sfm120k/__init__.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module exposing Sfm120k dataset for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from delf.python.datasets.sfm120k import sfm120k
# pylint: enable=unused-import
| 963 | 39.166667 | 80 | py |
models | models-master/research/delf/delf/python/datasets/sfm120k/dataset_download.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Structure-from-Motion dataset (Sfm120k) download function."""
import os
import tensorflow as tf
def download_train(data_dir):
"""Checks, and, if required, downloads the necessary files for the training.
Checks if the data necessary for running the example training script exist.
If not, it downloads it in the following folder structure:
DATA_ROOT/train/retrieval-SfM-120k/ : folder with rsfm120k images and db
files.
DATA_ROOT/train/retrieval-SfM-30k/ : folder with rsfm30k images and db
files.
"""
# Create data folder if does not exist.
if not tf.io.gfile.exists(data_dir):
tf.io.gfile.mkdir(data_dir)
# Create datasets folder if does not exist.
datasets_dir = os.path.join(data_dir, 'train')
if not tf.io.gfile.exists(datasets_dir):
tf.io.gfile.mkdir(datasets_dir)
# Download folder train/retrieval-SfM-120k/.
src_dir = 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/train/ims'
dst_dir = os.path.join(datasets_dir, 'retrieval-SfM-120k', 'ims')
download_file = 'ims.tar.gz'
if not tf.io.gfile.exists(dst_dir):
src_file = os.path.join(src_dir, download_file)
dst_file = os.path.join(dst_dir, download_file)
print('>> Image directory does not exist. Creating: {}'.format(dst_dir))
tf.io.gfile.makedirs(dst_dir)
print('>> Downloading ims.tar.gz...')
os.system('wget {} -O {}'.format(src_file, dst_file))
print('>> Extracting {}...'.format(dst_file))
os.system('tar -zxf {} -C {}'.format(dst_file, dst_dir))
print('>> Extracted, deleting {}...'.format(dst_file))
os.system('rm {}'.format(dst_file))
# Create symlink for train/retrieval-SfM-30k/.
dst_dir_old = os.path.join(datasets_dir, 'retrieval-SfM-120k', 'ims')
dst_dir = os.path.join(datasets_dir, 'retrieval-SfM-30k', 'ims')
if not (tf.io.gfile.exists(dst_dir) or os.path.islink(dst_dir)):
tf.io.gfile.makedirs(os.path.join(datasets_dir, 'retrieval-SfM-30k'))
os.system('ln -s {} {}'.format(dst_dir_old, dst_dir))
print(
'>> Created symbolic link from retrieval-SfM-120k/ims to '
'retrieval-SfM-30k/ims')
# Download db files.
src_dir = 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/train/dbs'
datasets = ['retrieval-SfM-120k', 'retrieval-SfM-30k']
for dataset in datasets:
dst_dir = os.path.join(datasets_dir, dataset)
if dataset == 'retrieval-SfM-120k':
download_files = ['{}.pkl'.format(dataset),
'{}-whiten.pkl'.format(dataset)]
download_eccv2020 = '{}-val-eccv2020.pkl'.format(dataset)
elif dataset == 'retrieval-SfM-30k':
download_files = ['{}-whiten.pkl'.format(dataset)]
download_eccv2020 = None
if not tf.io.gfile.exists(dst_dir):
print('>> Dataset directory does not exist. Creating: {}'.format(
dst_dir))
tf.io.gfile.mkdir(dst_dir)
for i in range(len(download_files)):
src_file = os.path.join(src_dir, download_files[i])
dst_file = os.path.join(dst_dir, download_files[i])
if not os.path.isfile(dst_file):
print('>> DB file {} does not exist. Downloading...'.format(
download_files[i]))
os.system('wget {} -O {}'.format(src_file, dst_file))
if download_eccv2020:
eccv2020_dst_file = os.path.join(dst_dir, download_eccv2020)
if not os.path.isfile(eccv2020_dst_file):
eccv2020_src_dir = \
"http://ptak.felk.cvut.cz/personal/toliageo/share/how/dataset/"
eccv2020_dst_file = os.path.join(dst_dir, download_eccv2020)
eccv2020_src_file = os.path.join(eccv2020_src_dir,
download_eccv2020)
os.system('wget {} -O {}'.format(eccv2020_src_file,
eccv2020_dst_file))
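# Minimal usage sketch with a hypothetical data root:
#
#   download_train('/tmp/sfm120k_data')
#   # Populates /tmp/sfm120k_data/train/retrieval-SfM-120k/ (images and db
#   # files) and creates the retrieval-SfM-30k/ims symlink next to it.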
| 4,479 | 42.076923 | 80 | py |
models | models-master/research/delf/delf/python/datasets/sfm120k/sfm120k_test.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Sfm120k dataset module."""
import tensorflow as tf
from delf.python.datasets.sfm120k import sfm120k
class Sfm120kTest(tf.test.TestCase):
"""Tests for Sfm120k dataset module."""
def testId2Filename(self):
"""Tests conversion of image id to full path mapping."""
image_id = "29fdc243aeb939388cfdf2d081dc080e"
prefix = "train/retrieval-SfM-120k/ims/"
path = sfm120k.id2filename(image_id, prefix)
expected_path = "train/retrieval-SfM-120k/ims/0e/08/dc" \
"/29fdc243aeb939388cfdf2d081dc080e"
self.assertEqual(path, expected_path)
if __name__ == '__main__':
tf.test.main()
| 1,345 | 34.421053 | 80 | py |
models | models-master/research/delf/delf/python/datasets/revisited_op/dataset_test.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the python library parsing Revisited Oxford/Paris datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from delf.python.datasets.revisited_op import dataset
FLAGS = flags.FLAGS
class DatasetTest(tf.test.TestCase):
def testParseEasyMediumHardGroundTruth(self):
# Define input.
ground_truth = [{
'easy': np.array([10, 56, 100]),
'hard': np.array([0]),
'junk': np.array([6, 90])
}, {
'easy': np.array([], dtype='int64'),
'hard': [5],
'junk': [99, 100]
}, {
'easy': [33],
'hard': [66, 99],
'junk': np.array([], dtype='int64')
}]
# Run tested function.
(easy_ground_truth, medium_ground_truth,
hard_ground_truth) = dataset.ParseEasyMediumHardGroundTruth(ground_truth)
# Define expected outputs.
expected_easy_ground_truth = [{
'ok': np.array([10, 56, 100]),
'junk': np.array([6, 90, 0])
}, {
'ok': np.array([], dtype='int64'),
'junk': np.array([99, 100, 5])
}, {
'ok': np.array([33]),
'junk': np.array([66, 99])
}]
expected_medium_ground_truth = [{
'ok': np.array([10, 56, 100, 0]),
'junk': np.array([6, 90])
}, {
'ok': np.array([5]),
'junk': np.array([99, 100])
}, {
'ok': np.array([33, 66, 99]),
'junk': np.array([], dtype='int64')
}]
expected_hard_ground_truth = [{
'ok': np.array([0]),
'junk': np.array([6, 90, 10, 56, 100])
}, {
'ok': np.array([5]),
'junk': np.array([99, 100])
}, {
'ok': np.array([66, 99]),
'junk': np.array([33])
}]
# Compare actual versus expected.
def _AssertListOfDictsOfArraysAreEqual(ground_truth, expected_ground_truth):
"""Helper function to compare ground-truth data.
Args:
ground_truth: List of dicts of arrays.
expected_ground_truth: List of dicts of arrays.
"""
self.assertEqual(len(ground_truth), len(expected_ground_truth))
for i, ground_truth_entry in enumerate(ground_truth):
self.assertEqual(sorted(ground_truth_entry.keys()), ['junk', 'ok'])
self.assertAllEqual(ground_truth_entry['junk'],
expected_ground_truth[i]['junk'])
self.assertAllEqual(ground_truth_entry['ok'],
expected_ground_truth[i]['ok'])
_AssertListOfDictsOfArraysAreEqual(easy_ground_truth,
expected_easy_ground_truth)
_AssertListOfDictsOfArraysAreEqual(medium_ground_truth,
expected_medium_ground_truth)
_AssertListOfDictsOfArraysAreEqual(hard_ground_truth,
expected_hard_ground_truth)
def testAdjustPositiveRanksWorks(self):
# Define inputs.
positive_ranks = np.array([0, 2, 6, 10, 20])
junk_ranks = np.array([1, 8, 9, 30])
# Run tested function.
adjusted_positive_ranks = dataset.AdjustPositiveRanks(
positive_ranks, junk_ranks)
# Define expected output.
expected_adjusted_positive_ranks = [0, 1, 5, 7, 17]
# Compare actual versus expected.
self.assertAllEqual(adjusted_positive_ranks,
expected_adjusted_positive_ranks)
def testComputeAveragePrecisionWorks(self):
# Define input.
positive_ranks = [0, 2, 5]
# Run tested function.
average_precision = dataset.ComputeAveragePrecision(positive_ranks)
# Define expected output.
expected_average_precision = 0.677778
# Compare actual versus expected.
self.assertAllClose(average_precision, expected_average_precision)
def testComputePRAtRanksWorks(self):
# Define inputs.
positive_ranks = np.array([0, 2, 5])
desired_pr_ranks = np.array([1, 5, 10])
# Run tested function.
precisions, recalls = dataset.ComputePRAtRanks(positive_ranks,
desired_pr_ranks)
# Define expected outputs.
expected_precisions = [1.0, 0.4, 0.5]
expected_recalls = [0.333333, 0.666667, 1.0]
# Compare actual versus expected.
self.assertAllClose(precisions, expected_precisions)
self.assertAllClose(recalls, expected_recalls)
def testComputeMetricsWorks(self):
# Define inputs: 3 queries. For the last one, there are no expected images
# to be retrieved
sorted_index_ids = np.array([[4, 2, 0, 1, 3], [0, 2, 4, 1, 3],
[0, 1, 2, 3, 4]])
ground_truth = [{
'ok': np.array([0, 1]),
'junk': np.array([2])
}, {
'ok': np.array([0, 4]),
'junk': np.array([], dtype='int64')
}, {
'ok': np.array([], dtype='int64'),
'junk': np.array([], dtype='int64')
}]
desired_pr_ranks = [1, 2, 5]
# Run tested function.
(mean_average_precision, mean_precisions, mean_recalls, average_precisions,
precisions, recalls) = dataset.ComputeMetrics(sorted_index_ids,
ground_truth,
desired_pr_ranks)
# Define expected outputs.
expected_mean_average_precision = 0.604167
expected_mean_precisions = [0.5, 0.5, 0.666667]
expected_mean_recalls = [0.25, 0.5, 1.0]
expected_average_precisions = [0.416667, 0.791667, float('nan')]
expected_precisions = [[0.0, 0.5, 0.666667], [1.0, 0.5, 0.666667],
[float('nan'),
float('nan'),
float('nan')]]
expected_recalls = [[0.0, 0.5, 1.0], [0.5, 0.5, 1.0],
[float('nan'), float('nan'),
float('nan')]]
# Compare actual versus expected.
self.assertAllClose(mean_average_precision, expected_mean_average_precision)
self.assertAllClose(mean_precisions, expected_mean_precisions)
self.assertAllClose(mean_recalls, expected_mean_recalls)
self.assertAllClose(average_precisions, expected_average_precisions)
self.assertAllClose(precisions, expected_precisions)
self.assertAllClose(recalls, expected_recalls)
def testSaveMetricsFileWorks(self):
# Define inputs.
mean_average_precision = {'hard': 0.7, 'medium': 0.9}
mean_precisions = {
'hard': np.array([1.0, 0.8]),
'medium': np.array([1.0, 1.0])
}
mean_recalls = {
'hard': np.array([0.5, 0.8]),
'medium': np.array([0.5, 1.0])
}
pr_ranks = [1, 5]
output_path = os.path.join(FLAGS.test_tmpdir, 'metrics.txt')
# Run tested function.
dataset.SaveMetricsFile(mean_average_precision, mean_precisions,
mean_recalls, pr_ranks, output_path)
# Define expected results.
expected_metrics = ('hard\n'
' mAP=70.0\n'
' mP@k[1 5] [100. 80.]\n'
' mR@k[1 5] [50. 80.]\n'
'medium\n'
' mAP=90.0\n'
' mP@k[1 5] [100. 100.]\n'
' mR@k[1 5] [ 50. 100.]\n')
# Parse actual results, and compare to expected.
with tf.io.gfile.GFile(output_path) as f:
metrics = f.read()
self.assertEqual(metrics, expected_metrics)
def testSaveAndReadMetricsWorks(self):
# Define inputs.
mean_average_precision = {'hard': 0.7, 'medium': 0.9}
mean_precisions = {
'hard': np.array([1.0, 0.8]),
'medium': np.array([1.0, 1.0])
}
mean_recalls = {
'hard': np.array([0.5, 0.8]),
'medium': np.array([0.5, 1.0])
}
pr_ranks = [1, 5]
output_path = os.path.join(FLAGS.test_tmpdir, 'metrics.txt')
# Run tested functions.
dataset.SaveMetricsFile(mean_average_precision, mean_precisions,
mean_recalls, pr_ranks, output_path)
(read_mean_average_precision, read_pr_ranks, read_mean_precisions,
read_mean_recalls) = dataset.ReadMetricsFile(output_path)
# Compares actual and expected metrics.
self.assertEqual(read_mean_average_precision, mean_average_precision)
self.assertEqual(read_pr_ranks, pr_ranks)
self.assertEqual(read_mean_precisions.keys(), mean_precisions.keys())
self.assertAllEqual(read_mean_precisions['hard'], mean_precisions['hard'])
self.assertAllEqual(read_mean_precisions['medium'],
mean_precisions['medium'])
self.assertEqual(read_mean_recalls.keys(), mean_recalls.keys())
self.assertAllEqual(read_mean_recalls['hard'], mean_recalls['hard'])
self.assertAllEqual(read_mean_recalls['medium'], mean_recalls['medium'])
def testReadMetricsWithRepeatedProtocolFails(self):
# Define inputs.
input_path = os.path.join(FLAGS.test_tmpdir, 'metrics.txt')
with tf.io.gfile.GFile(input_path, 'w') as f:
f.write('hard\n'
' mAP=70.0\n'
' mP@k[1 5] [ 100. 80.]\n'
' mR@k[1 5] [ 50. 80.]\n'
'medium\n'
' mAP=90.0\n'
' mP@k[1 5] [ 100. 100.]\n'
' mR@k[1 5] [ 50. 100.]\n'
'medium\n'
' mAP=90.0\n'
' mP@k[1 5] [ 100. 100.]\n'
' mR@k[1 5] [ 50. 100.]\n')
# Run tested functions.
with self.assertRaisesRegex(ValueError, 'Malformed input'):
dataset.ReadMetricsFile(input_path)
if __name__ == '__main__':
tf.test.main()
| 10,271 | 34.543253 | 80 | py |
models | models-master/research/delf/delf/python/datasets/revisited_op/dataset.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python library to parse ground-truth/evaluate on Revisited datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import numpy as np
from scipy.io import matlab
import tensorflow as tf
_GROUND_TRUTH_KEYS = ['easy', 'hard', 'junk']
DATASET_NAMES = ['roxford5k', 'rparis6k']
def ReadDatasetFile(dataset_file_path):
"""Reads dataset file in Revisited Oxford/Paris ".mat" format.
Args:
dataset_file_path: Path to dataset file, in .mat format.
Returns:
query_list: List of query image names.
index_list: List of index image names.
ground_truth: List containing ground-truth information for dataset. Each
entry is a dict corresponding to the ground-truth information for a query.
The dict may have keys 'easy', 'hard', or 'junk', mapping to a NumPy
array of integers; additionally, it has a key 'bbx' mapping to a NumPy
array of floats with bounding box coordinates.
"""
with tf.io.gfile.GFile(dataset_file_path, 'rb') as f:
cfg = matlab.loadmat(f)
# Parse outputs according to the specificities of the dataset file.
query_list = [str(im_array[0]) for im_array in np.squeeze(cfg['qimlist'])]
index_list = [str(im_array[0]) for im_array in np.squeeze(cfg['imlist'])]
ground_truth_raw = np.squeeze(cfg['gnd'])
ground_truth = []
for query_ground_truth_raw in ground_truth_raw:
query_ground_truth = {}
for ground_truth_key in _GROUND_TRUTH_KEYS:
if ground_truth_key in query_ground_truth_raw.dtype.names:
adjusted_labels = query_ground_truth_raw[ground_truth_key] - 1
query_ground_truth[ground_truth_key] = adjusted_labels.flatten()
query_ground_truth['bbx'] = np.squeeze(query_ground_truth_raw['bbx'])
ground_truth.append(query_ground_truth)
return query_list, index_list, ground_truth
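# Minimal usage sketch, assuming a hypothetical path to the ground-truth file:
#
#   query_list, index_list, ground_truth = ReadDatasetFile(
#       '/tmp/revisitop/gnd_roxford5k.mat')
#   # ground_truth[i]['easy'], ['hard'] and ['junk'] hold zero-indexed index
#   # image positions for query i; ['bbx'] holds its query bounding box.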
def _ParseGroundTruth(ok_list, junk_list):
"""Constructs dictionary of ok/junk indices for a data subset and query.
Args:
ok_list: List of NumPy arrays containing true positive indices for query.
junk_list: List of NumPy arrays containing ignored indices for query.
Returns:
ok_junk_dict: Dict mapping 'ok' and 'junk' strings to NumPy array of
indices.
"""
ok_junk_dict = {}
ok_junk_dict['ok'] = np.concatenate(ok_list)
ok_junk_dict['junk'] = np.concatenate(junk_list)
return ok_junk_dict
def ParseEasyMediumHardGroundTruth(ground_truth):
"""Parses easy/medium/hard ground-truth from Revisited datasets.
Args:
ground_truth: Usually the output from ReadDatasetFile(). List containing
ground-truth information for dataset. Each entry is a dict corresponding
to the ground-truth information for a query. The dict must have keys
'easy', 'hard', and 'junk', mapping to a NumPy array of integers.
Returns:
easy_ground_truth: List containing ground-truth information for easy subset
of dataset. Each entry is a dict corresponding to the ground-truth
information for a query. The dict has keys 'ok' and 'junk', mapping to a
NumPy array of integers.
medium_ground_truth: Same as `easy_ground_truth`, but for the medium subset.
hard_ground_truth: Same as `easy_ground_truth`, but for the hard subset.
"""
num_queries = len(ground_truth)
easy_ground_truth = []
medium_ground_truth = []
hard_ground_truth = []
for i in range(num_queries):
easy_ground_truth.append(
_ParseGroundTruth([ground_truth[i]['easy']],
[ground_truth[i]['junk'], ground_truth[i]['hard']]))
medium_ground_truth.append(
_ParseGroundTruth([ground_truth[i]['easy'], ground_truth[i]['hard']],
[ground_truth[i]['junk']]))
hard_ground_truth.append(
_ParseGroundTruth([ground_truth[i]['hard']],
[ground_truth[i]['junk'], ground_truth[i]['easy']]))
return easy_ground_truth, medium_ground_truth, hard_ground_truth
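# Worked example: for a query with easy=[10], hard=[20] and junk=[30], the
# three protocols become
#   easy:   ok=[10],     junk=[30, 20]
#   medium: ok=[10, 20], junk=[30]
#   hard:   ok=[20],     junk=[30, 10]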
def AdjustPositiveRanks(positive_ranks, junk_ranks):
"""Adjusts positive ranks based on junk ranks.
Args:
positive_ranks: Sorted 1D NumPy integer array.
junk_ranks: Sorted 1D NumPy integer array.
Returns:
adjusted_positive_ranks: Sorted 1D NumPy array.
"""
if not junk_ranks.size:
return positive_ranks
adjusted_positive_ranks = positive_ranks
j = 0
for i, positive_index in enumerate(positive_ranks):
while (j < len(junk_ranks) and positive_index > junk_ranks[j]):
j += 1
adjusted_positive_ranks[i] -= j
return adjusted_positive_ranks
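# Worked example: each positive rank is decreased by the number of junk images
# ranked ahead of it, e.g.
#
#   AdjustPositiveRanks(np.array([0, 2, 6, 10, 20]), np.array([1, 8, 9, 30]))
#   # -> array([0, 1, 5, 7, 17])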
def ComputeAveragePrecision(positive_ranks):
"""Computes average precision according to dataset convention.
It assumes that `positive_ranks` contains the ranks for all expected positive
index images to be retrieved. If `positive_ranks` is empty, returns
`average_precision` = 0.
Note that average precision computation here does NOT use the finite sum
method (see
https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision)
which is common in information retrieval literature. Instead, the method
implemented here integrates over the precision-recall curve by averaging two
adjacent precision points, then multiplying by the recall step. This is the
convention for the Revisited Oxford/Paris datasets.
Args:
positive_ranks: Sorted 1D NumPy integer array, zero-indexed.
Returns:
average_precision: Float.
"""
average_precision = 0.0
num_expected_positives = len(positive_ranks)
if not num_expected_positives:
return average_precision
recall_step = 1.0 / num_expected_positives
for i, rank in enumerate(positive_ranks):
if not rank:
left_precision = 1.0
else:
left_precision = i / rank
right_precision = (i + 1) / (rank + 1)
average_precision += (left_precision + right_precision) * recall_step / 2
return average_precision
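# Worked example: for positive ranks [0, 2, 5] there are three recall steps of
# 1/3 each, and the left/right precisions are averaged at every step:
#
#   ComputeAveragePrecision([0, 2, 5])
#   # = ((1 + 1/1) + (1/2 + 2/3) + (2/5 + 3/6)) / 2 * (1/3) ~= 0.677778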
def ComputePRAtRanks(positive_ranks, desired_pr_ranks):
"""Computes precision/recall at desired ranks.
It assumes that `positive_ranks` contains the ranks for all expected positive
index images to be retrieved. If `positive_ranks` is empty, return all-zeros
`precisions`/`recalls`.
If a desired rank is larger than the last positive rank, its precision is
computed based on the last positive rank. For example, if `desired_pr_ranks`
is [10] and `positive_ranks` = [0, 7] --> `precisions` = [0.25], `recalls` =
[1.0].
Args:
positive_ranks: 1D NumPy integer array, zero-indexed.
desired_pr_ranks: List of integers containing the desired precision/recall
ranks to be reported. Eg, if precision@1/recall@1 and
precision@10/recall@10 are desired, this should be set to [1, 10].
Returns:
precisions: Precision @ `desired_pr_ranks` (NumPy array of
floats, with shape [len(desired_pr_ranks)]).
recalls: Recall @ `desired_pr_ranks` (NumPy array of floats, with
shape [len(desired_pr_ranks)]).
"""
num_desired_pr_ranks = len(desired_pr_ranks)
precisions = np.zeros([num_desired_pr_ranks])
recalls = np.zeros([num_desired_pr_ranks])
num_expected_positives = len(positive_ranks)
if not num_expected_positives:
return precisions, recalls
positive_ranks_one_indexed = positive_ranks + 1
for i, desired_pr_rank in enumerate(desired_pr_ranks):
recalls[i] = np.sum(
positive_ranks_one_indexed <= desired_pr_rank) / num_expected_positives
# If `desired_pr_rank` is larger than last positive's rank, only compute
# precision with respect to last positive's position.
precision_rank = min(max(positive_ranks_one_indexed), desired_pr_rank)
precisions[i] = np.sum(
positive_ranks_one_indexed <= precision_rank) / precision_rank
return precisions, recalls
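# Worked example: for positive ranks [0, 2, 5] (zero-indexed) and desired
# ranks [1, 5, 10],
#
#   ComputePRAtRanks(np.array([0, 2, 5]), [1, 5, 10])
#   # -> precisions [1.0, 0.4, 0.5], recalls [0.333, 0.667, 1.0]
#   # (precision at rank 10 falls back to the last positive's rank, 6).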
def ComputeMetrics(sorted_index_ids, ground_truth, desired_pr_ranks):
"""Computes metrics for retrieval results on the Revisited datasets.
If there are no valid ground-truth index images for a given query, the metric
results for the given query (`average_precisions`, `precisions` and `recalls`)
are set to NaN, and they are not taken into account when computing the
aggregated metrics (`mean_average_precision`, `mean_precisions` and
`mean_recalls`) over all queries.
Args:
sorted_index_ids: Integer NumPy array of shape [#queries, #index_images].
For each query, contains an array denoting the most relevant index images,
sorted from most to least relevant.
ground_truth: List containing ground-truth information for dataset. Each
entry is a dict corresponding to the ground-truth information for a query.
The dict has keys 'ok' and 'junk', mapping to a NumPy array of integers.
desired_pr_ranks: List of integers containing the desired precision/recall
ranks to be reported. Eg, if precision@1/recall@1 and
precision@10/recall@10 are desired, this should be set to [1, 10]. The
largest item should be <= #index_images.
Returns:
mean_average_precision: Mean average precision (float).
mean_precisions: Mean precision @ `desired_pr_ranks` (NumPy array of
floats, with shape [len(desired_pr_ranks)]).
mean_recalls: Mean recall @ `desired_pr_ranks` (NumPy array of floats, with
shape [len(desired_pr_ranks)]).
average_precisions: Average precision for each query (NumPy array of floats,
with shape [#queries]).
precisions: Precision @ `desired_pr_ranks`, for each query (NumPy array of
floats, with shape [#queries, len(desired_pr_ranks)]).
recalls: Recall @ `desired_pr_ranks`, for each query (NumPy array of
floats, with shape [#queries, len(desired_pr_ranks)]).
Raises:
ValueError: If largest desired PR rank in `desired_pr_ranks` >
#index_images.
"""
num_queries, num_index_images = sorted_index_ids.shape
num_desired_pr_ranks = len(desired_pr_ranks)
sorted_desired_pr_ranks = sorted(desired_pr_ranks)
if sorted_desired_pr_ranks[-1] > num_index_images:
raise ValueError(
'Requested PR ranks up to %d, however there are only %d images' %
(sorted_desired_pr_ranks[-1], num_index_images))
# Instantiate all outputs, then loop over each query and gather metrics.
mean_average_precision = 0.0
mean_precisions = np.zeros([num_desired_pr_ranks])
mean_recalls = np.zeros([num_desired_pr_ranks])
average_precisions = np.zeros([num_queries])
precisions = np.zeros([num_queries, num_desired_pr_ranks])
recalls = np.zeros([num_queries, num_desired_pr_ranks])
num_empty_gt_queries = 0
for i in range(num_queries):
ok_index_images = ground_truth[i]['ok']
junk_index_images = ground_truth[i]['junk']
if not ok_index_images.size:
average_precisions[i] = float('nan')
precisions[i, :] = float('nan')
recalls[i, :] = float('nan')
num_empty_gt_queries += 1
continue
positive_ranks = np.arange(num_index_images)[np.in1d(
sorted_index_ids[i], ok_index_images)]
junk_ranks = np.arange(num_index_images)[np.in1d(sorted_index_ids[i],
junk_index_images)]
adjusted_positive_ranks = AdjustPositiveRanks(positive_ranks, junk_ranks)
average_precisions[i] = ComputeAveragePrecision(adjusted_positive_ranks)
precisions[i, :], recalls[i, :] = ComputePRAtRanks(adjusted_positive_ranks,
desired_pr_ranks)
mean_average_precision += average_precisions[i]
mean_precisions += precisions[i, :]
mean_recalls += recalls[i, :]
# Normalize aggregated metrics by number of queries.
num_valid_queries = num_queries - num_empty_gt_queries
mean_average_precision /= num_valid_queries
mean_precisions /= num_valid_queries
mean_recalls /= num_valid_queries
return (mean_average_precision, mean_precisions, mean_recalls,
average_precisions, precisions, recalls)
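# Minimal usage sketch, assuming `sorted_index_ids` was obtained by ranking
# index images by descriptor similarity and `medium_ground_truth` came from
# ParseEasyMediumHardGroundTruth:
#
#   (mean_ap, mean_precisions, mean_recalls, aps, precisions,
#    recalls) = ComputeMetrics(sorted_index_ids, medium_ground_truth,
#                              desired_pr_ranks=[1, 5, 10])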
def SaveMetricsFile(mean_average_precision, mean_precisions, mean_recalls,
pr_ranks, output_path):
"""Saves aggregated retrieval metrics to text file.
Args:
mean_average_precision: Dict mapping each dataset protocol to a float.
mean_precisions: Dict mapping each dataset protocol to a NumPy array of
floats with shape [len(pr_ranks)].
mean_recalls: Dict mapping each dataset protocol to a NumPy array of floats
with shape [len(pr_ranks)].
pr_ranks: List of integers.
output_path: Full file path.
"""
with tf.io.gfile.GFile(output_path, 'w') as f:
for k in sorted(mean_average_precision.keys()):
f.write('{}\n mAP={}\n mP@k{} {}\n mR@k{} {}\n'.format(
k, np.around(mean_average_precision[k] * 100, decimals=2),
np.array(pr_ranks), np.around(mean_precisions[k] * 100, decimals=2),
np.array(pr_ranks), np.around(mean_recalls[k] * 100, decimals=2)))
def _ParseSpaceSeparatedStringsInBrackets(line, prefixes, ind):
"""Parses line containing space-separated strings in brackets.
Args:
line: String, containing line in metrics file with mP@k or mR@k figures.
prefixes: Tuple/list of strings, containing valid prefixes.
ind: Integer indicating which field within brackets is parsed.
Yields:
entry: String format entry.
Raises:
ValueError: If input line does not contain a valid prefix.
"""
for prefix in prefixes:
if line.startswith(prefix):
line = line[len(prefix):]
break
else:
raise ValueError('Line %s is malformed, cannot find valid prefixes' % line)
for entry in line.split('[')[ind].split(']')[0].split():
yield entry
def _ParsePrRanks(line):
"""Parses PR ranks from mP@k line in metrics file.
Args:
line: String, containing line in metrics file with mP@k figures.
Returns:
pr_ranks: List of integers, containing used ranks.
Raises:
ValueError: If input line is malformed.
"""
return [
int(pr_rank) for pr_rank in _ParseSpaceSeparatedStringsInBrackets(
line, [' mP@k['], 0) if pr_rank
]
def _ParsePrScores(line, num_pr_ranks):
"""Parses PR scores from line in metrics file.
Args:
line: String, containing line in metrics file with mP@k or mR@k figures.
num_pr_ranks: Integer, number of scores that should be in output list.
Returns:
pr_scores: List of floats, containing scores.
Raises:
ValueError: If input line is malformed.
"""
pr_scores = [
float(pr_score) for pr_score in _ParseSpaceSeparatedStringsInBrackets(
line, (' mP@k[', ' mR@k['), 1) if pr_score
]
if len(pr_scores) != num_pr_ranks:
raise ValueError('Line %s is malformed, expected %d scores but found %d' %
(line, num_pr_ranks, len(pr_scores)))
return pr_scores
def ReadMetricsFile(metrics_path):
"""Reads aggregated retrieval metrics from text file.
Args:
metrics_path: Full file path, containing aggregated retrieval metrics.
Returns:
mean_average_precision: Dict mapping each dataset protocol to a float.
pr_ranks: List of integer ranks used in aggregated recall/precision metrics.
mean_precisions: Dict mapping each dataset protocol to a NumPy array of
floats with shape [len(`pr_ranks`)].
mean_recalls: Dict mapping each dataset protocol to a NumPy array of floats
with shape [len(`pr_ranks`)].
Raises:
ValueError: If input file is malformed.
"""
with tf.io.gfile.GFile(metrics_path, 'r') as f:
file_contents_stripped = [l.rstrip() for l in f]
if len(file_contents_stripped) % 4:
raise ValueError(
'Malformed input %s: number of lines must be a multiple of 4, '
'but it is %d' % (metrics_path, len(file_contents_stripped)))
mean_average_precision = {}
pr_ranks = []
mean_precisions = {}
mean_recalls = {}
protocols = set()
for i in range(0, len(file_contents_stripped), 4):
protocol = file_contents_stripped[i]
if protocol in protocols:
raise ValueError(
'Malformed input %s: protocol %s is found a second time' %
(metrics_path, protocol))
protocols.add(protocol)
# Parse mAP.
mean_average_precision[protocol] = float(
file_contents_stripped[i + 1].split('=')[1]) / 100.0
# Parse (or check consistency of) pr_ranks.
parsed_pr_ranks = _ParsePrRanks(file_contents_stripped[i + 2])
if not pr_ranks:
pr_ranks = parsed_pr_ranks
else:
if parsed_pr_ranks != pr_ranks:
raise ValueError('Malformed input %s: inconsistent PR ranks' %
metrics_path)
# Parse mean precisions.
mean_precisions[protocol] = np.array(
_ParsePrScores(file_contents_stripped[i + 2], len(pr_ranks)),
dtype=float) / 100.0
# Parse mean recalls.
mean_recalls[protocol] = np.array(
_ParsePrScores(file_contents_stripped[i + 3], len(pr_ranks)),
dtype=float) / 100.0
return mean_average_precision, pr_ranks, mean_precisions, mean_recalls
def CreateConfigForTestDataset(dataset, dir_main):
"""Creates the configuration dictionary for the test dataset.
Args:
dataset: String, dataset name: either 'roxford5k' or 'rparis6k'.
dir_main: String, path to the folder containing ground truth files.
Returns:
cfg: Dataset configuration in a form of dictionary. The configuration
includes:
`gnd_fname` - path to the ground truth file for the dataset,
`ext` and `qext` - image extensions for the images in the test dataset
and the query images,
`dir_data` - path to the folder containing ground truth files,
`dir_images` - path to the folder containing images,
`n` and `nq` - number of images and query images in the dataset
respectively,
`im_fname` and `qim_fname` - functions providing paths for the dataset
and query images respectively,
`dataset` - test dataset name.
Raises:
ValueError: If an unknown dataset name is provided as an argument.
"""
dataset = dataset.lower()
def _ConfigImname(cfg, i):
return os.path.join(cfg['dir_images'], cfg['imlist'][i] + cfg['ext'])
def _ConfigQimname(cfg, i):
return os.path.join(cfg['dir_images'], cfg['qimlist'][i] + cfg['qext'])
if dataset not in DATASET_NAMES:
raise ValueError('Unknown dataset: {}!'.format(dataset))
# Loading imlist, qimlist, and gnd in configuration as a dictionary.
gnd_fname = os.path.join(dir_main, 'gnd_{}.pkl'.format(dataset))
with tf.io.gfile.GFile(gnd_fname, 'rb') as f:
cfg = pickle.load(f)
cfg['gnd_fname'] = gnd_fname
if dataset == 'rparis6k':
dir_images = 'paris6k_images'
elif dataset == 'roxford5k':
dir_images = 'oxford5k_images'
cfg['ext'] = '.jpg'
cfg['qext'] = '.jpg'
cfg['dir_data'] = os.path.join(dir_main)
cfg['dir_images'] = os.path.join(cfg['dir_data'], dir_images)
cfg['n'] = len(cfg['imlist'])
cfg['nq'] = len(cfg['qimlist'])
cfg['im_fname'] = _ConfigImname
cfg['qim_fname'] = _ConfigQimname
cfg['dataset'] = dataset
return cfg
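# Minimal usage sketch with a hypothetical ground-truth directory:
#
#   cfg = CreateConfigForTestDataset('roxford5k', '/tmp/revisitop/data')
#   first_query_path = cfg['qim_fname'](cfg, 0)  # Path of the first query.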
| 19,665 | 35.690299 | 94 | py |
models | models-master/research/delf/delf/python/datasets/revisited_op/__init__.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for revisited Oxford and Paris datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from delf.python.datasets.revisited_op import dataset
# pylint: enable=unused-import
| 969 | 41.173913 | 80 | py |
models | models-master/research/delf/delf/python/datasets/google_landmarks_dataset/compute_retrieval_metrics.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Computes metrics for Google Landmarks Retrieval dataset predictions.
Metrics are written to stdout.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from absl import app
from delf.python.datasets.google_landmarks_dataset import dataset_file_io
from delf.python.datasets.google_landmarks_dataset import metrics
cmd_args = None
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read solution.
print('Reading solution...')
public_solution, private_solution, ignored_ids = dataset_file_io.ReadSolution(
cmd_args.solution_path, dataset_file_io.RETRIEVAL_TASK_ID)
print('done!')
# Read predictions.
print('Reading predictions...')
public_predictions, private_predictions = dataset_file_io.ReadPredictions(
cmd_args.predictions_path, set(public_solution.keys()),
set(private_solution.keys()), set(ignored_ids),
dataset_file_io.RETRIEVAL_TASK_ID)
print('done!')
# Mean average precision.
print('**********************************************')
print('(Public) Mean Average Precision: %f' %
metrics.MeanAveragePrecision(public_predictions, public_solution))
print('(Private) Mean Average Precision: %f' %
metrics.MeanAveragePrecision(private_predictions, private_solution))
# Mean precision@k.
print('**********************************************')
public_precisions = 100.0 * metrics.MeanPrecisions(public_predictions,
public_solution)
private_precisions = 100.0 * metrics.MeanPrecisions(private_predictions,
private_solution)
print('(Public) Mean precisions: P@1: %.2f, P@5: %.2f, P@10: %.2f, '
'P@50: %.2f, P@100: %.2f' %
(public_precisions[0], public_precisions[4], public_precisions[9],
public_precisions[49], public_precisions[99]))
print('(Private) Mean precisions: P@1: %.2f, P@5: %.2f, P@10: %.2f, '
'P@50: %.2f, P@100: %.2f' %
(private_precisions[0], private_precisions[4], private_precisions[9],
private_precisions[49], private_precisions[99]))
# Mean/median position of first correct.
print('**********************************************')
public_mean_position, public_median_position = metrics.MeanMedianPosition(
public_predictions, public_solution)
private_mean_position, private_median_position = metrics.MeanMedianPosition(
private_predictions, private_solution)
print('(Public) Mean position: %.2f, median position: %.2f' %
(public_mean_position, public_median_position))
print('(Private) Mean position: %.2f, median position: %.2f' %
(private_mean_position, private_median_position))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--predictions_path',
type=str,
default='/tmp/predictions.csv',
help="""
Path to CSV predictions file, formatted with columns 'id,images' (the
file should include a header).
""")
parser.add_argument(
'--solution_path',
type=str,
default='/tmp/solution.csv',
help="""
Path to CSV solution file, formatted with columns 'id,images,Usage'
(the file should include a header).
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
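# Example invocation with hypothetical file paths:
#
#   python3 compute_retrieval_metrics.py \
#     --predictions_path=/tmp/predictions.csv \
#     --solution_path=/tmp/solution.csv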
| 4,230 | 38.542056 | 80 | py |
models | models-master/research/delf/delf/python/datasets/google_landmarks_dataset/metrics_test.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Google Landmarks dataset metric computation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from delf.python.datasets.google_landmarks_dataset import metrics
def _CreateRecognitionSolution():
"""Creates recognition solution to be used in tests.
Returns:
solution: Dict mapping test image ID to list of ground-truth landmark IDs.
"""
return {
'0123456789abcdef': [0, 12],
'0223456789abcdef': [100, 200, 300],
'0323456789abcdef': [1],
'0423456789abcdef': [],
'0523456789abcdef': [],
}
def _CreateRecognitionPredictions():
"""Creates recognition predictions to be used in tests.
Returns:
predictions: Dict mapping test image ID to a dict with keys 'class'
(integer) and 'score' (float).
"""
return {
'0223456789abcdef': {
'class': 0,
'score': 0.01
},
'0323456789abcdef': {
'class': 1,
'score': 10.0
},
'0423456789abcdef': {
'class': 150,
'score': 15.0
},
}
def _CreateRetrievalSolution():
"""Creates retrieval solution to be used in tests.
Returns:
solution: Dict mapping test image ID to list of ground-truth image IDs.
"""
return {
'0123456789abcdef': ['fedcba9876543210', 'fedcba9876543220'],
'0223456789abcdef': ['fedcba9876543210'],
'0323456789abcdef': [
'fedcba9876543230', 'fedcba9876543240', 'fedcba9876543250'
],
'0423456789abcdef': ['fedcba9876543230'],
}
def _CreateRetrievalPredictions():
"""Creates retrieval predictions to be used in tests.
Returns:
predictions: Dict mapping test image ID to a list with predicted index image
ids.
"""
return {
'0223456789abcdef': ['fedcba9876543200', 'fedcba9876543210'],
'0323456789abcdef': ['fedcba9876543240'],
'0423456789abcdef': ['fedcba9876543230', 'fedcba9876543240'],
}
class MetricsTest(tf.test.TestCase):
def testGlobalAveragePrecisionWorks(self):
# Define input.
predictions = _CreateRecognitionPredictions()
solution = _CreateRecognitionSolution()
# Run tested function.
gap = metrics.GlobalAveragePrecision(predictions, solution)
# Define expected results.
expected_gap = 0.166667
# Compare actual and expected results.
self.assertAllClose(gap, expected_gap)
def testGlobalAveragePrecisionIgnoreNonGroundTruthWorks(self):
# Define input.
predictions = _CreateRecognitionPredictions()
solution = _CreateRecognitionSolution()
# Run tested function.
gap = metrics.GlobalAveragePrecision(
predictions, solution, ignore_non_gt_test_images=True)
# Define expected results.
expected_gap = 0.333333
# Compare actual and expected results.
self.assertAllClose(gap, expected_gap)
def testTop1AccuracyWorks(self):
# Define input.
predictions = _CreateRecognitionPredictions()
solution = _CreateRecognitionSolution()
# Run tested function.
accuracy = metrics.Top1Accuracy(predictions, solution)
# Define expected results.
expected_accuracy = 0.333333
# Compare actual and expected results.
self.assertAllClose(accuracy, expected_accuracy)
def testMeanAveragePrecisionWorks(self):
# Define input.
predictions = _CreateRetrievalPredictions()
solution = _CreateRetrievalSolution()
# Run tested function.
mean_ap = metrics.MeanAveragePrecision(predictions, solution)
# Define expected results.
expected_mean_ap = 0.458333
# Compare actual and expected results.
self.assertAllClose(mean_ap, expected_mean_ap)
def testMeanAveragePrecisionMaxPredictionsWorks(self):
# Define input.
predictions = _CreateRetrievalPredictions()
solution = _CreateRetrievalSolution()
# Run tested function.
mean_ap = metrics.MeanAveragePrecision(
predictions, solution, max_predictions=1)
# Define expected results.
expected_mean_ap = 0.5
# Compare actual and expected results.
self.assertAllClose(mean_ap, expected_mean_ap)
def testMeanPrecisionsWorks(self):
# Define input.
predictions = _CreateRetrievalPredictions()
solution = _CreateRetrievalSolution()
# Run tested function.
mean_precisions = metrics.MeanPrecisions(
predictions, solution, max_predictions=2)
# Define expected results.
expected_mean_precisions = [0.5, 0.375]
# Compare actual and expected results.
self.assertAllClose(mean_precisions, expected_mean_precisions)
def testMeanMedianPositionWorks(self):
# Define input.
predictions = _CreateRetrievalPredictions()
solution = _CreateRetrievalSolution()
# Run tested function.
mean_position, median_position = metrics.MeanMedianPosition(
predictions, solution)
# Define expected results.
expected_mean_position = 26.25
expected_median_position = 1.5
# Compare actual and expected results.
self.assertAllClose(mean_position, expected_mean_position)
self.assertAllClose(median_position, expected_median_position)
def testMeanMedianPositionMaxPredictionsWorks(self):
# Define input.
predictions = _CreateRetrievalPredictions()
solution = _CreateRetrievalSolution()
# Run tested function.
mean_position, median_position = metrics.MeanMedianPosition(
predictions, solution, max_predictions=1)
# Define expected results.
expected_mean_position = 1.5
expected_median_position = 1.5
# Compare actual and expected results.
self.assertAllClose(mean_position, expected_mean_position)
self.assertAllClose(median_position, expected_median_position)
if __name__ == '__main__':
tf.test.main()
| 6,467 | 28.4 | 80 | py |
models | models-master/research/delf/delf/python/datasets/google_landmarks_dataset/googlelandmarks.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Google Landmarks Dataset(GLD).
Placeholder for Google Landmarks dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
class _GoogleLandmarksInfo(object):
"""Metadata about the Google Landmarks dataset."""
num_classes = {'gld_v1': 14951, 'gld_v2': 203094, 'gld_v2_clean': 81313}
class _DataAugmentationParams(object):
"""Default parameters for augmentation."""
# The following are used for training.
min_object_covered = 0.1
aspect_ratio_range_min = 3. / 4
aspect_ratio_range_max = 4. / 3
area_range_min = 0.08
area_range_max = 1.0
max_attempts = 100
update_labels = False
# 'central_fraction' is used for central crop in inference.
central_fraction = 0.875
random_reflection = False
def NormalizeImages(images, pixel_value_scale=0.5, pixel_value_offset=0.5):
"""Normalize pixel values in image.
Output is computed as
normalized_images = (images - pixel_value_offset) / pixel_value_scale.
Args:
images: `Tensor`, images to normalize.
pixel_value_scale: float, scale.
pixel_value_offset: float, offset.
Returns:
normalized_images: `Tensor`, normalized images.
"""
images = tf.cast(images, tf.float32)
normalized_images = tf.math.divide(
tf.subtract(images, pixel_value_offset), pixel_value_scale)
return normalized_images
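# Illustrative example (values mirror the call in _ParseFunction below; not
# part of the original module): with pixel_value_scale=128.0 and
# pixel_value_offset=128.0, uint8 pixel values in [0, 255] are mapped to
# roughly [-1, 1], e.g.
#   NormalizeImages(tf.constant([[[0, 128, 255]]]),
#                   pixel_value_scale=128.0, pixel_value_offset=128.0)
#   # -> [[[-1.0, 0.0, 0.9921875]]]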
def _ImageNetCrop(image, image_size):
"""Imagenet-style crop with random bbox and aspect ratio.
Args:
image: a `Tensor`, image to crop.
image_size: an `int`. The image size for the decoded image, on each side.
Returns:
cropped_image: `Tensor`, cropped image.
"""
params = _DataAugmentationParams()
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
(bbox_begin, bbox_size, _) = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=params.min_object_covered,
aspect_ratio_range=(params.aspect_ratio_range_min,
params.aspect_ratio_range_max),
area_range=(params.area_range_min, params.area_range_max),
max_attempts=params.max_attempts,
use_image_if_no_bounding_boxes=True)
cropped_image = tf.slice(image, bbox_begin, bbox_size)
cropped_image.set_shape([None, None, 3])
cropped_image = tf.image.resize(
cropped_image, [image_size, image_size], method='area')
if params.random_reflection:
cropped_image = tf.image.random_flip_left_right(cropped_image)
return cropped_image
def _ParseFunction(example, name_to_features, image_size, augmentation):
"""Parse a single TFExample to get the image and label and process the image.
Args:
example: a `TFExample`.
name_to_features: a `dict`. The mapping from feature names to its type.
image_size: an `int`. The image size for the decoded image, on each side.
augmentation: a `boolean`. True if the image will be augmented.
Returns:
image: a `Tensor`. The processed image.
label: a `Tensor`. The ground-truth label.
"""
parsed_example = tf.io.parse_single_example(example, name_to_features)
# Parse to get image.
image = parsed_example['image/encoded']
image = tf.io.decode_jpeg(image)
image = NormalizeImages(
image, pixel_value_scale=128.0, pixel_value_offset=128.0)
if augmentation:
image = _ImageNetCrop(image, image_size)
else:
image = tf.image.resize(image, [image_size, image_size])
image.set_shape([image_size, image_size, 3])
# Parse to get label.
label = parsed_example['image/class/label']
return image, label
def CreateDataset(file_pattern,
image_size=321,
batch_size=32,
augmentation=False,
seed=0):
"""Creates a dataset.
Args:
file_pattern: str, file pattern of the dataset files.
image_size: int, image size.
batch_size: int, batch size.
augmentation: bool, whether to apply augmentation.
seed: int, seed for shuffling the dataset.
Returns:
    A `tf.data.Dataset` yielding (image, label) batches.
"""
filenames = tf.io.gfile.glob(file_pattern)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.repeat().shuffle(buffer_size=100, seed=seed)
# Create a description of the features.
feature_description = {
'image/height': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'image/width': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'image/channels': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'image/format': tf.io.FixedLenFeature([], tf.string, default_value=''),
'image/id': tf.io.FixedLenFeature([], tf.string, default_value=''),
'image/filename': tf.io.FixedLenFeature([], tf.string, default_value=''),
'image/encoded': tf.io.FixedLenFeature([], tf.string, default_value=''),
'image/class/label': tf.io.FixedLenFeature([], tf.int64, default_value=0),
}
customized_parse_func = functools.partial(
_ParseFunction,
name_to_features=feature_description,
image_size=image_size,
augmentation=augmentation)
dataset = dataset.map(customized_parse_func)
dataset = dataset.batch(batch_size)
return dataset
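# Example usage sketch (hypothetical file pattern; not part of the original
# module): build a training pipeline with augmentation and take one batch.
#   train_dataset = CreateDataset(
#       file_pattern='/tmp/gld_tfrecord/train*',
#       image_size=321,
#       batch_size=32,
#       augmentation=True)
#   images, labels = next(iter(train_dataset))
#   # images.shape == (32, 321, 321, 3); labels.shape == (32,)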
def GoogleLandmarksInfo():
"""Returns metadata information on the Google Landmarks dataset.
Returns:
object _GoogleLandmarksInfo containing metadata about the GLD dataset.
"""
return _GoogleLandmarksInfo()
| 6,169 | 31.994652 | 80 | py |
models | models-master/research/delf/delf/python/datasets/google_landmarks_dataset/dataset_file_io_test.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset file IO module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import tensorflow as tf
from delf.python.datasets.google_landmarks_dataset import dataset_file_io
FLAGS = flags.FLAGS
class DatasetFileIoTest(tf.test.TestCase):
def testReadRecognitionSolutionWorks(self):
# Define inputs.
file_path = os.path.join(FLAGS.test_tmpdir, 'recognition_solution.csv')
with tf.io.gfile.GFile(file_path, 'w') as f:
f.write('id,landmarks,Usage\n')
f.write('0123456789abcdef,0 12,Public\n')
f.write('0223456789abcdef,,Public\n')
f.write('0323456789abcdef,100,Ignored\n')
f.write('0423456789abcdef,1,Private\n')
f.write('0523456789abcdef,,Ignored\n')
# Run tested function.
(public_solution, private_solution,
ignored_ids) = dataset_file_io.ReadSolution(
file_path, dataset_file_io.RECOGNITION_TASK_ID)
# Define expected results.
expected_public_solution = {
'0123456789abcdef': [0, 12],
'0223456789abcdef': []
}
expected_private_solution = {
'0423456789abcdef': [1],
}
expected_ignored_ids = ['0323456789abcdef', '0523456789abcdef']
# Compare actual and expected results.
self.assertEqual(public_solution, expected_public_solution)
self.assertEqual(private_solution, expected_private_solution)
self.assertEqual(ignored_ids, expected_ignored_ids)
def testReadRetrievalSolutionWorks(self):
# Define inputs.
file_path = os.path.join(FLAGS.test_tmpdir, 'retrieval_solution.csv')
with tf.io.gfile.GFile(file_path, 'w') as f:
f.write('id,images,Usage\n')
f.write('0123456789abcdef,None,Ignored\n')
f.write('0223456789abcdef,fedcba9876543210 fedcba9876543200,Public\n')
f.write('0323456789abcdef,fedcba9876543200,Private\n')
f.write('0423456789abcdef,fedcba9876543220,Private\n')
f.write('0523456789abcdef,None,Ignored\n')
# Run tested function.
(public_solution, private_solution,
ignored_ids) = dataset_file_io.ReadSolution(
file_path, dataset_file_io.RETRIEVAL_TASK_ID)
# Define expected results.
expected_public_solution = {
'0223456789abcdef': ['fedcba9876543210', 'fedcba9876543200'],
}
expected_private_solution = {
'0323456789abcdef': ['fedcba9876543200'],
'0423456789abcdef': ['fedcba9876543220'],
}
expected_ignored_ids = ['0123456789abcdef', '0523456789abcdef']
# Compare actual and expected results.
self.assertEqual(public_solution, expected_public_solution)
self.assertEqual(private_solution, expected_private_solution)
self.assertEqual(ignored_ids, expected_ignored_ids)
def testReadRecognitionPredictionsWorks(self):
# Define inputs.
file_path = os.path.join(FLAGS.test_tmpdir, 'recognition_predictions.csv')
with tf.io.gfile.GFile(file_path, 'w') as f:
f.write('id,landmarks\n')
f.write('0123456789abcdef,12 0.1 \n')
f.write('0423456789abcdef,0 19.0\n')
f.write('0223456789abcdef,\n')
f.write('\n')
f.write('0523456789abcdef,14 0.01\n')
public_ids = ['0123456789abcdef', '0223456789abcdef']
private_ids = ['0423456789abcdef']
ignored_ids = ['0323456789abcdef', '0523456789abcdef']
# Run tested function.
public_predictions, private_predictions = dataset_file_io.ReadPredictions(
file_path, public_ids, private_ids, ignored_ids,
dataset_file_io.RECOGNITION_TASK_ID)
# Define expected results.
expected_public_predictions = {
'0123456789abcdef': {
'class': 12,
'score': 0.1
}
}
expected_private_predictions = {
'0423456789abcdef': {
'class': 0,
'score': 19.0
}
}
# Compare actual and expected results.
self.assertEqual(public_predictions, expected_public_predictions)
self.assertEqual(private_predictions, expected_private_predictions)
def testReadRetrievalPredictionsWorks(self):
# Define inputs.
file_path = os.path.join(FLAGS.test_tmpdir, 'retrieval_predictions.csv')
with tf.io.gfile.GFile(file_path, 'w') as f:
f.write('id,images\n')
f.write('0123456789abcdef,fedcba9876543250 \n')
f.write('0423456789abcdef,fedcba9876543260\n')
f.write('0223456789abcdef,fedcba9876543210 fedcba9876543200 '
'fedcba9876543220\n')
f.write('\n')
f.write('0523456789abcdef,\n')
public_ids = ['0223456789abcdef']
private_ids = ['0323456789abcdef', '0423456789abcdef']
ignored_ids = ['0123456789abcdef', '0523456789abcdef']
# Run tested function.
public_predictions, private_predictions = dataset_file_io.ReadPredictions(
file_path, public_ids, private_ids, ignored_ids,
dataset_file_io.RETRIEVAL_TASK_ID)
# Define expected results.
expected_public_predictions = {
'0223456789abcdef': [
'fedcba9876543210', 'fedcba9876543200', 'fedcba9876543220'
]
}
expected_private_predictions = {'0423456789abcdef': ['fedcba9876543260']}
# Compare actual and expected results.
self.assertEqual(public_predictions, expected_public_predictions)
self.assertEqual(private_predictions, expected_private_predictions)
if __name__ == '__main__':
tf.test.main()
| 6,075 | 35.383234 | 80 | py |
models | models-master/research/delf/delf/python/datasets/google_landmarks_dataset/metrics.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python module to compute metrics for Google Landmarks dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def _CountPositives(solution):
"""Counts number of test images with non-empty ground-truth in `solution`.
Args:
solution: Dict mapping test image ID to list of ground-truth IDs.
Returns:
count: Number of test images with non-empty ground-truth.
"""
count = 0
for v in solution.values():
if v:
count += 1
return count
def GlobalAveragePrecision(predictions,
recognition_solution,
ignore_non_gt_test_images=False):
"""Computes global average precision for recognition prediction.
Args:
predictions: Dict mapping test image ID to a dict with keys 'class'
(integer) and 'score' (float).
recognition_solution: Dict mapping test image ID to list of ground-truth
landmark IDs.
ignore_non_gt_test_images: If True, ignore test images which do not have
associated ground-truth landmark IDs. For the Google Landmark Recognition
challenge, this should be set to False.
Returns:
gap: Global average precision score (float).
"""
# Compute number of expected results.
num_positives = _CountPositives(recognition_solution)
gap = 0.0
total_predictions = 0
correct_predictions = 0
# Sort predictions according to Kaggle's convention:
# - first by score (descending);
# - then by key (ascending);
# - then by class (ascending).
sorted_predictions_by_key_class = sorted(
predictions.items(), key=lambda item: (item[0], item[1]['class']))
sorted_predictions = sorted(
sorted_predictions_by_key_class,
key=lambda item: item[1]['score'],
reverse=True)
  # Loop over sorted predictions (descending order) and accumulate the GAP.
for key, prediction in sorted_predictions:
if ignore_non_gt_test_images and not recognition_solution[key]:
continue
total_predictions += 1
if prediction['class'] in recognition_solution[key]:
correct_predictions += 1
gap += correct_predictions / total_predictions
gap /= num_positives
return gap
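# Worked example (toy values, consistent with metrics_test.py): with ground
# truth {'a': [0, 12], 'b': [100], 'c': [1], 'd': [], 'e': []} there are
# num_positives = 3 queries with non-empty ground truth. If the predictions,
# sorted by descending score, are ('d', class 150), ('c', class 1),
# ('b', class 0), only rank 2 is correct, contributing precision 1/2, so
# GAP = (0 + 1/2 + 0) / 3 ~= 0.1667.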
def Top1Accuracy(predictions, recognition_solution):
"""Computes top-1 accuracy for recognition prediction.
Note that test images without ground-truth are ignored.
Args:
predictions: Dict mapping test image ID to a dict with keys 'class'
(integer) and 'score' (float).
recognition_solution: Dict mapping test image ID to list of ground-truth
landmark IDs.
Returns:
accuracy: Top-1 accuracy (float).
"""
  # Loop over test images in the solution. If an image has at least one class
  # label, we check whether the prediction is correct.
num_correct_predictions = 0
num_test_images_with_ground_truth = 0
for key, ground_truth in recognition_solution.items():
if ground_truth:
num_test_images_with_ground_truth += 1
if key in predictions:
if predictions[key]['class'] in ground_truth:
num_correct_predictions += 1
return num_correct_predictions / num_test_images_with_ground_truth
def MeanAveragePrecision(predictions, retrieval_solution, max_predictions=100):
"""Computes mean average precision for retrieval prediction.
Args:
predictions: Dict mapping test image ID to a list of strings corresponding
to index image IDs.
retrieval_solution: Dict mapping test image ID to list of ground-truth image
IDs.
max_predictions: Maximum number of predictions per query to take into
account. For the Google Landmark Retrieval challenge, this should be set
to 100.
Returns:
mean_ap: Mean average precision score (float).
Raises:
ValueError: If a test image in `predictions` is not included in
      `retrieval_solution`.
"""
# Compute number of test images.
num_test_images = len(retrieval_solution.keys())
# Loop over predictions for each query and compute mAP.
mean_ap = 0.0
for key, prediction in predictions.items():
if key not in retrieval_solution:
raise ValueError('Test image %s is not part of retrieval_solution' % key)
# Loop over predicted images, keeping track of those which were already
# used (duplicates are skipped).
ap = 0.0
already_predicted = set()
num_expected_retrieved = min(len(retrieval_solution[key]), max_predictions)
num_correct = 0
for i in range(min(len(prediction), max_predictions)):
if prediction[i] not in already_predicted:
if prediction[i] in retrieval_solution[key]:
num_correct += 1
ap += num_correct / (i + 1)
already_predicted.add(prediction[i])
ap /= num_expected_retrieved
mean_ap += ap
mean_ap /= num_test_images
return mean_ap
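# Worked example (toy values): with retrieval_solution
# {'q1': ['x'], 'q2': ['y', 'z']} and predictions {'q1': ['w', 'x']}, query
# 'q1' finds its single relevant image at rank 2, giving AP = (1/2) / 1 = 0.5;
# 'q2' has no predictions and contributes 0, so mean_ap = (0.5 + 0) / 2 = 0.25.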
def MeanPrecisions(predictions, retrieval_solution, max_predictions=100):
"""Computes mean precisions for retrieval prediction.
Args:
predictions: Dict mapping test image ID to a list of strings corresponding
to index image IDs.
retrieval_solution: Dict mapping test image ID to list of ground-truth image
IDs.
max_predictions: Maximum number of predictions per query to take into
account.
Returns:
mean_precisions: NumPy array with mean precisions at ranks 1 through
`max_predictions`.
Raises:
ValueError: If a test image in `predictions` is not included in
      `retrieval_solution`.
"""
# Compute number of test images.
num_test_images = len(retrieval_solution.keys())
# Loop over predictions for each query and compute precisions@k.
precisions = np.zeros((num_test_images, max_predictions))
count_test_images = 0
for key, prediction in predictions.items():
if key not in retrieval_solution:
raise ValueError('Test image %s is not part of retrieval_solution' % key)
# Loop over predicted images, keeping track of those which were already
# used (duplicates are skipped).
already_predicted = set()
num_correct = 0
for i in range(max_predictions):
if i < len(prediction):
if prediction[i] not in already_predicted:
if prediction[i] in retrieval_solution[key]:
num_correct += 1
already_predicted.add(prediction[i])
precisions[count_test_images, i] = num_correct / (i + 1)
count_test_images += 1
mean_precisions = np.mean(precisions, axis=0)
return mean_precisions
def MeanMedianPosition(predictions, retrieval_solution, max_predictions=100):
"""Computes mean and median positions of first correct image.
Args:
predictions: Dict mapping test image ID to a list of strings corresponding
to index image IDs.
retrieval_solution: Dict mapping test image ID to list of ground-truth image
IDs.
max_predictions: Maximum number of predictions per query to take into
account.
Returns:
mean_position: Float.
median_position: Float.
Raises:
ValueError: If a test image in `predictions` is not included in
      `retrieval_solution`.
"""
# Compute number of test images.
num_test_images = len(retrieval_solution.keys())
# Loop over predictions for each query to find first correct ranked image.
positions = (max_predictions + 1) * np.ones((num_test_images))
count_test_images = 0
for key, prediction in predictions.items():
if key not in retrieval_solution:
raise ValueError('Test image %s is not part of retrieval_solution' % key)
for i in range(min(len(prediction), max_predictions)):
if prediction[i] in retrieval_solution[key]:
positions[count_test_images] = i + 1
break
count_test_images += 1
mean_position = np.mean(positions)
median_position = np.median(positions)
return mean_position, median_position
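# Worked example (toy values): with retrieval_solution
# {'q1': ['x'], 'q2': ['y']}, predictions {'q1': ['w', 'x']} and
# max_predictions=100, 'q1' first retrieves a relevant image at position 2,
# while 'q2' (no predictions) keeps the sentinel position
# max_predictions + 1 = 101; hence mean = median = 51.5.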
| 8,490 | 32.298039 | 80 | py |
models | models-master/research/delf/delf/python/datasets/google_landmarks_dataset/__init__.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module exposing Google Landmarks dataset for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from delf.python.datasets.google_landmarks_dataset import googlelandmarks
# pylint: enable=unused-import
| 996 | 42.347826 | 80 | py |
models | models-master/research/delf/delf/python/datasets/google_landmarks_dataset/compute_recognition_metrics.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Computes metrics for Google Landmarks Recognition dataset predictions.
Metrics are written to stdout.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from absl import app
from delf.python.datasets.google_landmarks_dataset import dataset_file_io
from delf.python.datasets.google_landmarks_dataset import metrics
cmd_args = None
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read solution.
print('Reading solution...')
public_solution, private_solution, ignored_ids = dataset_file_io.ReadSolution(
cmd_args.solution_path, dataset_file_io.RECOGNITION_TASK_ID)
print('done!')
# Read predictions.
print('Reading predictions...')
public_predictions, private_predictions = dataset_file_io.ReadPredictions(
cmd_args.predictions_path, set(public_solution.keys()),
set(private_solution.keys()), set(ignored_ids),
dataset_file_io.RECOGNITION_TASK_ID)
print('done!')
# Global Average Precision.
print('**********************************************')
print('(Public) Global Average Precision: %f' %
metrics.GlobalAveragePrecision(public_predictions, public_solution))
print('(Private) Global Average Precision: %f' %
metrics.GlobalAveragePrecision(private_predictions, private_solution))
# Global Average Precision ignoring non-landmark queries.
print('**********************************************')
print(
'(Public) Global Average Precision ignoring non-landmark queries: %f' %
metrics.GlobalAveragePrecision(
public_predictions, public_solution, ignore_non_gt_test_images=True))
print(
'(Private) Global Average Precision ignoring non-landmark queries: %f' %
metrics.GlobalAveragePrecision(
private_predictions, private_solution,
ignore_non_gt_test_images=True))
# Top-1 accuracy.
print('**********************************************')
print('(Public) Top-1 accuracy: %.2f' %
(100.0 * metrics.Top1Accuracy(public_predictions, public_solution)))
print('(Private) Top-1 accuracy: %.2f' %
(100.0 * metrics.Top1Accuracy(private_predictions, private_solution)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--predictions_path',
type=str,
default='/tmp/predictions.csv',
help="""
Path to CSV predictions file, formatted with columns 'id,landmarks' (the
file should include a header).
""")
parser.add_argument(
'--solution_path',
type=str,
default='/tmp/solution.csv',
help="""
Path to CSV solution file, formatted with columns 'id,landmarks,Usage'
(the file should include a header).
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 3,672 | 35.73 | 80 | py |
models | models-master/research/delf/delf/python/datasets/google_landmarks_dataset/dataset_file_io.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IO module for files from Landmark recognition/retrieval challenges."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import tensorflow as tf
RECOGNITION_TASK_ID = 'recognition'
RETRIEVAL_TASK_ID = 'retrieval'
def ReadSolution(file_path, task):
"""Reads solution from file, for a given task.
Args:
file_path: Path to CSV file with solution. File contains a header.
task: Type of challenge task. Supported values: 'recognition', 'retrieval'.
Returns:
public_solution: Dict mapping test image ID to list of ground-truth IDs, for
the Public subset of test images. If `task` == 'recognition', the IDs are
integers corresponding to landmark IDs. If `task` == 'retrieval', the IDs
are strings corresponding to index image IDs.
private_solution: Same as `public_solution`, but for the private subset of
test images.
ignored_ids: List of test images that are ignored in scoring.
Raises:
ValueError: If Usage field is not Public, Private or Ignored; or if `task`
is not supported.
"""
public_solution = {}
private_solution = {}
ignored_ids = []
with tf.io.gfile.GFile(file_path, 'r') as csv_file:
reader = csv.reader(csv_file)
next(reader, None) # Skip header.
for row in reader:
test_id = row[0]
if row[2] == 'Ignored':
ignored_ids.append(test_id)
else:
ground_truth_ids = []
if task == RECOGNITION_TASK_ID:
if row[1]:
for landmark_id in row[1].split(' '):
ground_truth_ids.append(int(landmark_id))
elif task == RETRIEVAL_TASK_ID:
for image_id in row[1].split(' '):
ground_truth_ids.append(image_id)
else:
raise ValueError('Unrecognized task: %s' % task)
if row[2] == 'Public':
public_solution[test_id] = ground_truth_ids
elif row[2] == 'Private':
private_solution[test_id] = ground_truth_ids
else:
raise ValueError('Test image %s has unrecognized Usage tag %s' %
(row[0], row[2]))
return public_solution, private_solution, ignored_ids
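# Illustrative solution rows (taken from dataset_file_io_test.py):
#   recognition task: 'id,landmarks,Usage' header, then rows such as
#     0123456789abcdef,0 12,Public
#   retrieval task: 'id,images,Usage' header, then rows such as
#     0223456789abcdef,fedcba9876543210 fedcba9876543200,Public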
def ReadPredictions(file_path, public_ids, private_ids, ignored_ids, task):
"""Reads predictions from file, for a given task.
Args:
file_path: Path to CSV file with predictions. File contains a header.
public_ids: Set (or list) of test image IDs in Public subset of test images.
private_ids: Same as `public_ids`, but for the private subset of test
images.
ignored_ids: Set (or list) of test image IDs that are ignored in scoring and
are associated to no ground-truth.
task: Type of challenge task. Supported values: 'recognition', 'retrieval'.
Returns:
public_predictions: Dict mapping test image ID to prediction, for the Public
subset of test images. If `task` == 'recognition', the prediction is a
dict with keys 'class' (integer) and 'score' (float). If `task` ==
'retrieval', the prediction is a list of strings corresponding to index
image IDs.
private_predictions: Same as `public_predictions`, but for the private
subset of test images.
Raises:
ValueError:
- If test image ID is unrecognized/repeated;
- If `task` is not supported;
- If prediction is malformed.
"""
public_predictions = {}
private_predictions = {}
with tf.io.gfile.GFile(file_path, 'r') as csv_file:
reader = csv.reader(csv_file)
next(reader, None) # Skip header.
for row in reader:
# Skip row if empty.
if not row:
continue
test_id = row[0]
# Makes sure this query has not yet been seen.
if test_id in public_predictions:
raise ValueError('Test image %s is repeated.' % test_id)
if test_id in private_predictions:
raise ValueError('Test image %s is repeated' % test_id)
# If ignored, skip it.
if test_id in ignored_ids:
continue
# Only parse result if there is a prediction.
if row[1]:
prediction_split = row[1].split(' ')
# Remove empty spaces at end (if any).
if not prediction_split[-1]:
prediction_split = prediction_split[:-1]
if task == RECOGNITION_TASK_ID:
if len(prediction_split) != 2:
raise ValueError('Prediction is malformed: there should only be 2 '
'elements in second column, but found %d for test '
'image %s' % (len(prediction_split), test_id))
landmark_id = int(prediction_split[0])
score = float(prediction_split[1])
prediction_entry = {'class': landmark_id, 'score': score}
elif task == RETRIEVAL_TASK_ID:
prediction_entry = prediction_split
else:
raise ValueError('Unrecognized task: %s' % task)
if test_id in public_ids:
public_predictions[test_id] = prediction_entry
elif test_id in private_ids:
private_predictions[test_id] = prediction_entry
else:
raise ValueError('test_id %s is unrecognized' % test_id)
return public_predictions, private_predictions
| 5,921 | 36.0125 | 80 | py |
models | models-master/research/delf/delf/python/detect_to_retrieve/extract_index_boxes_and_features.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts DELF and boxes from the Revisited Oxford/Paris index datasets.
Boxes are saved to <image_name>.boxes files. DELF features are extracted for the
entire image and saved into <image_name>.delf files. In addition, DELF features
are extracted for each high-confidence bounding box in the image, and saved into
files named <image_name>_0.delf, <image_name>_1.delf, etc.
The program checks if descriptors/boxes already exist, and skips computation for
those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
from absl import app
from delf.python.datasets.revisited_op import dataset
from delf.python.detect_to_retrieve import boxes_and_features_extraction
cmd_args = None
_IMAGE_EXTENSION = '.jpg'
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read list of index images from dataset file.
print('Reading list of index images from dataset file...')
_, index_list, _ = dataset.ReadDatasetFile(cmd_args.dataset_file_path)
num_images = len(index_list)
print('done! Found %d images' % num_images)
# Compose list of image paths.
image_paths = [
os.path.join(cmd_args.images_dir, index_image_name + _IMAGE_EXTENSION)
for index_image_name in index_list
]
# Extract boxes/features and save them to files.
boxes_and_features_extraction.ExtractBoxesAndFeaturesToFiles(
image_names=index_list,
image_paths=image_paths,
delf_config_path=cmd_args.delf_config_path,
detector_model_dir=cmd_args.detector_model_dir,
detector_thresh=cmd_args.detector_thresh,
output_features_dir=cmd_args.output_features_dir,
output_boxes_dir=cmd_args.output_boxes_dir,
output_mapping=cmd_args.output_index_mapping)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--delf_config_path',
type=str,
default='/tmp/delf_config_example.pbtxt',
help="""
Path to DelfConfig proto text file with configuration to be used for DELF
extraction.
""")
parser.add_argument(
'--detector_model_dir',
type=str,
default='/tmp/detector_model',
help="""
Directory where detector SavedModel is located.
""")
parser.add_argument(
'--detector_thresh',
type=float,
default=0.1,
help="""
Threshold used to decide if an image's detected box undergoes feature
extraction. For all detected boxes with detection score larger than this,
a .delf file is saved containing the box features. Note that this
threshold is used only to select which boxes are used in feature
extraction; all detected boxes are actually saved in the .boxes file, even
those with score lower than detector_thresh.
""")
parser.add_argument(
'--dataset_file_path',
type=str,
default='/tmp/gnd_roxford5k.mat',
help="""
Dataset file for Revisited Oxford or Paris dataset, in .mat format.
""")
parser.add_argument(
'--images_dir',
type=str,
default='/tmp/images',
help="""
Directory where dataset images are located, all in .jpg format.
""")
parser.add_argument(
'--output_boxes_dir',
type=str,
default='/tmp/boxes',
help="""
Directory where detected boxes will be written to. Each image's boxes
will be written to a file with same name, and extension replaced by
.boxes.
""")
parser.add_argument(
'--output_features_dir',
type=str,
default='/tmp/features',
help="""
Directory where DELF features will be written to. Each image's features
will be written to a file with same name, and extension replaced by .delf,
eg: <image_name>.delf. In addition, DELF features are extracted for each
high-confidence bounding box in the image, and saved into files named
<image_name>_0.delf, <image_name>_1.delf, etc.
""")
parser.add_argument(
'--output_index_mapping',
type=str,
default='/tmp/index_mapping.csv',
help="""
CSV file which maps each .delf file name to the index image ID and
detected box ID. The format is 'name,index_image_id,box_id', including a
header. The 'name' refers to the .delf file name without extension.
For example, a few lines may be like:
'radcliffe_camera_000158,2,-1'
'radcliffe_camera_000158_0,2,0'
'radcliffe_camera_000158_1,2,1'
'radcliffe_camera_000158_2,2,2'
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 5,459 | 34.921053 | 80 | py |
models | models-master/research/delf/delf/python/detect_to_retrieve/image_reranking.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library to re-rank images based on geometric verification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import matplotlib.pyplot as plt
import numpy as np
from scipy import spatial
from skimage import feature
from skimage import measure
from skimage import transform
from delf import feature_io
# Extensions.
_DELF_EXTENSION = '.delf'
# Pace at which re-ranking progress is logged.
_STATUS_CHECK_GV_ITERATIONS = 10
# Re-ranking / geometric verification parameters.
_NUM_TO_RERANK = 100
_NUM_RANSAC_TRIALS = 1000
_MIN_RANSAC_SAMPLES = 3
def MatchFeatures(query_locations,
query_descriptors,
index_image_locations,
index_image_descriptors,
ransac_seed=None,
descriptor_matching_threshold=0.9,
ransac_residual_threshold=10.0,
query_im_array=None,
index_im_array=None,
query_im_scale_factors=None,
index_im_scale_factors=None,
use_ratio_test=False):
"""Matches local features using geometric verification.
First, finds putative local feature matches by matching `query_descriptors`
against a KD-tree from the `index_image_descriptors`. Then, attempts to fit an
  affine transformation between the putative feature correspondences using their
locations.
Args:
query_locations: Locations of local features for query image. NumPy array of
shape [#query_features, 2].
query_descriptors: Descriptors of local features for query image. NumPy
array of shape [#query_features, depth].
index_image_locations: Locations of local features for index image. NumPy
array of shape [#index_image_features, 2].
index_image_descriptors: Descriptors of local features for index image.
NumPy array of shape [#index_image_features, depth].
ransac_seed: Seed used by RANSAC. If None (default), no seed is provided.
descriptor_matching_threshold: Threshold below which a pair of local
descriptors is considered a potential match, and will be fed into RANSAC.
If use_ratio_test==False, this is a simple distance threshold. If
use_ratio_test==True, this is Lowe's ratio test threshold.
ransac_residual_threshold: Residual error threshold for considering matches
as inliers, used in RANSAC algorithm.
query_im_array: Optional. If not None, contains a NumPy array with the query
image, used to produce match visualization, if there is a match.
index_im_array: Optional. Same as `query_im_array`, but for index image.
query_im_scale_factors: Optional. If not None, contains a NumPy array with
the query image scales, used to produce match visualization, if there is a
match. If None and a visualization will be produced, [1.0, 1.0] is used
(ie, feature locations are not scaled).
index_im_scale_factors: Optional. Same as `query_im_scale_factors`, but for
index image.
use_ratio_test: If True, descriptor matching is performed via ratio test,
instead of distance-based threshold.
Returns:
score: Number of inliers of match. If no match is found, returns 0.
match_viz_bytes: Encoded image bytes with visualization of the match, if
there is one, and if `query_im_array` and `index_im_array` are properly
set. Otherwise, it's an empty bytes string.
Raises:
ValueError: If local descriptors from query and index images have different
dimensionalities.
"""
num_features_query = query_locations.shape[0]
num_features_index_image = index_image_locations.shape[0]
if not num_features_query or not num_features_index_image:
return 0, b''
local_feature_dim = query_descriptors.shape[1]
if index_image_descriptors.shape[1] != local_feature_dim:
raise ValueError(
'Local feature dimensionality is not consistent for query and index '
'images.')
# Construct KD-tree used to find nearest neighbors.
index_image_tree = spatial.cKDTree(index_image_descriptors)
if use_ratio_test:
distances, indices = index_image_tree.query(
query_descriptors, k=2, n_jobs=-1)
query_locations_to_use = np.array([
query_locations[i,]
for i in range(num_features_query)
if distances[i][0] < descriptor_matching_threshold * distances[i][1]
])
index_image_locations_to_use = np.array([
index_image_locations[indices[i][0],]
for i in range(num_features_query)
if distances[i][0] < descriptor_matching_threshold * distances[i][1]
])
else:
_, indices = index_image_tree.query(
query_descriptors,
distance_upper_bound=descriptor_matching_threshold,
n_jobs=-1)
# Select feature locations for putative matches.
query_locations_to_use = np.array([
query_locations[i,]
for i in range(num_features_query)
if indices[i] != num_features_index_image
])
index_image_locations_to_use = np.array([
index_image_locations[indices[i],]
for i in range(num_features_query)
if indices[i] != num_features_index_image
])
# If there are not enough putative matches, early return 0.
if query_locations_to_use.shape[0] <= _MIN_RANSAC_SAMPLES:
return 0, b''
# Perform geometric verification using RANSAC.
_, inliers = measure.ransac(
(index_image_locations_to_use, query_locations_to_use),
transform.AffineTransform,
min_samples=_MIN_RANSAC_SAMPLES,
residual_threshold=ransac_residual_threshold,
max_trials=_NUM_RANSAC_TRIALS,
random_state=ransac_seed)
match_viz_bytes = b''
if inliers is None:
inliers = []
elif query_im_array is not None and index_im_array is not None:
if query_im_scale_factors is None:
query_im_scale_factors = [1.0, 1.0]
if index_im_scale_factors is None:
index_im_scale_factors = [1.0, 1.0]
inlier_idxs = np.nonzero(inliers)[0]
_, ax = plt.subplots()
ax.axis('off')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
plt.margins(0, 0)
feature.plot_matches(
ax,
query_im_array,
index_im_array,
query_locations_to_use * query_im_scale_factors,
index_image_locations_to_use * index_im_scale_factors,
np.column_stack((inlier_idxs, inlier_idxs)),
only_matches=True)
match_viz_io = io.BytesIO()
plt.savefig(match_viz_io, format='jpeg', bbox_inches='tight', pad_inches=0)
match_viz_bytes = match_viz_io.getvalue()
return sum(inliers), match_viz_bytes
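# Usage sketch (hypothetical .delf paths; not part of the original module):
# load two sets of DELF local features with feature_io and count the number of
# geometrically verified inliers between them.
#   q_loc, _, q_desc, _, _ = feature_io.ReadFromFile('/tmp/query.delf')
#   i_loc, _, i_desc, _, _ = feature_io.ReadFromFile('/tmp/index.delf')
#   score, _ = MatchFeatures(q_loc, q_desc, i_loc, i_desc, ransac_seed=0)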
def RerankByGeometricVerification(input_ranks,
initial_scores,
query_name,
index_names,
query_features_dir,
index_features_dir,
junk_ids,
local_feature_extension=_DELF_EXTENSION,
ransac_seed=None,
descriptor_matching_threshold=0.9,
ransac_residual_threshold=10.0,
use_ratio_test=False):
"""Re-ranks retrieval results using geometric verification.
Args:
input_ranks: 1D NumPy array with indices of top-ranked index images, sorted
from the most to the least similar.
initial_scores: 1D NumPy array with initial similarity scores between query
and index images. Entry i corresponds to score for image i.
query_name: Name for query image (string).
index_names: List of names for index images (strings).
query_features_dir: Directory where query local feature file is located
(string).
index_features_dir: Directory where index local feature files are located
(string).
junk_ids: Set with indices of junk images which should not be considered
during re-ranking.
local_feature_extension: String, extension to use for loading local feature
files.
ransac_seed: Seed used by RANSAC. If None (default), no seed is provided.
descriptor_matching_threshold: Threshold used for local descriptor matching.
ransac_residual_threshold: Residual error threshold for considering matches
as inliers, used in RANSAC algorithm.
use_ratio_test: If True, descriptor matching is performed via ratio test,
instead of distance-based threshold.
Returns:
output_ranks: 1D NumPy array with index image indices, sorted from the most
to the least similar according to the geometric verification and initial
scores.
Raises:
ValueError: If `input_ranks`, `initial_scores` and `index_names` do not have
the same number of entries.
"""
num_index_images = len(index_names)
if len(input_ranks) != num_index_images:
raise ValueError('input_ranks and index_names have different number of '
'elements: %d vs %d' %
(len(input_ranks), len(index_names)))
if len(initial_scores) != num_index_images:
raise ValueError('initial_scores and index_names have different number of '
'elements: %d vs %d' %
(len(initial_scores), len(index_names)))
# Filter out junk images from list that will be re-ranked.
input_ranks_for_gv = []
for ind in input_ranks:
if ind not in junk_ids:
input_ranks_for_gv.append(ind)
num_to_rerank = min(_NUM_TO_RERANK, len(input_ranks_for_gv))
# Load query image features.
query_features_path = os.path.join(query_features_dir,
query_name + local_feature_extension)
query_locations, _, query_descriptors, _, _ = feature_io.ReadFromFile(
query_features_path)
# Initialize list containing number of inliers and initial similarity scores.
inliers_and_initial_scores = []
for i in range(num_index_images):
inliers_and_initial_scores.append([0, initial_scores[i]])
# Loop over top-ranked images and get results.
print('Starting to re-rank')
for i in range(num_to_rerank):
if i > 0 and i % _STATUS_CHECK_GV_ITERATIONS == 0:
print('Re-ranking: i = %d out of %d' % (i, num_to_rerank))
index_image_id = input_ranks_for_gv[i]
# Load index image features.
index_image_features_path = os.path.join(
index_features_dir,
index_names[index_image_id] + local_feature_extension)
(index_image_locations, _, index_image_descriptors, _,
_) = feature_io.ReadFromFile(index_image_features_path)
inliers_and_initial_scores[index_image_id][0], _ = MatchFeatures(
query_locations,
query_descriptors,
index_image_locations,
index_image_descriptors,
ransac_seed=ransac_seed,
descriptor_matching_threshold=descriptor_matching_threshold,
ransac_residual_threshold=ransac_residual_threshold,
use_ratio_test=use_ratio_test)
# Sort based on (inliers_score, initial_score).
def _InliersInitialScoresSorting(k):
"""Helper function to sort list based on two entries.
Args:
k: Index into `inliers_and_initial_scores`.
Returns:
Tuple containing inlier score and initial score.
"""
return (inliers_and_initial_scores[k][0], inliers_and_initial_scores[k][1])
output_ranks = sorted(
range(num_index_images), key=_InliersInitialScoresSorting, reverse=True)
return output_ranks
| 12,294 | 39.444079 | 80 | py |
models | models-master/research/delf/delf/python/detect_to_retrieve/extract_query_features.py | # Lint as: python3
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts DELF features for query images from Revisited Oxford/Paris datasets.
Note that query images are cropped before feature extraction, as required by the
evaluation protocols of these datasets.
The program checks if descriptors already exist, and skips computation for
those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
from absl import app
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from delf import delf_config_pb2
from delf import feature_io
from delf import utils
from delf.python.datasets.revisited_op import dataset
from delf import extractor
cmd_args = None
# Extensions.
_DELF_EXTENSION = '.delf'
_IMAGE_EXTENSION = '.jpg'
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read list of query images from dataset file.
print('Reading list of query images and boxes from dataset file...')
query_list, _, ground_truth = dataset.ReadDatasetFile(
cmd_args.dataset_file_path)
num_images = len(query_list)
print(f'done! Found {num_images} images')
# Parse DelfConfig proto.
config = delf_config_pb2.DelfConfig()
with tf.io.gfile.GFile(cmd_args.delf_config_path, 'r') as f:
text_format.Merge(f.read(), config)
# Create output directory if necessary.
if not tf.io.gfile.exists(cmd_args.output_features_dir):
tf.io.gfile.makedirs(cmd_args.output_features_dir)
extractor_fn = extractor.MakeExtractor(config)
start = time.time()
for i in range(num_images):
query_image_name = query_list[i]
input_image_filename = os.path.join(cmd_args.images_dir,
query_image_name + _IMAGE_EXTENSION)
output_feature_filename = os.path.join(cmd_args.output_features_dir,
query_image_name + _DELF_EXTENSION)
if tf.io.gfile.exists(output_feature_filename):
print(f'Skipping {query_image_name}')
continue
# Crop query image according to bounding box.
bbox = [int(round(b)) for b in ground_truth[i]['bbx']]
im = np.array(utils.RgbLoader(input_image_filename).crop(bbox))
# Extract and save features.
extracted_features = extractor_fn(im)
locations_out = extracted_features['local_features']['locations']
descriptors_out = extracted_features['local_features']['descriptors']
feature_scales_out = extracted_features['local_features']['scales']
attention_out = extracted_features['local_features']['attention']
feature_io.WriteToFile(output_feature_filename, locations_out,
feature_scales_out, descriptors_out, attention_out)
elapsed = (time.time() - start)
print('Processed %d query images in %f seconds' % (num_images, elapsed))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--delf_config_path',
type=str,
default='/tmp/delf_config_example.pbtxt',
help="""
Path to DelfConfig proto text file with configuration to be used for DELF
extraction.
""")
parser.add_argument(
'--dataset_file_path',
type=str,
default='/tmp/gnd_roxford5k.mat',
help="""
Dataset file for Revisited Oxford or Paris dataset, in .mat format.
""")
parser.add_argument(
'--images_dir',
type=str,
default='/tmp/images',
help="""
Directory where dataset images are located, all in .jpg format.
""")
parser.add_argument(
'--output_features_dir',
type=str,
default='/tmp/features',
help="""
Directory where DELF features will be written to. Each image's features
will be written to a file with same name, and extension replaced by .delf.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 4,714 | 33.166667 | 80 | py |
models | models-master/research/delf/delf/python/detect_to_retrieve/boxes_and_features_extraction.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library to extract/save boxes and DELF features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import math
import os
import time
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from delf import delf_config_pb2
from delf import box_io
from delf import feature_io
from delf import utils
from delf import detector
from delf import extractor
# Extension of feature files.
_BOX_EXTENSION = '.boxes'
_DELF_EXTENSION = '.delf'
# Pace at which extraction progress is logged.
_STATUS_CHECK_ITERATIONS = 100
def _WriteMappingBasenameToIds(index_names_ids_and_boxes, output_path):
"""Helper function to write CSV mapping from DELF file name to IDs.
Args:
index_names_ids_and_boxes: List containing 3-element lists with name, image
ID and box ID.
output_path: Output CSV path.
"""
with tf.io.gfile.GFile(output_path, 'w') as f:
csv_writer = csv.DictWriter(
f, fieldnames=['name', 'index_image_id', 'box_id'])
csv_writer.writeheader()
for name_imid_boxid in index_names_ids_and_boxes:
csv_writer.writerow({
'name': name_imid_boxid[0],
'index_image_id': name_imid_boxid[1],
'box_id': name_imid_boxid[2],
})
def ExtractBoxesAndFeaturesToFiles(image_names, image_paths, delf_config_path,
detector_model_dir, detector_thresh,
output_features_dir, output_boxes_dir,
output_mapping):
"""Extracts boxes and features, saving them to files.
Boxes are saved to <image_name>.boxes files. DELF features are extracted for
the entire image and saved into <image_name>.delf files. In addition, DELF
features are extracted for each high-confidence bounding box in the image, and
saved into files named <image_name>_0.delf, <image_name>_1.delf, etc.
It checks if descriptors/boxes already exist, and skips computation for those.
Args:
image_names: List of image names. These are used to compose output file
names for boxes and features.
image_paths: List of image paths. image_paths[i] is the path for the image
named by image_names[i]. `image_names` and `image_paths` must have the
same number of elements.
delf_config_path: Path to DelfConfig proto text file.
detector_model_dir: Directory where detector SavedModel is located.
detector_thresh: Threshold used to decide if an image's detected box
undergoes feature extraction.
output_features_dir: Directory where DELF features will be written to.
output_boxes_dir: Directory where detected boxes will be written to.
output_mapping: CSV file which maps each .delf file name to the image ID and
detected box ID.
Raises:
ValueError: If len(image_names) and len(image_paths) are different.
"""
num_images = len(image_names)
if len(image_paths) != num_images:
raise ValueError(
'image_names and image_paths have different number of items')
# Parse DelfConfig proto.
config = delf_config_pb2.DelfConfig()
with tf.io.gfile.GFile(delf_config_path, 'r') as f:
text_format.Merge(f.read(), config)
# Create output directories if necessary.
if not tf.io.gfile.exists(output_features_dir):
tf.io.gfile.makedirs(output_features_dir)
if not tf.io.gfile.exists(output_boxes_dir):
tf.io.gfile.makedirs(output_boxes_dir)
if not tf.io.gfile.exists(os.path.dirname(output_mapping)):
tf.io.gfile.makedirs(os.path.dirname(output_mapping))
names_ids_and_boxes = []
detector_fn = detector.MakeDetector(detector_model_dir)
delf_extractor_fn = extractor.MakeExtractor(config)
start = time.time()
for i in range(num_images):
if i == 0:
print('Starting to extract features/boxes...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.time() - start)
print('Processing image %d out of %d, last %d '
'images took %f seconds' %
(i, num_images, _STATUS_CHECK_ITERATIONS, elapsed))
start = time.time()
image_name = image_names[i]
output_feature_filename_whole_image = os.path.join(
output_features_dir, image_name + _DELF_EXTENSION)
output_box_filename = os.path.join(output_boxes_dir,
image_name + _BOX_EXTENSION)
pil_im = utils.RgbLoader(image_paths[i])
width, height = pil_im.size
# Extract and save boxes.
if tf.io.gfile.exists(output_box_filename):
print('Skipping box computation for %s' % image_name)
(boxes_out, scores_out,
class_indices_out) = box_io.ReadFromFile(output_box_filename)
else:
(boxes_out, scores_out,
class_indices_out) = detector_fn(np.expand_dims(pil_im, 0))
# Using only one image per batch.
boxes_out = boxes_out[0]
scores_out = scores_out[0]
class_indices_out = class_indices_out[0]
box_io.WriteToFile(output_box_filename, boxes_out, scores_out,
class_indices_out)
    # Select boxes with scores at or above the detector threshold. Those will
    # be the ones with extracted DELF features (besides the whole image, whose
    # DELF features are extracted in all cases).
num_delf_files = 1
selected_boxes = []
for box_ind, box in enumerate(boxes_out):
if scores_out[box_ind] >= detector_thresh:
selected_boxes.append(box)
num_delf_files += len(selected_boxes)
# Extract and save DELF features.
for delf_file_ind in range(num_delf_files):
if delf_file_ind == 0:
box_name = image_name
output_feature_filename = output_feature_filename_whole_image
else:
box_name = image_name + '_' + str(delf_file_ind - 1)
output_feature_filename = os.path.join(output_features_dir,
box_name + _DELF_EXTENSION)
names_ids_and_boxes.append([box_name, i, delf_file_ind - 1])
if tf.io.gfile.exists(output_feature_filename):
print('Skipping DELF computation for %s' % box_name)
continue
if delf_file_ind >= 1:
bbox_for_cropping = selected_boxes[delf_file_ind - 1]
bbox_for_cropping_pil_convention = [
int(math.floor(bbox_for_cropping[1] * width)),
int(math.floor(bbox_for_cropping[0] * height)),
int(math.ceil(bbox_for_cropping[3] * width)),
int(math.ceil(bbox_for_cropping[2] * height))
]
pil_cropped_im = pil_im.crop(bbox_for_cropping_pil_convention)
im = np.array(pil_cropped_im)
else:
im = np.array(pil_im)
extracted_features = delf_extractor_fn(im)
locations_out = extracted_features['local_features']['locations']
descriptors_out = extracted_features['local_features']['descriptors']
feature_scales_out = extracted_features['local_features']['scales']
attention_out = extracted_features['local_features']['attention']
feature_io.WriteToFile(output_feature_filename, locations_out,
feature_scales_out, descriptors_out, attention_out)
# Save mapping from output DELF name to image id and box id.
_WriteMappingBasenameToIds(names_ids_and_boxes, output_mapping)
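# Minimal usage sketch (added for illustration; not part of the original
# module). All paths and the threshold below are hypothetical placeholders.
def _ExampleExtractBoxesAndFeatures():
  """Illustrative-only helper; never called by this module."""
  ExtractBoxesAndFeaturesToFiles(
      image_names=['img_a', 'img_b'],
      image_paths=['/tmp/images/img_a.jpg', '/tmp/images/img_b.jpg'],
      delf_config_path='/tmp/delf_config_example.pbtxt',
      detector_model_dir='/tmp/detector_model',
      detector_thresh=0.1,
      output_features_dir='/tmp/features',
      output_boxes_dir='/tmp/boxes',
      output_mapping='/tmp/mapping.csv')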
| 7,956 | 38.197044 | 80 | py |
models | models-master/research/delf/delf/python/detect_to_retrieve/perform_retrieval.py | # Lint as: python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Performs image retrieval on Revisited Oxford/Paris datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
from absl import app
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from delf import aggregation_config_pb2
from delf import datum_io
from delf import feature_aggregation_similarity
from delf.python.datasets.revisited_op import dataset
from delf.python.detect_to_retrieve import image_reranking
cmd_args = None
# Aliases for aggregation types.
_VLAD = aggregation_config_pb2.AggregationConfig.VLAD
_ASMK = aggregation_config_pb2.AggregationConfig.ASMK
_ASMK_STAR = aggregation_config_pb2.AggregationConfig.ASMK_STAR
# Extensions.
_VLAD_EXTENSION_SUFFIX = 'vlad'
_ASMK_EXTENSION_SUFFIX = 'asmk'
_ASMK_STAR_EXTENSION_SUFFIX = 'asmk_star'
# Precision-recall ranks to use in metric computation.
_PR_RANKS = (1, 5, 10)
# Interval (number of images) at which descriptor-loading progress is logged.
_STATUS_CHECK_LOAD_ITERATIONS = 50
# Output file names.
_METRICS_FILENAME = 'metrics.txt'
def _ReadAggregatedDescriptors(input_dir, image_list, config):
"""Reads aggregated descriptors.
Args:
input_dir: Directory where aggregated descriptors are located.
image_list: List of image names for which to load descriptors.
config: AggregationConfig used for images.
Returns:
aggregated_descriptors: List containing #images items, each a 1D NumPy
array.
visual_words: If using VLAD aggregation, returns an empty list. Otherwise,
returns a list containing #images items, each a 1D NumPy array.
"""
# Compose extension of aggregated descriptors.
extension = '.'
if config.use_regional_aggregation:
extension += 'r'
if config.aggregation_type == _VLAD:
extension += _VLAD_EXTENSION_SUFFIX
elif config.aggregation_type == _ASMK:
extension += _ASMK_EXTENSION_SUFFIX
elif config.aggregation_type == _ASMK_STAR:
extension += _ASMK_STAR_EXTENSION_SUFFIX
else:
raise ValueError('Invalid aggregation type: %d' % config.aggregation_type)
num_images = len(image_list)
aggregated_descriptors = []
visual_words = []
print('Starting to collect descriptors for %d images...' % num_images)
  start = time.time()
for i in range(num_images):
if i > 0 and i % _STATUS_CHECK_LOAD_ITERATIONS == 0:
      elapsed = (time.time() - start)
print('Reading descriptors for image %d out of %d, last %d '
'images took %f seconds' %
(i, num_images, _STATUS_CHECK_LOAD_ITERATIONS, elapsed))
      start = time.time()
descriptors_filename = image_list[i] + extension
descriptors_fullpath = os.path.join(input_dir, descriptors_filename)
if config.aggregation_type == _VLAD:
aggregated_descriptors.append(datum_io.ReadFromFile(descriptors_fullpath))
else:
d, v = datum_io.ReadPairFromFile(descriptors_fullpath)
if config.aggregation_type == _ASMK_STAR:
d = d.astype('uint8')
aggregated_descriptors.append(d)
visual_words.append(v)
return aggregated_descriptors, visual_words
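# Illustrative note (added; not in the original module): the extension composed
# above determines the expected file name. For a hypothetical image 'img_a',
# ASMK with regional aggregation reads '<input_dir>/img_a.rasmk', while VLAD
# without regional aggregation reads '<input_dir>/img_a.vlad'.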
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Parse dataset to obtain query/index images, and ground-truth.
print('Parsing dataset...')
query_list, index_list, ground_truth = dataset.ReadDatasetFile(
cmd_args.dataset_file_path)
num_query_images = len(query_list)
num_index_images = len(index_list)
(_, medium_ground_truth,
hard_ground_truth) = dataset.ParseEasyMediumHardGroundTruth(ground_truth)
print('done! Found %d queries and %d index images' %
(num_query_images, num_index_images))
# Parse AggregationConfig protos.
query_config = aggregation_config_pb2.AggregationConfig()
with tf.io.gfile.GFile(cmd_args.query_aggregation_config_path, 'r') as f:
text_format.Merge(f.read(), query_config)
index_config = aggregation_config_pb2.AggregationConfig()
with tf.io.gfile.GFile(cmd_args.index_aggregation_config_path, 'r') as f:
text_format.Merge(f.read(), index_config)
# Read aggregated descriptors.
query_aggregated_descriptors, query_visual_words = _ReadAggregatedDescriptors(
cmd_args.query_aggregation_dir, query_list, query_config)
index_aggregated_descriptors, index_visual_words = _ReadAggregatedDescriptors(
cmd_args.index_aggregation_dir, index_list, index_config)
# Create similarity computer.
similarity_computer = (
feature_aggregation_similarity.SimilarityAggregatedRepresentation(
index_config))
# Compute similarity between query and index images, potentially re-ranking
# with geometric verification.
ranks_before_gv = np.zeros([num_query_images, num_index_images],
dtype='int32')
if cmd_args.use_geometric_verification:
medium_ranks_after_gv = np.zeros([num_query_images, num_index_images],
dtype='int32')
hard_ranks_after_gv = np.zeros([num_query_images, num_index_images],
dtype='int32')
for i in range(num_query_images):
print('Performing retrieval with query %d (%s)...' % (i, query_list[i]))
    start = time.time()
# Compute similarity between aggregated descriptors.
similarities = np.zeros([num_index_images])
for j in range(num_index_images):
similarities[j] = similarity_computer.ComputeSimilarity(
query_aggregated_descriptors[i], index_aggregated_descriptors[j],
query_visual_words[i], index_visual_words[j])
ranks_before_gv[i] = np.argsort(-similarities)
# Re-rank using geometric verification.
if cmd_args.use_geometric_verification:
medium_ranks_after_gv[i] = image_reranking.RerankByGeometricVerification(
ranks_before_gv[i], similarities, query_list[i], index_list,
cmd_args.query_features_dir, cmd_args.index_features_dir,
set(medium_ground_truth[i]['junk']))
hard_ranks_after_gv[i] = image_reranking.RerankByGeometricVerification(
ranks_before_gv[i], similarities, query_list[i], index_list,
cmd_args.query_features_dir, cmd_args.index_features_dir,
set(hard_ground_truth[i]['junk']))
    elapsed = (time.time() - start)
print('done! Retrieval for query %d took %f seconds' % (i, elapsed))
# Create output directory if necessary.
if not tf.io.gfile.exists(cmd_args.output_dir):
tf.io.gfile.makedirs(cmd_args.output_dir)
# Compute metrics.
medium_metrics = dataset.ComputeMetrics(ranks_before_gv, medium_ground_truth,
_PR_RANKS)
hard_metrics = dataset.ComputeMetrics(ranks_before_gv, hard_ground_truth,
_PR_RANKS)
if cmd_args.use_geometric_verification:
medium_metrics_after_gv = dataset.ComputeMetrics(medium_ranks_after_gv,
medium_ground_truth,
_PR_RANKS)
hard_metrics_after_gv = dataset.ComputeMetrics(hard_ranks_after_gv,
hard_ground_truth, _PR_RANKS)
# Write metrics to file.
mean_average_precision_dict = {
'medium': medium_metrics[0],
'hard': hard_metrics[0]
}
mean_precisions_dict = {'medium': medium_metrics[1], 'hard': hard_metrics[1]}
mean_recalls_dict = {'medium': medium_metrics[2], 'hard': hard_metrics[2]}
if cmd_args.use_geometric_verification:
mean_average_precision_dict.update({
'medium_after_gv': medium_metrics_after_gv[0],
'hard_after_gv': hard_metrics_after_gv[0]
})
mean_precisions_dict.update({
'medium_after_gv': medium_metrics_after_gv[1],
'hard_after_gv': hard_metrics_after_gv[1]
})
mean_recalls_dict.update({
'medium_after_gv': medium_metrics_after_gv[2],
'hard_after_gv': hard_metrics_after_gv[2]
})
dataset.SaveMetricsFile(mean_average_precision_dict, mean_precisions_dict,
mean_recalls_dict, _PR_RANKS,
os.path.join(cmd_args.output_dir, _METRICS_FILENAME))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--index_aggregation_config_path',
type=str,
default='/tmp/index_aggregation_config.pbtxt',
help="""
Path to index AggregationConfig proto text file. This is used to load the
aggregated descriptors from the index, and to define the parameters used
in computing similarity for aggregated descriptors.
""")
parser.add_argument(
'--query_aggregation_config_path',
type=str,
default='/tmp/query_aggregation_config.pbtxt',
help="""
Path to query AggregationConfig proto text file. This is only used to load
the aggregated descriptors for the queries.
""")
parser.add_argument(
'--dataset_file_path',
type=str,
default='/tmp/gnd_roxford5k.mat',
help="""
Dataset file for Revisited Oxford or Paris dataset, in .mat format.
""")
parser.add_argument(
'--index_aggregation_dir',
type=str,
default='/tmp/index_aggregation',
help="""
Directory where index aggregated descriptors are located.
""")
parser.add_argument(
'--query_aggregation_dir',
type=str,
default='/tmp/query_aggregation',
help="""
Directory where query aggregated descriptors are located.
""")
parser.add_argument(
'--use_geometric_verification',
type=lambda x: (str(x).lower() == 'true'),
default=False,
help="""
If True, performs re-ranking using local feature-based geometric
verification.
""")
parser.add_argument(
'--index_features_dir',
type=str,
default='/tmp/index_features',
help="""
Only used if `use_geometric_verification` is True.
Directory where index local image features are located, all in .delf
format.
""")
parser.add_argument(
'--query_features_dir',
type=str,
default='/tmp/query_features',
help="""
Only used if `use_geometric_verification` is True.
Directory where query local image features are located, all in .delf
format.
""")
parser.add_argument(
'--output_dir',
type=str,
default='/tmp/retrieval',
help="""
Directory where retrieval output will be written to. A file containing
metrics for this run is saved therein, with file name "metrics.txt".
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 11,453 | 36.80198 | 80 | py |
models | models-master/research/delf/delf/python/detect_to_retrieve/__init__.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for Detect-to-Retrieve technique."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from delf.python.detect_to_retrieve import aggregation_extraction
from delf.python.detect_to_retrieve import boxes_and_features_extraction
# pylint: enable=unused-import
| 1,047 | 42.666667 | 80 | py |
models | models-master/research/delf/delf/python/detect_to_retrieve/extract_aggregation.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts aggregation for images from Revisited Oxford/Paris datasets.
The program checks whether the aggregated representation for an image already
exists, and skips computation for images that already have one.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from absl import app
from delf.python.datasets.revisited_op import dataset
from delf.python.detect_to_retrieve import aggregation_extraction
cmd_args = None
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read list of images from dataset file.
print('Reading list of images from dataset file...')
query_list, index_list, _ = dataset.ReadDatasetFile(
cmd_args.dataset_file_path)
if cmd_args.use_query_images:
image_list = query_list
else:
image_list = index_list
num_images = len(image_list)
print('done! Found %d images' % num_images)
aggregation_extraction.ExtractAggregatedRepresentationsToFiles(
image_names=image_list,
features_dir=cmd_args.features_dir,
aggregation_config_path=cmd_args.aggregation_config_path,
mapping_path=cmd_args.index_mapping_path,
output_aggregation_dir=cmd_args.output_aggregation_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--aggregation_config_path',
type=str,
default='/tmp/aggregation_config.pbtxt',
help="""
Path to AggregationConfig proto text file with configuration to be used
for extraction.
""")
parser.add_argument(
'--dataset_file_path',
type=str,
default='/tmp/gnd_roxford5k.mat',
help="""
Dataset file for Revisited Oxford or Paris dataset, in .mat format.
""")
parser.add_argument(
'--use_query_images',
type=lambda x: (str(x).lower() == 'true'),
default=False,
help="""
If True, processes the query images of the dataset. If False, processes
the database (ie, index) images.
""")
parser.add_argument(
'--features_dir',
type=str,
default='/tmp/features',
help="""
Directory where image features are located, all in .delf format.
""")
parser.add_argument(
'--index_mapping_path',
type=str,
default='',
help="""
Optional CSV file which maps each .delf file name to the index image ID
and detected box ID. If regional aggregation is performed, this should be
set. Otherwise, this is ignored.
Usually this file is obtained as an output from the
`extract_index_boxes_and_features.py` script.
""")
parser.add_argument(
'--output_aggregation_dir',
type=str,
default='/tmp/aggregation',
help="""
Directory where aggregation output will be written to. Each image's
features will be written to a file with same name, and extension replaced
by one of
['.vlad', '.asmk', '.asmk_star', '.rvlad', '.rasmk', '.rasmk_star'].
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 3,888 | 33.114035 | 80 | py |
models | models-master/research/delf/delf/python/detect_to_retrieve/aggregation_extraction.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library to extract/save feature aggregation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import time
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from delf import aggregation_config_pb2
from delf import datum_io
from delf import feature_aggregation_extractor
from delf import feature_io
# Aliases for aggregation types.
_VLAD = aggregation_config_pb2.AggregationConfig.VLAD
_ASMK = aggregation_config_pb2.AggregationConfig.ASMK
_ASMK_STAR = aggregation_config_pb2.AggregationConfig.ASMK_STAR
# Extensions.
_DELF_EXTENSION = '.delf'
_VLAD_EXTENSION_SUFFIX = 'vlad'
_ASMK_EXTENSION_SUFFIX = 'asmk'
_ASMK_STAR_EXTENSION_SUFFIX = 'asmk_star'
# Interval (number of images) at which extraction progress is logged.
_STATUS_CHECK_ITERATIONS = 50
def _ReadMappingBasenameToBoxNames(input_path, index_image_names):
"""Reads mapping from image name to DELF file names for each box.
Args:
input_path: Path to CSV file containing mapping.
index_image_names: List containing index image names, in order, for the
dataset under consideration.
Returns:
images_to_box_feature_files: Dict. key=string (image name); value=list of
strings (file names containing DELF features for boxes).
"""
images_to_box_feature_files = {}
with tf.io.gfile.GFile(input_path, 'r') as f:
reader = csv.DictReader(f)
for row in reader:
index_image_name = index_image_names[int(row['index_image_id'])]
if index_image_name not in images_to_box_feature_files:
images_to_box_feature_files[index_image_name] = []
images_to_box_feature_files[index_image_name].append(row['name'])
return images_to_box_feature_files
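# Illustrative note (added; not in the original module): given a mapping CSV
# with rows
#   name,index_image_id,box_id
#   img_a,0,-1
#   img_a_0,0,0
#   img_a_1,0,1
# and index_image_names = ['img_a'], the function returns
# {'img_a': ['img_a', 'img_a_0', 'img_a_1']}. All names here are made up.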
def ExtractAggregatedRepresentationsToFiles(image_names, features_dir,
aggregation_config_path,
mapping_path,
output_aggregation_dir):
"""Extracts aggregated feature representations, saving them to files.
  It checks whether the aggregated representation for an image already exists,
  and skips computation for images that already have one.
Args:
image_names: List of image names. These are used to compose input file names
for the feature files, and the output file names for aggregated
representations.
features_dir: Directory where DELF features are located.
aggregation_config_path: Path to AggregationConfig proto text file with
configuration to be used for extraction.
mapping_path: Optional CSV file which maps each .delf file name to the index
image ID and detected box ID. If regional aggregation is performed, this
should be set. Otherwise, this is ignored.
output_aggregation_dir: Directory where aggregation output will be written
to.
Raises:
ValueError: If AggregationConfig is malformed, or `mapping_path` is
missing.
"""
num_images = len(image_names)
# Parse AggregationConfig proto, and select output extension.
config = aggregation_config_pb2.AggregationConfig()
with tf.io.gfile.GFile(aggregation_config_path, 'r') as f:
text_format.Merge(f.read(), config)
output_extension = '.'
if config.use_regional_aggregation:
output_extension += 'r'
if config.aggregation_type == _VLAD:
output_extension += _VLAD_EXTENSION_SUFFIX
elif config.aggregation_type == _ASMK:
output_extension += _ASMK_EXTENSION_SUFFIX
elif config.aggregation_type == _ASMK_STAR:
output_extension += _ASMK_STAR_EXTENSION_SUFFIX
else:
raise ValueError('Invalid aggregation type: %d' % config.aggregation_type)
# Read index mapping path, if provided.
if mapping_path:
images_to_box_feature_files = _ReadMappingBasenameToBoxNames(
mapping_path, image_names)
# Create output directory if necessary.
if not tf.io.gfile.exists(output_aggregation_dir):
tf.io.gfile.makedirs(output_aggregation_dir)
extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
config)
start = time.time()
for i in range(num_images):
if i == 0:
print('Starting to extract aggregation from images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.time() - start)
print('Processing image %d out of %d, last %d '
'images took %f seconds' %
(i, num_images, _STATUS_CHECK_ITERATIONS, elapsed))
start = time.time()
image_name = image_names[i]
# Compose output file name, skip extraction for this image if it already
# exists.
output_aggregation_filename = os.path.join(output_aggregation_dir,
image_name + output_extension)
if tf.io.gfile.exists(output_aggregation_filename):
print('Skipping %s' % image_name)
continue
# Load DELF features.
if config.use_regional_aggregation:
if not mapping_path:
raise ValueError(
'Requested regional aggregation, but mapping_path was not '
'provided')
descriptors_list = []
num_features_per_box = []
for box_feature_file in images_to_box_feature_files[image_name]:
delf_filename = os.path.join(features_dir,
box_feature_file + _DELF_EXTENSION)
_, _, box_descriptors, _, _ = feature_io.ReadFromFile(delf_filename)
# If `box_descriptors` is empty, reshape it such that it can be
# concatenated with other descriptors.
if not box_descriptors.shape[0]:
box_descriptors = np.reshape(box_descriptors,
[0, config.feature_dimensionality])
descriptors_list.append(box_descriptors)
num_features_per_box.append(box_descriptors.shape[0])
descriptors = np.concatenate(descriptors_list)
else:
input_delf_filename = os.path.join(features_dir,
image_name + _DELF_EXTENSION)
_, _, descriptors, _, _ = feature_io.ReadFromFile(input_delf_filename)
# If `descriptors` is empty, reshape it to avoid extraction failure.
if not descriptors.shape[0]:
descriptors = np.reshape(descriptors,
[0, config.feature_dimensionality])
num_features_per_box = None
# Extract and save aggregation. If using VLAD, only
# `aggregated_descriptors` needs to be saved.
(aggregated_descriptors,
feature_visual_words) = extractor.Extract(descriptors,
num_features_per_box)
if config.aggregation_type == _VLAD:
datum_io.WriteToFile(aggregated_descriptors,
output_aggregation_filename)
else:
datum_io.WritePairToFile(aggregated_descriptors,
feature_visual_words.astype('uint32'),
output_aggregation_filename)
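# Minimal usage sketch (added for illustration; not part of the original
# module). Paths below are hypothetical placeholders; an empty `mapping_path`
# means plain (non-regional) aggregation.
def _ExampleExtractAggregatedRepresentations():
  """Illustrative-only helper; never called by this module."""
  ExtractAggregatedRepresentationsToFiles(
      image_names=['img_a', 'img_b'],
      features_dir='/tmp/features',
      aggregation_config_path='/tmp/aggregation_config.pbtxt',
      mapping_path='',
      output_aggregation_dir='/tmp/aggregation')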
| 7,621 | 38.28866 | 80 | py |
models | models-master/research/delf/delf/python/detect_to_retrieve/cluster_delf_features.py | # Lint as: python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Clusters DELF features using the K-means algorithm.
All DELF local feature descriptors for a given dataset's index images are loaded
as the input.
Note that:
- we only use features extracted from whole images (no features from boxes are
used).
- the codebook should be trained on Paris images for Oxford retrieval
experiments, and vice-versa.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
from absl import app
import numpy as np
import tensorflow as tf
from delf import feature_io
from delf.python.datasets.revisited_op import dataset
cmd_args = None
# Extensions.
_DELF_EXTENSION = '.delf'
# Default DELF dimensionality.
_DELF_DIM = 128
# Interval (number of images) at which feature-collection progress is logged.
_STATUS_CHECK_ITERATIONS = 100
class _IteratorInitHook(tf.estimator.SessionRunHook):
"""Hook to initialize data iterator after session is created."""
def __init__(self):
super(_IteratorInitHook, self).__init__()
self.iterator_initializer_fn = None
def after_create_session(self, session, coord):
"""Initialize the iterator after the session has been created."""
del coord
self.iterator_initializer_fn(session)
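# Illustrative note (added; not in the original module): `_IteratorInitHook` is
# paired with the KMeans estimator in main() below. `_get_input_fn` fills in
# `iterator_initializer_fn`, which runs once the tf.compat.v1 session exists
# and feeds the in-memory descriptor matrix into the dataset iterator.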
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Process output directory.
if tf.io.gfile.exists(cmd_args.output_cluster_dir):
raise RuntimeError(
'output_cluster_dir = %s already exists. This may indicate that a '
'previous run already wrote checkpoints in this directory, which would '
        'lead to incorrect training. Please re-run this script specifying a '
        'non-existent directory.' % cmd_args.output_cluster_dir)
else:
tf.io.gfile.makedirs(cmd_args.output_cluster_dir)
# Read list of index images from dataset file.
print('Reading list of index images from dataset file...')
_, index_list, _ = dataset.ReadDatasetFile(cmd_args.dataset_file_path)
num_images = len(index_list)
print('done! Found %d images' % num_images)
# Loop over list of index images and collect DELF features.
features_for_clustering = []
  start = time.time()
print('Starting to collect features from index images...')
for i in range(num_images):
if i > 0 and i % _STATUS_CHECK_ITERATIONS == 0:
      elapsed = (time.time() - start)
print('Processing index image %d out of %d, last %d '
'images took %f seconds' %
(i, num_images, _STATUS_CHECK_ITERATIONS, elapsed))
      start = time.time()
features_filename = index_list[i] + _DELF_EXTENSION
features_fullpath = os.path.join(cmd_args.features_dir, features_filename)
_, _, features, _, _ = feature_io.ReadFromFile(features_fullpath)
if features.size != 0:
assert features.shape[1] == _DELF_DIM
for feature in features:
features_for_clustering.append(feature)
features_for_clustering = np.array(features_for_clustering, dtype=np.float32)
print('All features were loaded! There are %d features, each with %d '
'dimensions' %
(features_for_clustering.shape[0], features_for_clustering.shape[1]))
# Run K-means clustering.
def _get_input_fn():
"""Helper function to create input function and hook for training.
Returns:
input_fn: Input function for k-means Estimator training.
init_hook: Hook used to load data during training.
"""
init_hook = _IteratorInitHook()
def _input_fn():
"""Produces tf.data.Dataset object for k-means training.
Returns:
Tensor with the data for training.
"""
features_placeholder = tf.compat.v1.placeholder(
tf.float32, features_for_clustering.shape)
delf_dataset = tf.data.Dataset.from_tensor_slices((features_placeholder))
delf_dataset = delf_dataset.shuffle(1000).batch(
features_for_clustering.shape[0])
iterator = tf.compat.v1.data.make_initializable_iterator(delf_dataset)
def _initializer_fn(sess):
"""Initialize dataset iterator, feed in the data."""
sess.run(
iterator.initializer,
feed_dict={features_placeholder: features_for_clustering})
init_hook.iterator_initializer_fn = _initializer_fn
return iterator.get_next()
return _input_fn, init_hook
input_fn, init_hook = _get_input_fn()
kmeans = tf.compat.v1.estimator.experimental.KMeans(
num_clusters=cmd_args.num_clusters,
model_dir=cmd_args.output_cluster_dir,
use_mini_batch=False,
)
print('Starting K-means clustering...')
  start = time.time()
for i in range(cmd_args.num_iterations):
kmeans.train(input_fn, hooks=[init_hook])
average_sum_squared_error = kmeans.evaluate(
input_fn, hooks=[init_hook])['score'] / features_for_clustering.shape[0]
    elapsed = (time.time() - start)
print('K-means iteration %d (out of %d) took %f seconds, '
'average-sum-of-squares: %f' %
(i, cmd_args.num_iterations, elapsed, average_sum_squared_error))
    start = time.time()
print('K-means clustering finished!')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--dataset_file_path',
type=str,
default='/tmp/gnd_roxford5k.mat',
help="""
Dataset file for Revisited Oxford or Paris dataset, in .mat format. The
list of index images loaded from this file is used to collect local
features, which are assumed to be in <image_name>.delf file format.
""")
parser.add_argument(
'--features_dir',
type=str,
default='/tmp/features',
help="""
Directory where DELF feature files are to be found.
""")
parser.add_argument(
'--num_clusters',
type=int,
default=1024,
help="""
Number of clusters to use.
""")
parser.add_argument(
'--num_iterations',
type=int,
default=50,
help="""
Number of iterations to use.
""")
parser.add_argument(
'--output_cluster_dir',
type=str,
default='/tmp/cluster',
help="""
Directory where clustering outputs are written to. This directory should
not exist before running this script; it will be created during
clustering.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 7,190 | 32.446512 | 80 | py |
models | models-master/research/delf/delf/python/normalization_layers/normalization_test.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
import tensorflow as tf
from delf.python.normalization_layers import normalization
class NormalizationsTest(tf.test.TestCase):
def testL2Normalization(self):
x = tf.constant([-4.0, 0.0, 4.0])
layer = normalization.L2Normalization()
# Run tested function.
result = layer(x, axis=0)
# Define expected result.
exp_output = [-0.70710677, 0.0, 0.70710677]
# Compare actual and expected.
self.assertAllClose(exp_output, result)
if __name__ == '__main__':
tf.test.main()
| 1,233 | 32.351351 | 80 | py |
models | models-master/research/delf/delf/python/normalization_layers/__init__.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================== | 687 | 48.142857 | 80 | py |
models | models-master/research/delf/delf/python/normalization_layers/normalization.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalization layer definitions."""
import tensorflow as tf
class L2Normalization(tf.keras.layers.Layer):
"""Normalization layer using L2 norm."""
def __init__(self):
"""Initialization of the L2Normalization layer."""
super(L2Normalization, self).__init__()
# A lower bound value for the norm.
self.eps = 1e-6
def call(self, x, axis=1):
"""Invokes the L2Normalization instance.
Args:
x: A Tensor.
axis: Dimension along which to normalize. A scalar or a vector of
integers.
Returns:
norm: A Tensor with the same shape as `x`.
"""
return tf.nn.l2_normalize(x, axis, epsilon=self.eps)
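# Minimal usage sketch (added for illustration; not part of the original
# module): for a batch of descriptors shaped [B, D], axis=1 gives each row unit
# L2 norm, e.g.
#   layer = L2Normalization()
#   normalized = layer(tf.random.uniform([4, 128]), axis=1)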
| 1,348 | 31.902439 | 80 | py |
models | models-master/research/delf/delf/python/delg/extract_features.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts DELG features for images from Revisited Oxford/Paris datasets.
Note that query images are cropped before feature extraction, as required by the
evaluation protocols of these datasets.
The types of extracted features (local and/or global) depend on the input
DelfConfig.
The program checks if features already exist, and skips computation for those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from delf import delf_config_pb2
from delf import datum_io
from delf import feature_io
from delf import utils
from delf.python.datasets.revisited_op import dataset
from delf import extractor
FLAGS = flags.FLAGS
flags.DEFINE_string(
'delf_config_path', '/tmp/delf_config_example.pbtxt',
'Path to DelfConfig proto text file with configuration to be used for DELG '
'extraction. Local features are extracted if use_local_features is True; '
'global features are extracted if use_global_features is True.')
flags.DEFINE_string(
'dataset_file_path', '/tmp/gnd_roxford5k.mat',
'Dataset file for Revisited Oxford or Paris dataset, in .mat format.')
flags.DEFINE_string(
'images_dir', '/tmp/images',
'Directory where dataset images are located, all in .jpg format.')
flags.DEFINE_enum('image_set', 'query', ['query', 'index'],
'Whether to extract features from query or index images.')
flags.DEFINE_string(
'output_features_dir', '/tmp/features',
"Directory where DELG features will be written to. Each image's features "
'will be written to files with same name but different extension: the '
'global feature is written to a file with extension .delg_global and the '
'local features are written to a file with extension .delg_local.')
# Extensions.
_DELG_GLOBAL_EXTENSION = '.delg_global'
_DELG_LOCAL_EXTENSION = '.delg_local'
_IMAGE_EXTENSION = '.jpg'
# Interval (number of images) at which extraction progress is logged.
_STATUS_CHECK_ITERATIONS = 50
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read list of images from dataset file.
print('Reading list of images from dataset file...')
query_list, index_list, ground_truth = dataset.ReadDatasetFile(
FLAGS.dataset_file_path)
if FLAGS.image_set == 'query':
image_list = query_list
else:
image_list = index_list
num_images = len(image_list)
print('done! Found %d images' % num_images)
# Parse DelfConfig proto.
config = delf_config_pb2.DelfConfig()
with tf.io.gfile.GFile(FLAGS.delf_config_path, 'r') as f:
text_format.Parse(f.read(), config)
# Create output directory if necessary.
if not tf.io.gfile.exists(FLAGS.output_features_dir):
tf.io.gfile.makedirs(FLAGS.output_features_dir)
extractor_fn = extractor.MakeExtractor(config)
start = time.time()
for i in range(num_images):
if i == 0:
print('Starting to extract features...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.time() - start)
print('Processing image %d out of %d, last %d '
'images took %f seconds' %
(i, num_images, _STATUS_CHECK_ITERATIONS, elapsed))
start = time.time()
image_name = image_list[i]
input_image_filename = os.path.join(FLAGS.images_dir,
image_name + _IMAGE_EXTENSION)
# Compose output file name and decide if image should be skipped.
should_skip_global = True
should_skip_local = True
if config.use_global_features:
output_global_feature_filename = os.path.join(
FLAGS.output_features_dir, image_name + _DELG_GLOBAL_EXTENSION)
if not tf.io.gfile.exists(output_global_feature_filename):
should_skip_global = False
if config.use_local_features:
output_local_feature_filename = os.path.join(
FLAGS.output_features_dir, image_name + _DELG_LOCAL_EXTENSION)
if not tf.io.gfile.exists(output_local_feature_filename):
should_skip_local = False
if should_skip_global and should_skip_local:
print('Skipping %s' % image_name)
continue
pil_im = utils.RgbLoader(input_image_filename)
resize_factor = 1.0
if FLAGS.image_set == 'query':
# Crop query image according to bounding box.
original_image_size = max(pil_im.size)
bbox = [int(round(b)) for b in ground_truth[i]['bbx']]
pil_im = pil_im.crop(bbox)
cropped_image_size = max(pil_im.size)
resize_factor = cropped_image_size / original_image_size
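      # Note (added comment): resize_factor records how much the longer side
      # shrank due to cropping, e.g. 0.5 if the crop's longer side is half the
      # original image's; it is passed to the extractor call below.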
im = np.array(pil_im)
# Extract and save features.
extracted_features = extractor_fn(im, resize_factor)
if config.use_global_features:
global_descriptor = extracted_features['global_descriptor']
datum_io.WriteToFile(global_descriptor, output_global_feature_filename)
if config.use_local_features:
locations = extracted_features['local_features']['locations']
descriptors = extracted_features['local_features']['descriptors']
feature_scales = extracted_features['local_features']['scales']
attention = extracted_features['local_features']['attention']
feature_io.WriteToFile(output_local_feature_filename, locations,
feature_scales, descriptors, attention)
if __name__ == '__main__':
app.run(main)
| 6,155 | 36.536585 | 80 | py |
models | models-master/research/delf/delf/python/delg/measure_latency.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Times DELF/G extraction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import app
from absl import flags
import numpy as np
from six.moves import range
import tensorflow as tf
from google.protobuf import text_format
from delf import delf_config_pb2
from delf import utils
from delf import extractor
FLAGS = flags.FLAGS
flags.DEFINE_string(
'delf_config_path', '/tmp/delf_config_example.pbtxt',
'Path to DelfConfig proto text file with configuration to be used for DELG '
'extraction. Local features are extracted if use_local_features is True; '
'global features are extracted if use_global_features is True.')
flags.DEFINE_string('list_images_path', '/tmp/list_images.txt',
'Path to list of images whose features will be extracted.')
flags.DEFINE_integer('repeat_per_image', 10,
'Number of times to repeat extraction per image.')
flags.DEFINE_boolean(
'binary_local_features', False,
'Whether to binarize local features after extraction, and take this extra '
'latency into account. This should only be used if use_local_features is '
'set in the input DelfConfig from `delf_config_path`.')
# Interval (number of images) at which extraction progress is logged.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.io.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
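# Illustrative note (added; not in the original module): the list file is plain
# text with one image path per line, e.g. (hypothetical paths):
#   /tmp/images/image_0001.jpg
#   /tmp/images/image_0002.jpg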
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read list of images.
print('Reading list of images...')
image_paths = _ReadImageList(FLAGS.list_images_path)
num_images = len(image_paths)
print(f'done! Found {num_images} images')
# Load images in memory.
print('Loading images, %d times per image...' % FLAGS.repeat_per_image)
im_array = []
for filename in image_paths:
im = np.array(utils.RgbLoader(filename))
for _ in range(FLAGS.repeat_per_image):
im_array.append(im)
np.random.shuffle(im_array)
print('done!')
# Parse DelfConfig proto.
config = delf_config_pb2.DelfConfig()
with tf.io.gfile.GFile(FLAGS.delf_config_path, 'r') as f:
text_format.Parse(f.read(), config)
extractor_fn = extractor.MakeExtractor(config)
start = time.time()
for i, im in enumerate(im_array):
if i == 0:
print('Starting to extract DELF features from images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.time() - start)
print(f'Processing image {i} out of {len(im_array)}, last '
            f'{_STATUS_CHECK_ITERATIONS} images took {elapsed} seconds, '
            f'i.e. {elapsed/_STATUS_CHECK_ITERATIONS} secs/image.')
start = time.time()
# Extract and save features.
extracted_features = extractor_fn(im)
# Binarize local features, if desired (and if there are local features).
if (config.use_local_features and FLAGS.binary_local_features and
extracted_features['local_features']['attention'].size):
packed_descriptors = np.packbits(
extracted_features['local_features']['descriptors'] > 0, axis=1)
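      # Note (added comment): e.g. with the default 128-D DELF descriptors,
      # np.packbits along axis=1 packs each descriptor into 16 bytes (128 / 8).
      # The result is intentionally unused; only its extra latency matters here.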
if __name__ == '__main__':
app.run(main)
| 4,098 | 33.158333 | 80 | py |
models | models-master/research/delf/delf/python/delg/perform_retrieval.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Performs DELG-based image retrieval on Revisited Oxford/Paris datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
from delf import datum_io
from delf.python.datasets.revisited_op import dataset
from delf.python.detect_to_retrieve import image_reranking
FLAGS = flags.FLAGS
flags.DEFINE_string(
'dataset_file_path', '/tmp/gnd_roxford5k.mat',
'Dataset file for Revisited Oxford or Paris dataset, in .mat format.')
flags.DEFINE_string('query_features_dir', '/tmp/features/query',
'Directory where query DELG features are located.')
flags.DEFINE_string('index_features_dir', '/tmp/features/index',
'Directory where index DELG features are located.')
flags.DEFINE_boolean(
'use_geometric_verification', False,
'If True, performs re-ranking using local feature-based geometric '
'verification.')
flags.DEFINE_float(
'local_descriptor_matching_threshold', 1.0,
'Optional, only used if `use_geometric_verification` is True. '
'Threshold below which a pair of local descriptors is considered '
'a potential match, and will be fed into RANSAC.')
flags.DEFINE_float(
'ransac_residual_threshold', 20.0,
'Optional, only used if `use_geometric_verification` is True. '
'Residual error threshold for considering matches as inliers, used in '
'RANSAC algorithm.')
flags.DEFINE_boolean(
'use_ratio_test', False,
'Optional, only used if `use_geometric_verification` is True. '
'Whether to use ratio test for local feature matching.')
flags.DEFINE_string(
'output_dir', '/tmp/retrieval',
'Directory where retrieval output will be written to. A file containing '
"metrics for this run is saved therein, with file name 'metrics.txt'.")
# Extensions.
_DELG_GLOBAL_EXTENSION = '.delg_global'
_DELG_LOCAL_EXTENSION = '.delg_local'
# Precision-recall ranks to use in metric computation.
_PR_RANKS = (1, 5, 10)
# Interval (number of images) at which descriptor-loading progress is logged.
_STATUS_CHECK_LOAD_ITERATIONS = 50
# Output file names.
_METRICS_FILENAME = 'metrics.txt'
def _ReadDelgGlobalDescriptors(input_dir, image_list):
"""Reads DELG global features.
Args:
input_dir: Directory where features are located.
image_list: List of image names for which to load features.
Returns:
global_descriptors: NumPy array of shape (len(image_list), D), where D
corresponds to the global descriptor dimensionality.
"""
num_images = len(image_list)
global_descriptors = []
print('Starting to collect global descriptors for %d images...' % num_images)
start = time.time()
for i in range(num_images):
if i > 0 and i % _STATUS_CHECK_LOAD_ITERATIONS == 0:
elapsed = (time.time() - start)
print('Reading global descriptors for image %d out of %d, last %d '
'images took %f seconds' %
(i, num_images, _STATUS_CHECK_LOAD_ITERATIONS, elapsed))
start = time.time()
descriptor_filename = image_list[i] + _DELG_GLOBAL_EXTENSION
descriptor_fullpath = os.path.join(input_dir, descriptor_filename)
global_descriptors.append(datum_io.ReadFromFile(descriptor_fullpath))
return np.array(global_descriptors)
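# Illustrative note (added; not in the original module): with D-dimensional
# global descriptors (e.g. D = 2048 for common DELG models) and N index images,
# the returned array has shape (N, D). The retrieval loop below scores images
# with a plain dot product, which equals cosine similarity when the stored
# descriptors are L2-normalized.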
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Parse dataset to obtain query/index images, and ground-truth.
print('Parsing dataset...')
query_list, index_list, ground_truth = dataset.ReadDatasetFile(
FLAGS.dataset_file_path)
num_query_images = len(query_list)
num_index_images = len(index_list)
(_, medium_ground_truth,
hard_ground_truth) = dataset.ParseEasyMediumHardGroundTruth(ground_truth)
print('done! Found %d queries and %d index images' %
(num_query_images, num_index_images))
# Read global features.
query_global_features = _ReadDelgGlobalDescriptors(FLAGS.query_features_dir,
query_list)
index_global_features = _ReadDelgGlobalDescriptors(FLAGS.index_features_dir,
index_list)
# Compute similarity between query and index images, potentially re-ranking
# with geometric verification.
ranks_before_gv = np.zeros([num_query_images, num_index_images],
dtype='int32')
if FLAGS.use_geometric_verification:
medium_ranks_after_gv = np.zeros([num_query_images, num_index_images],
dtype='int32')
hard_ranks_after_gv = np.zeros([num_query_images, num_index_images],
dtype='int32')
for i in range(num_query_images):
print('Performing retrieval with query %d (%s)...' % (i, query_list[i]))
start = time.time()
# Compute similarity between global descriptors.
similarities = np.dot(index_global_features, query_global_features[i])
ranks_before_gv[i] = np.argsort(-similarities)
# Re-rank using geometric verification.
if FLAGS.use_geometric_verification:
medium_ranks_after_gv[i] = image_reranking.RerankByGeometricVerification(
input_ranks=ranks_before_gv[i],
initial_scores=similarities,
query_name=query_list[i],
index_names=index_list,
query_features_dir=FLAGS.query_features_dir,
index_features_dir=FLAGS.index_features_dir,
junk_ids=set(medium_ground_truth[i]['junk']),
local_feature_extension=_DELG_LOCAL_EXTENSION,
ransac_seed=0,
descriptor_matching_threshold=FLAGS
.local_descriptor_matching_threshold,
ransac_residual_threshold=FLAGS.ransac_residual_threshold,
use_ratio_test=FLAGS.use_ratio_test)
hard_ranks_after_gv[i] = image_reranking.RerankByGeometricVerification(
input_ranks=ranks_before_gv[i],
initial_scores=similarities,
query_name=query_list[i],
index_names=index_list,
query_features_dir=FLAGS.query_features_dir,
index_features_dir=FLAGS.index_features_dir,
junk_ids=set(hard_ground_truth[i]['junk']),
local_feature_extension=_DELG_LOCAL_EXTENSION,
ransac_seed=0,
descriptor_matching_threshold=FLAGS
.local_descriptor_matching_threshold,
ransac_residual_threshold=FLAGS.ransac_residual_threshold,
use_ratio_test=FLAGS.use_ratio_test)
elapsed = (time.time() - start)
print('done! Retrieval for query %d took %f seconds' % (i, elapsed))
# Create output directory if necessary.
if not tf.io.gfile.exists(FLAGS.output_dir):
tf.io.gfile.makedirs(FLAGS.output_dir)
# Compute metrics.
medium_metrics = dataset.ComputeMetrics(ranks_before_gv, medium_ground_truth,
_PR_RANKS)
hard_metrics = dataset.ComputeMetrics(ranks_before_gv, hard_ground_truth,
_PR_RANKS)
if FLAGS.use_geometric_verification:
medium_metrics_after_gv = dataset.ComputeMetrics(medium_ranks_after_gv,
medium_ground_truth,
_PR_RANKS)
hard_metrics_after_gv = dataset.ComputeMetrics(hard_ranks_after_gv,
hard_ground_truth, _PR_RANKS)
# Write metrics to file.
mean_average_precision_dict = {
'medium': medium_metrics[0],
'hard': hard_metrics[0]
}
mean_precisions_dict = {'medium': medium_metrics[1], 'hard': hard_metrics[1]}
mean_recalls_dict = {'medium': medium_metrics[2], 'hard': hard_metrics[2]}
if FLAGS.use_geometric_verification:
mean_average_precision_dict.update({
'medium_after_gv': medium_metrics_after_gv[0],
'hard_after_gv': hard_metrics_after_gv[0]
})
mean_precisions_dict.update({
'medium_after_gv': medium_metrics_after_gv[1],
'hard_after_gv': hard_metrics_after_gv[1]
})
mean_recalls_dict.update({
'medium_after_gv': medium_metrics_after_gv[2],
'hard_after_gv': hard_metrics_after_gv[2]
})
dataset.SaveMetricsFile(mean_average_precision_dict, mean_precisions_dict,
mean_recalls_dict, _PR_RANKS,
os.path.join(FLAGS.output_dir, _METRICS_FILENAME))
if __name__ == '__main__':
app.run(main)
| 9,144 | 39.644444 | 80 | py |
models | models-master/research/delf/delf/python/pooling_layers/pooling.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pooling layers definitions."""
import tensorflow as tf
class MAC(tf.keras.layers.Layer):
"""Global max pooling (MAC) layer.
  Maximum Activations of Convolutions (MAC) is constructed by max-pooling
  each feature map over all spatial locations. See
https://arxiv.org/abs/1511.05879 for a reference.
"""
def call(self, x, axis=None):
"""Invokes the MAC pooling instance.
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return mac(x, axis=axis)
class SPoC(tf.keras.layers.Layer):
"""Average pooling (SPoC) layer.
  Sum-pooled convolutional features (SPoC) are based on sum pooling of the
  deep features. See https://arxiv.org/pdf/1510.07493.pdf for a reference.
"""
def call(self, x, axis=None):
"""Invokes the SPoC instance.
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return spoc(x, axis)
class GeM(tf.keras.layers.Layer):
"""Generalized mean pooling (GeM) layer.
Generalized Mean Pooling (GeM) computes the generalized mean of each
channel in a tensor. See https://arxiv.org/abs/1711.02512 for a reference.
"""
def __init__(self, power=3.):
"""Initialization of the generalized mean pooling (GeM) layer.
Args:
power: Float power > 0 is an inverse exponent parameter, used during the
generalized mean pooling computation. Setting this exponent as power > 1
increases the contrast of the pooled feature map and focuses on the
salient features of the image. GeM is a generalization of the average
pooling commonly used in classification networks (power = 1) and of
spatial max-pooling layer (power = inf).
"""
super(GeM, self).__init__()
self.power = power
self.eps = 1e-6
def call(self, x, axis=None):
"""Invokes the GeM instance.
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return gem(x, power=self.power, eps=self.eps, axis=axis)
class GeMPooling2D(tf.keras.layers.Layer):
"""Generalized mean pooling (GeM) pooling operation for spatial data."""
def __init__(self,
power=20.,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format='channels_last'):
"""Initialization of GeMPooling2D.
Args:
      power: Float, power > 0 is an inverse exponent parameter (GeM power).
pool_size: Integer or tuple of 2 integers, factors by which to downscale
(vertical, horizontal)
strides: Integer, tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`.
padding: One of `valid` or `same`. `valid` means no padding. `same`
results in padding evenly to the left/right or up/down of the input such
that output has the same height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `(batch, height, width,
channels)` while `channels_first` corresponds to inputs with shape
`(batch, channels, height, width)`.
"""
super(GeMPooling2D, self).__init__()
self.power = power
self.eps = 1e-6
self.pool_size = pool_size
self.strides = strides
self.padding = padding.upper()
data_format_conv = {
'channels_last': 'NHWC',
'channels_first': 'NCHW',
}
self.data_format = data_format_conv[data_format]
def call(self, x):
tmp = tf.pow(x, self.power)
tmp = tf.nn.avg_pool(tmp, self.pool_size, self.strides, self.padding,
self.data_format)
out = tf.pow(tmp, 1. / self.power)
return out
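# Illustrative note (added; not in the original module): GeMPooling2D reduces
# to average pooling for power=1 and approaches max pooling as power grows
# large; e.g. GeMPooling2D(power=1., pool_size=(2, 2)) matches
# tf.keras.layers.AveragePooling2D(pool_size=(2, 2)) on the same input.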
def mac(x, axis=None):
"""Performs global max pooling (MAC).
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return tf.reduce_max(x, axis=axis, keepdims=False)
def spoc(x, axis=None):
"""Performs average pooling (SPoC).
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return tf.reduce_mean(x, axis=axis, keepdims=False)
def gem(x, axis=None, power=3., eps=1e-6):
"""Performs generalized mean pooling (GeM).
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
    power: Float, power > 0, is an inverse exponent parameter (GeM power).
eps: Float, parameter for numerical stability.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
tmp = tf.pow(tf.maximum(x, eps), power)
out = tf.pow(tf.reduce_mean(tmp, axis=axis, keepdims=False), 1. / power)
return out
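# Minimal usage sketch (illustrative only, not part of the DELF API): all three
# poolings above map a [B, H, W, D] feature map to a [B, D] global descriptor.
#   feats = tf.random.uniform([2, 7, 7, 128])
#   descriptor_max = mac(feats)             # global max pooling
#   descriptor_avg = spoc(feats)            # global average pooling
#   descriptor_gem = gem(feats, power=3.)   # power=1 ~ spoc, power -> inf ~ mac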
| 6,125 | 30.415385 | 80 | py |
models | models-master/research/delf/delf/python/pooling_layers/pooling_test.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pooling layers."""
import tensorflow as tf
from delf.python.pooling_layers import pooling
class PoolingsTest(tf.test.TestCase):
def testMac(self):
x = tf.constant([[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]])
# Run tested function.
result = pooling.mac(x)
# Define expected result.
exp_output = [[6., 7.]]
# Compare actual and expected.
self.assertAllClose(exp_output, result)
def testSpoc(self):
x = tf.constant([[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]])
# Run tested function.
result = pooling.spoc(x)
# Define expected result.
exp_output = [[3., 4.]]
# Compare actual and expected.
self.assertAllClose(exp_output, result)
def testGem(self):
x = tf.constant([[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]])
# Run tested function.
result = pooling.gem(x, power=3., eps=1e-6)
# Define expected result.
exp_output = [[4.1601677, 4.9866314]]
# Compare actual and expected.
self.assertAllClose(exp_output, result)
def testGeMPooling2D(self):
# Create a testing tensor.
x = tf.constant([[[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]]])
x = tf.reshape(x, [1, 3, 3, 1])
    # Checking GeMPooling2D's relation to MaxPooling2D for large values of
    # `p`.
max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
strides=(1, 1), padding='valid')
out_max = max_pool_2d(x)
gem_pool_2d = pooling.GeMPooling2D(power=30., pool_size=(2, 2),
strides=(1, 1), padding='valid')
out_gem_max = gem_pool_2d(x)
# Check that for large `p` GeMPooling2D is close to MaxPooling2D.
self.assertAllEqual(out_max, tf.round(out_gem_max))
    # Checking GeMPooling2D's relation to AveragePooling2D for the value
    # of `p` = 1.
avg_pool_2d = tf.keras.layers.AveragePooling2D(pool_size=(2, 2),
strides=(1, 1),
padding='valid')
out_avg = avg_pool_2d(x)
gem_pool_2d = pooling.GeMPooling2D(power=1., pool_size=(2, 2),
strides=(1, 1), padding='valid')
out_gem_avg = gem_pool_2d(x)
# Check that for `p` equals 1., GeMPooling2D becomes AveragePooling2D.
self.assertAllEqual(out_avg, out_gem_avg)
if __name__ == '__main__':
tf.test.main()
| 3,148 | 36.047059 | 80 | py |
models | models-master/research/delf/delf/python/pooling_layers/__init__.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================== | 687 | 48.142857 | 80 | py |
models | models-master/research/delf/delf/protos/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/cvt_text/preprocessing.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Preprocesses pretrained word embeddings, creates dev sets for tasks without a
provided one, and figures out the set of output classes for each task.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from base import configure
from base import embeddings
from base import utils
from task_specific.word_level import word_level_data
def main(data_dir='./data'):
random.seed(0)
utils.log("BUILDING WORD VOCABULARY/EMBEDDINGS")
for pretrained in ['glove.6B.300d.txt']:
config = configure.Config(data_dir=data_dir,
for_preprocessing=True,
pretrained_embeddings=pretrained,
word_embedding_size=300)
embeddings.PretrainedEmbeddingLoader(config).build()
utils.log("CONSTRUCTING DEV SETS")
for task_name in ["chunk"]:
# chunking does not come with a provided dev split, so create one by
# selecting a random subset of the data
config = configure.Config(data_dir=data_dir,
for_preprocessing=True)
task_data_dir = os.path.join(config.raw_data_topdir, task_name) + '/'
train_sentences = word_level_data.TaggedDataLoader(
config, task_name, False).get_labeled_sentences("train")
random.shuffle(train_sentences)
write_sentences(task_data_dir + 'train_subset.txt', train_sentences[1500:])
write_sentences(task_data_dir + 'dev.txt', train_sentences[:1500])
utils.log("WRITING LABEL MAPPINGS")
for task_name in ["chunk"]:
for i, label_encoding in enumerate(["BIOES"]):
config = configure.Config(data_dir=data_dir,
for_preprocessing=True,
label_encoding=label_encoding)
token_level = task_name in ["ccg", "pos", "depparse"]
loader = word_level_data.TaggedDataLoader(config, task_name, token_level)
if token_level:
if i != 0:
continue
utils.log("WRITING LABEL MAPPING FOR", task_name.upper())
else:
utils.log(" Writing label mapping for", task_name.upper(),
label_encoding)
utils.log(" ", len(loader.label_mapping), "classes")
utils.write_cpickle(loader.label_mapping,
loader.label_mapping_path)
def write_sentences(fname, sentences):
with open(fname, 'w') as f:
for words, tags in sentences:
for word, tag in zip(words, tags):
f.write(word + " " + tag + "\n")
f.write("\n")
if __name__ == '__main__':
main()
| 3,280 | 36.284091 | 80 | py |
models | models-master/research/cvt_text/cvt.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run training and evaluation for CVT text models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from base import configure
from base import utils
from training import trainer
from training import training_progress
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('mode', 'train', '"train" or "eval"')
tf.app.flags.DEFINE_string('model_name', 'default_model',
'A name identifying the model being '
'trained/evaluated')
def main():
utils.heading('SETUP')
config = configure.Config(mode=FLAGS.mode, model_name=FLAGS.model_name)
config.write()
with tf.Graph().as_default() as graph:
model_trainer = trainer.Trainer(config)
summary_writer = tf.summary.FileWriter(config.summaries_dir)
checkpoints_saver = tf.train.Saver(max_to_keep=1)
best_model_saver = tf.train.Saver(max_to_keep=1)
init_op = tf.global_variables_initializer()
graph.finalize()
with tf.Session() as sess:
sess.run(init_op)
progress = training_progress.TrainingProgress(
config, sess, checkpoints_saver, best_model_saver,
config.mode == 'train')
utils.log()
if config.mode == 'train':
utils.heading('START TRAINING ({:})'.format(config.model_name))
model_trainer.train(sess, progress, summary_writer)
elif config.mode == 'eval':
utils.heading('RUN EVALUATION ({:})'.format(config.model_name))
progress.best_model_saver.restore(sess, tf.train.latest_checkpoint(
config.checkpoints_dir))
model_trainer.evaluate_all_tasks(sess, summary_writer, None)
else:
raise ValueError('Mode must be "train" or "eval"')
if __name__ == '__main__':
main()
| 2,501 | 35.794118 | 80 | py |
models | models-master/research/cvt_text/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/cvt_text/corpus_processing/example.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for training examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from base import embeddings
CONTRACTION_WORDS = set(w + 'n' for w in
['do', 'does', 'did', 'is', 'are', 'was', 'were', 'has',
'have', 'had', 'could', 'would', 'should', 'ca', 'wo',
'ai', 'might'])
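# Illustrative example of the tokenization fix applied in Example.__init__
# below: the token pair ["don", "'t"] is rewritten to ["do", "n't"], so that
# contractions are split consistently across datasets.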
class Example(object):
def __init__(self, words, word_vocab, char_vocab):
words = words[:]
# Fix inconsistent tokenization between datasets
for i in range(len(words)):
if (words[i].lower() == '\'t' and i > 0 and
words[i - 1].lower() in CONTRACTION_WORDS):
words[i] = words[i - 1][-1] + words[i]
words[i - 1] = words[i - 1][:-1]
self.words = ([embeddings.START] +
[word_vocab[embeddings.normalize_word(w)] for w in words] +
[embeddings.END])
self.chars = ([[embeddings.MISSING]] +
[[char_vocab[c] for c in embeddings.normalize_chars(w)]
for w in words] +
[[embeddings.MISSING]])
def __repr__(self,):
inv_char_vocab = embeddings.get_inv_char_vocab()
return ' '.join([''.join([inv_char_vocab[c] for c in w])
for w in self.chars])
| 2,020 | 37.132075 | 80 | py |
models | models-master/research/cvt_text/corpus_processing/scorer.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract base class for evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
class Scorer(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
self._updated = False
self._cached_results = {}
@abc.abstractmethod
def update(self, examples, predictions, loss):
self._updated = True
@abc.abstractmethod
def get_loss(self):
pass
@abc.abstractmethod
def _get_results(self):
return []
def get_results(self, prefix=""):
results = self._get_results() if self._updated else self._cached_results
self._cached_results = results
self._updated = False
return [(prefix + k, v) for k, v in results]
def results_str(self):
return " - ".join(["{:}: {:.2f}".format(k, v)
for k, v in self.get_results()])
| 1,556 | 28.377358 | 80 | py |
models | models-master/research/cvt_text/corpus_processing/unlabeled_data.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reads data from a large unlabeled corpus."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from base import embeddings
from corpus_processing import example
from corpus_processing import minibatching
class UnlabeledDataReader(object):
def __init__(self, config, starting_file=0, starting_line=0, one_pass=False):
self.config = config
self.current_file = starting_file
self.current_line = starting_line
self._one_pass = one_pass
def endless_minibatches(self):
for examples in self.get_unlabeled_examples():
d = minibatching.Dataset(self.config, examples, 'unlabeled')
for mb in d.get_minibatches(self.config.train_batch_size):
yield mb
def _make_examples(self, sentences):
word_vocab = embeddings.get_word_vocab(self.config)
char_vocab = embeddings.get_char_vocab()
return [
example.Example(sentence, word_vocab, char_vocab)
for sentence in sentences
]
def get_unlabeled_examples(self):
lines = []
for words in self.get_unlabeled_sentences():
lines.append(words)
if len(lines) >= 10000:
yield self._make_examples(lines)
lines = []
def get_unlabeled_sentences(self):
while True:
file_ids_and_names = sorted([
(int(fname.split('-')[1].replace('.txt', '')), fname) for fname in
tf.gfile.ListDirectory(self.config.unsupervised_data)])
for fid, fname in file_ids_and_names:
if fid < self.current_file:
continue
self.current_file = fid
self.current_line = 0
with tf.gfile.FastGFile(os.path.join(self.config.unsupervised_data,
fname), 'r') as f:
for i, line in enumerate(f):
if i < self.current_line:
continue
self.current_line = i
words = line.strip().split()
if len(words) < self.config.max_sentence_length:
yield words
self.current_file = 0
self.current_line = 0
if self._one_pass:
break
| 2,834 | 33.573171 | 80 | py |
models | models-master/research/cvt_text/corpus_processing/minibatching.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for constructing minibatches."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
from base import embeddings
def get_bucket(config, l):
for i, (s, e) in enumerate(config.buckets):
if s <= l < e:
return config.buckets[i]
def build_array(nested_lists, dtype='int32'):
depth_to_sizes = collections.defaultdict(set)
_get_sizes(nested_lists, depth_to_sizes)
shape = [max(depth_to_sizes[depth]) for depth in range(len(depth_to_sizes))]
copy_depth = len(depth_to_sizes) - 1
while copy_depth > 0 and len(depth_to_sizes[copy_depth]) == 1:
copy_depth -= 1
arr = np.zeros(shape, dtype=dtype)
_fill_array(nested_lists, arr, copy_depth)
return arr
def _get_sizes(nested_lists, depth_to_sizes, depth=0):
depth_to_sizes[depth].add(len(nested_lists))
first_elem = nested_lists[0]
if (isinstance(first_elem, collections.Sequence) or
isinstance(first_elem, np.ndarray)):
for sublist in nested_lists:
_get_sizes(sublist, depth_to_sizes, depth + 1)
def _fill_array(nested_lists, arr, copy_depth, depth=0):
if depth == copy_depth:
for i in range(len(nested_lists)):
if isinstance(nested_lists[i], np.ndarray):
arr[i] = nested_lists[i]
else:
arr[i] = np.array(nested_lists[i])
else:
for i in range(len(nested_lists)):
_fill_array(nested_lists[i], arr[i], copy_depth, depth + 1)
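# Worked example (illustrative): build_array pads ragged nested lists with
# zeros up to the largest size found at each depth, e.g.
#   build_array([[1, 2, 3], [4, 5]])
# returns
#   np.array([[1, 2, 3],
#             [4, 5, 0]], dtype='int32')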
class Dataset(object):
def __init__(self, config, examples, task_name='unlabeled', is_training=False):
self._config = config
self.examples = examples
self.size = len(examples)
self.task_name = task_name
self.is_training = is_training
def get_minibatches(self, minibatch_size):
by_bucket = collections.defaultdict(list)
for i, e in enumerate(self.examples):
by_bucket[get_bucket(self._config, len(e.words))].append(i)
# save memory by weighting examples so longer sentences have
# smaller minibatches.
weight = lambda ind: np.sqrt(len(self.examples[ind].words))
total_weight = float(sum(weight(i) for i in range(len(self.examples))))
weight_per_batch = minibatch_size * total_weight / len(self.examples)
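    # Illustrative example with assumed numbers: for minibatch_size = 64 and
    # an average sentence length of 25 words (sqrt = 5), weight_per_batch is
    # 320, so a bucket of 100-word sentences (sqrt = 10) is cut into
    # minibatches of roughly 32 examples, while a bucket of 9-word sentences
    # (sqrt = 3) gets minibatches of roughly 107 examples.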
cumulative_weight = 0.0
id_batches = []
for _, ids in by_bucket.iteritems():
ids = np.array(ids)
np.random.shuffle(ids)
curr_batch, curr_weight = [], 0.0
for i, curr_id in enumerate(ids):
curr_batch.append(curr_id)
curr_weight += weight(curr_id)
if (i == len(ids) - 1 or cumulative_weight + curr_weight >=
(len(id_batches) + 1) * weight_per_batch):
cumulative_weight += curr_weight
id_batches.append(np.array(curr_batch))
curr_batch, curr_weight = [], 0.0
random.shuffle(id_batches)
for id_batch in id_batches:
yield self._make_minibatch(id_batch)
def endless_minibatches(self, minibatch_size):
while True:
for mb in self.get_minibatches(minibatch_size):
yield mb
def _make_minibatch(self, ids):
examples = [self.examples[i] for i in ids]
sentence_lengths = np.array([len(e.words) for e in examples])
max_word_length = min(max(max(len(word) for word in e.chars)
for e in examples),
self._config.max_word_length)
characters = [[[embeddings.PAD] + [embeddings.START] + w[:max_word_length] +
[embeddings.END] + [embeddings.PAD] for w in e.chars]
for e in examples]
# the first and last words are masked because they are start/end tokens
mask = build_array([[0] + [1] * (length - 2) + [0]
for length in sentence_lengths])
words = build_array([e.words for e in examples])
chars = build_array(characters, dtype='int16')
return Minibatch(
task_name=self.task_name,
size=ids.size,
examples=examples,
ids=ids,
teacher_predictions={},
words=words,
chars=chars,
lengths=sentence_lengths,
mask=mask,
)
Minibatch = collections.namedtuple('Minibatch', [
'task_name', 'size', 'examples', 'ids', 'teacher_predictions',
'words', 'chars', 'lengths', 'mask'
])
| 4,972 | 33.534722 | 81 | py |
models | models-master/research/cvt_text/corpus_processing/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/cvt_text/training/training_progress.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Tracks and saves training progress (models and other data such as the current
location in the lm1b corpus) for later reloading.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from base import utils
from corpus_processing import unlabeled_data
class TrainingProgress(object):
def __init__(self, config, sess, checkpoint_saver, best_model_saver,
restore_if_possible=True):
self.config = config
self.checkpoint_saver = checkpoint_saver
self.best_model_saver = best_model_saver
tf.gfile.MakeDirs(config.checkpoints_dir)
if restore_if_possible and tf.gfile.Exists(config.progress):
history, current_file, current_line = utils.load_cpickle(
config.progress, memoized=False)
self.history = history
self.unlabeled_data_reader = unlabeled_data.UnlabeledDataReader(
config, current_file, current_line)
utils.log("Continuing from global step", dict(self.history[-1])["step"],
"(lm1b file {:}, line {:})".format(current_file, current_line))
self.checkpoint_saver.restore(sess, tf.train.latest_checkpoint(
self.config.checkpoints_dir))
else:
utils.log("No previous checkpoint found - starting from scratch")
self.history = []
self.unlabeled_data_reader = (
unlabeled_data.UnlabeledDataReader(config))
def write(self, sess, global_step):
self.checkpoint_saver.save(sess, self.config.checkpoint,
global_step=global_step)
utils.write_cpickle(
(self.history, self.unlabeled_data_reader.current_file,
self.unlabeled_data_reader.current_line),
self.config.progress)
def save_if_best_dev_model(self, sess, global_step):
best_avg_score = 0
for i, results in enumerate(self.history):
if any("train" in metric for metric, value in results):
continue
total, count = 0, 0
for metric, value in results:
if "f1" in metric or "las" in metric or "accuracy" in metric:
total += value
count += 1
avg_score = total / count
if avg_score >= best_avg_score:
best_avg_score = avg_score
if i == len(self.history) - 1:
utils.log("New best model! Saving...")
self.best_model_saver.save(sess, self.config.best_model_checkpoint,
global_step=global_step)
| 3,163 | 38.55 | 80 | py |
models | models-master/research/cvt_text/training/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/cvt_text/training/trainer.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs training for CVT text models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bisect
import time
import numpy as np
import tensorflow as tf
from base import utils
from model import multitask_model
from task_specific import task_definitions
class Trainer(object):
def __init__(self, config):
self._config = config
self.tasks = [task_definitions.get_task(self._config, task_name)
for task_name in self._config.task_names]
utils.log('Loading Pretrained Embeddings')
pretrained_embeddings = utils.load_cpickle(self._config.word_embeddings)
utils.log('Building Model')
self._model = multitask_model.Model(
self._config, pretrained_embeddings, self.tasks)
utils.log()
def train(self, sess, progress, summary_writer):
heading = lambda s: utils.heading(s, '(' + self._config.model_name + ')')
trained_on_sentences = 0
start_time = time.time()
unsupervised_loss_total, unsupervised_loss_count = 0, 0
supervised_loss_total, supervised_loss_count = 0, 0
for mb in self._get_training_mbs(progress.unlabeled_data_reader):
if mb.task_name != 'unlabeled':
loss = self._model.train_labeled(sess, mb)
supervised_loss_total += loss
supervised_loss_count += 1
if mb.task_name == 'unlabeled':
self._model.run_teacher(sess, mb)
loss = self._model.train_unlabeled(sess, mb)
unsupervised_loss_total += loss
unsupervised_loss_count += 1
mb.teacher_predictions.clear()
trained_on_sentences += mb.size
global_step = self._model.get_global_step(sess)
if global_step % self._config.print_every == 0:
utils.log('step {:} - '
'supervised loss: {:.2f} - '
'unsupervised loss: {:.2f} - '
'{:.1f} sentences per second'.format(
global_step,
supervised_loss_total / max(1, supervised_loss_count),
unsupervised_loss_total / max(1, unsupervised_loss_count),
trained_on_sentences / (time.time() - start_time)))
unsupervised_loss_total, unsupervised_loss_count = 0, 0
supervised_loss_total, supervised_loss_count = 0, 0
if global_step % self._config.eval_dev_every == 0:
heading('EVAL ON DEV')
self.evaluate_all_tasks(sess, summary_writer, progress.history)
progress.save_if_best_dev_model(sess, global_step)
utils.log()
if global_step % self._config.eval_train_every == 0:
heading('EVAL ON TRAIN')
self.evaluate_all_tasks(sess, summary_writer, progress.history, True)
utils.log()
if global_step % self._config.save_model_every == 0:
heading('CHECKPOINTING MODEL')
progress.write(sess, global_step)
utils.log()
def evaluate_all_tasks(self, sess, summary_writer, history, train_set=False):
for task in self.tasks:
results = self._evaluate_task(sess, task, summary_writer, train_set)
if history is not None:
results.append(('step', self._model.get_global_step(sess)))
history.append(results)
if history is not None:
utils.write_cpickle(history, self._config.history_file)
def _evaluate_task(self, sess, task, summary_writer, train_set):
scorer = task.get_scorer()
data = task.train_set if train_set else task.val_set
for i, mb in enumerate(data.get_minibatches(self._config.test_batch_size)):
loss, batch_preds = self._model.test(sess, mb)
scorer.update(mb.examples, batch_preds, loss)
results = scorer.get_results(task.name +
('_train_' if train_set else '_dev_'))
utils.log(task.name.upper() + ': ' + scorer.results_str())
write_summary(summary_writer, results,
global_step=self._model.get_global_step(sess))
return results
def _get_training_mbs(self, unlabeled_data_reader):
datasets = [task.train_set for task in self.tasks]
weights = [np.sqrt(dataset.size) for dataset in datasets]
thresholds = np.cumsum([w / np.sum(weights) for w in weights])
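    # Illustrative example with assumed sizes: for two tasks with 10000 and
    # 2500 training examples, weights = [100.0, 50.0] and
    # thresholds = [0.667, 1.0], so the bisect call below picks the first
    # task about 2/3 of the time and the second about 1/3 of the time.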
labeled_mbs = [dataset.endless_minibatches(self._config.train_batch_size)
for dataset in datasets]
unlabeled_mbs = unlabeled_data_reader.endless_minibatches()
while True:
dataset_ind = bisect.bisect(thresholds, np.random.random())
yield next(labeled_mbs[dataset_ind])
if self._config.is_semisup:
yield next(unlabeled_mbs)
def write_summary(writer, results, global_step):
for k, v in results:
if 'f1' in k or 'acc' in k or 'loss' in k:
writer.add_summary(tf.Summary(
value=[tf.Summary.Value(tag=k, simple_value=v)]), global_step)
writer.flush()
| 5,474 | 38.107143 | 80 | py |
models | models-master/research/cvt_text/task_specific/task_definitions.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines all the tasks the model can learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from base import embeddings
from task_specific.word_level import depparse_module
from task_specific.word_level import depparse_scorer
from task_specific.word_level import tagging_module
from task_specific.word_level import tagging_scorers
from task_specific.word_level import word_level_data
class Task(object):
__metaclass__ = abc.ABCMeta
def __init__(self, config, name, loader):
self.config = config
self.name = name
self.loader = loader
self.train_set = self.loader.get_dataset("train")
self.val_set = self.loader.get_dataset("dev" if config.dev_set else "test")
@abc.abstractmethod
def get_module(self, inputs, encoder):
pass
@abc.abstractmethod
def get_scorer(self):
pass
class Tagging(Task):
def __init__(self, config, name, is_token_level=True):
super(Tagging, self).__init__(
config, name, word_level_data.TaggedDataLoader(
config, name, is_token_level))
self.n_classes = len(set(self.loader.label_mapping.values()))
self.is_token_level = is_token_level
def get_module(self, inputs, encoder):
return tagging_module.TaggingModule(
self.config, self.name, self.n_classes, inputs, encoder)
def get_scorer(self):
if self.is_token_level:
return tagging_scorers.AccuracyScorer()
else:
return tagging_scorers.EntityLevelF1Scorer(self.loader.label_mapping)
class DependencyParsing(Tagging):
def __init__(self, config, name):
super(DependencyParsing, self).__init__(config, name, True)
def get_module(self, inputs, encoder):
return depparse_module.DepparseModule(
self.config, self.name, self.n_classes, inputs, encoder)
def get_scorer(self):
return depparse_scorer.DepparseScorer(
self.n_classes, (embeddings.get_punctuation_ids(self.config)))
def get_task(config, name):
if name in ["ccg", "pos"]:
return Tagging(config, name, True)
elif name in ["chunk", "ner", "er"]:
return Tagging(config, name, False)
elif name == "depparse":
return DependencyParsing(config, name)
else:
raise ValueError("Unknown task", name)
| 2,961 | 31.195652 | 80 | py |
models | models-master/research/cvt_text/task_specific/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/cvt_text/task_specific/word_level/depparse_module.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dependency parsing module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from corpus_processing import minibatching
from model import model_helpers
from model import task_module
class DepparseModule(task_module.SemiSupervisedModule):
def __init__(self, config, task_name, n_classes, inputs, encoder):
super(DepparseModule, self).__init__()
self.task_name = task_name
self.n_classes = n_classes
self.labels = labels = tf.placeholder(tf.float32, [None, None, None],
name=task_name + '_labels')
class PredictionModule(object):
def __init__(self, name, dep_reprs, head_reprs, roll_direction=0):
self.name = name
with tf.variable_scope(name + '/predictions'):
# apply hidden layers to the input representations
arc_dep_hidden = model_helpers.project(
dep_reprs, config.projection_size, 'arc_dep_hidden')
arc_head_hidden = model_helpers.project(
head_reprs, config.projection_size, 'arc_head_hidden')
arc_dep_hidden = tf.nn.relu(arc_dep_hidden)
arc_head_hidden = tf.nn.relu(arc_head_hidden)
arc_head_hidden = tf.nn.dropout(arc_head_hidden, inputs.keep_prob)
arc_dep_hidden = tf.nn.dropout(arc_dep_hidden, inputs.keep_prob)
# bilinear classifier excluding the final dot product
arc_head = tf.layers.dense(
arc_head_hidden, config.depparse_projection_size, name='arc_head')
W = tf.get_variable('shared_W',
shape=[config.projection_size, n_classes,
config.depparse_projection_size])
Wr = tf.get_variable('relation_specific_W',
shape=[config.projection_size,
config.depparse_projection_size])
Wr_proj = tf.tile(tf.expand_dims(Wr, axis=-2), [1, n_classes, 1])
W += Wr_proj
arc_dep = tf.tensordot(arc_dep_hidden, W, axes=[[-1], [0]])
shape = tf.shape(arc_dep)
arc_dep = tf.reshape(arc_dep,
[shape[0], -1, config.depparse_projection_size])
# apply the transformer scaling trick to prevent dot products from
# getting too large (possibly not necessary)
scale = np.power(
config.depparse_projection_size, 0.25).astype('float32')
scale = tf.get_variable('scale', initializer=scale, dtype=tf.float32)
arc_dep /= scale
arc_head /= scale
# compute the scores for each candidate arc
word_scores = tf.matmul(arc_head, arc_dep, transpose_b=True)
root_scores = tf.layers.dense(arc_head, n_classes, name='root_score')
arc_scores = tf.concat([root_scores, word_scores], axis=-1)
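          # Shapes (T = padded sentence length, P = depparse_projection_size):
          # arc_head is [B, T, P] and arc_dep is [B, T * n_classes, P], so
          # word_scores is [B, T, T * n_classes], root_scores is
          # [B, T, n_classes], and arc_scores is [B, T, (T + 1) * n_classes]:
          # one score per (head candidate, relation) pair for every word.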
# disallow the model from making impossible predictions
mask = inputs.mask
mask_shape = tf.shape(mask)
mask = tf.tile(tf.expand_dims(mask, -1), [1, 1, n_classes])
mask = tf.reshape(mask, [-1, mask_shape[1] * n_classes])
mask = tf.concat([tf.ones((mask_shape[0], 1)),
tf.zeros((mask_shape[0], n_classes - 1)), mask],
axis=1)
mask = tf.tile(tf.expand_dims(mask, 1), [1, mask_shape[1], 1])
arc_scores += (mask - 1) * 100.0
self.logits = arc_scores
self.loss = model_helpers.masked_ce_loss(
self.logits, labels, inputs.mask,
roll_direction=roll_direction)
primary = PredictionModule(
'primary',
[encoder.uni_reprs, encoder.bi_reprs],
[encoder.uni_reprs, encoder.bi_reprs])
ps = [
PredictionModule(
'full',
[encoder.uni_reprs, encoder.bi_reprs],
[encoder.uni_reprs, encoder.bi_reprs]),
PredictionModule('fw_fw', [encoder.uni_fw], [encoder.uni_fw]),
PredictionModule('fw_bw', [encoder.uni_fw], [encoder.uni_bw]),
PredictionModule('bw_fw', [encoder.uni_bw], [encoder.uni_fw]),
PredictionModule('bw_bw', [encoder.uni_bw], [encoder.uni_bw]),
]
self.unsupervised_loss = sum(p.loss for p in ps)
self.supervised_loss = primary.loss
self.probs = tf.nn.softmax(primary.logits)
self.preds = tf.argmax(primary.logits, axis=-1)
def update_feed_dict(self, feed, mb):
if self.task_name in mb.teacher_predictions:
feed[self.labels] = mb.teacher_predictions[self.task_name]
elif mb.task_name != 'unlabeled':
labels = minibatching.build_array(
[[0] + e.labels + [0] for e in mb.examples])
feed[self.labels] = np.eye(
(1 + mb.words.shape[1]) * self.n_classes)[labels]
| 5,544 | 42.661417 | 80 | py |
models | models-master/research/cvt_text/task_specific/word_level/tagging_utils.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for sequence tagging tasks for entity-level tasks (e.g., NER)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def get_span_labels(sentence_tags, inv_label_mapping=None):
"""Go from token-level labels to list of entities (start, end, class)."""
if inv_label_mapping:
sentence_tags = [inv_label_mapping[i] for i in sentence_tags]
span_labels = []
last = 'O'
start = -1
for i, tag in enumerate(sentence_tags):
pos, _ = (None, 'O') if tag == 'O' else tag.split('-')
if (pos == 'S' or pos == 'B' or tag == 'O') and last != 'O':
span_labels.append((start, i - 1, last.split('-')[-1]))
if pos == 'B' or pos == 'S' or last == 'O':
start = i
last = tag
if sentence_tags[-1] != 'O':
span_labels.append((start, len(sentence_tags) - 1,
sentence_tags[-1].split('-')[-1]))
return span_labels
def get_tags(span_labels, length, encoding):
"""Converts a list of entities to token-label labels based on the provided
encoding (e.g., BIOES).
"""
tags = ['O' for _ in range(length)]
for s, e, t in span_labels:
for i in range(s, e + 1):
tags[i] = 'I-' + t
if 'E' in encoding:
tags[e] = 'E-' + t
if 'B' in encoding:
tags[s] = 'B-' + t
if 'S' in encoding and s == e:
tags[s] = 'S-' + t
return tags
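# Worked example (illustrative) of the round trip between the two encodings:
#   get_span_labels(['O', 'B-PER', 'E-PER', 'O'])  -> [(1, 2, 'PER')]
#   get_tags([(1, 2, 'PER')], 4, 'BIOES')  -> ['O', 'B-PER', 'E-PER', 'O']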
| 2,077 | 33.633333 | 80 | py |
models | models-master/research/cvt_text/task_specific/word_level/tagging_scorers.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence tagging evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from task_specific.word_level import tagging_utils
from task_specific.word_level import word_level_scorer
class AccuracyScorer(word_level_scorer.WordLevelScorer):
def __init__(self, auto_fail_label=None):
super(AccuracyScorer, self).__init__()
self._auto_fail_label = auto_fail_label
def _get_results(self):
correct, count = 0, 0
for example, preds in zip(self._examples, self._preds):
for y_true, y_pred in zip(example.labels, preds):
count += 1
correct += (1 if y_pred == y_true and y_true != self._auto_fail_label
else 0)
return [
("accuracy", 100.0 * correct / count),
("loss", self.get_loss())
]
class F1Scorer(word_level_scorer.WordLevelScorer):
__metaclass__ = abc.ABCMeta
def __init__(self):
super(F1Scorer, self).__init__()
self._n_correct, self._n_predicted, self._n_gold = 0, 0, 0
def _get_results(self):
if self._n_correct == 0:
p, r, f1 = 0, 0, 0
else:
p = 100.0 * self._n_correct / self._n_predicted
r = 100.0 * self._n_correct / self._n_gold
f1 = 2 * p * r / (p + r)
return [
("precision", p),
("recall", r),
("f1", f1),
("loss", self.get_loss()),
]
class EntityLevelF1Scorer(F1Scorer):
def __init__(self, label_mapping):
super(EntityLevelF1Scorer, self).__init__()
self._inv_label_mapping = {v: k for k, v in label_mapping.iteritems()}
def _get_results(self):
self._n_correct, self._n_predicted, self._n_gold = 0, 0, 0
for example, preds in zip(self._examples, self._preds):
sent_spans = set(tagging_utils.get_span_labels(
example.labels, self._inv_label_mapping))
span_preds = set(tagging_utils.get_span_labels(
preds, self._inv_label_mapping))
self._n_correct += len(sent_spans & span_preds)
self._n_gold += len(sent_spans)
self._n_predicted += len(span_preds)
return super(EntityLevelF1Scorer, self)._get_results()
| 2,837 | 32.785714 | 80 | py |
models | models-master/research/cvt_text/task_specific/word_level/word_level_data.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for processing word-level datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
import tensorflow as tf
from base import embeddings
from base import utils
from corpus_processing import example
from corpus_processing import minibatching
from task_specific.word_level import tagging_utils
class TaggedDataLoader(object):
def __init__(self, config, name, is_token_level):
self._config = config
self._task_name = name
self._raw_data_path = os.path.join(config.raw_data_topdir, name)
self._is_token_level = is_token_level
self.label_mapping_path = os.path.join(
config.preprocessed_data_topdir,
(name if is_token_level else
name + '_' + config.label_encoding) + '_label_mapping.pkl')
if self.label_mapping:
self._n_classes = len(set(self.label_mapping.values()))
else:
self._n_classes = None
def get_dataset(self, split):
if (split == 'train' and not self._config.for_preprocessing and
tf.gfile.Exists(os.path.join(self._raw_data_path, 'train_subset.txt'))):
split = 'train_subset'
return minibatching.Dataset(
self._config, self._get_examples(split), self._task_name)
def get_labeled_sentences(self, split):
sentences = []
path = os.path.join(self._raw_data_path, split + '.txt')
if not tf.gfile.Exists(path):
if self._config.for_preprocessing:
return []
else:
raise ValueError('Unable to load data from', path)
with tf.gfile.GFile(path, 'r') as f:
sentence = []
for line in f:
line = line.strip().split()
if not line:
if sentence:
words, tags = zip(*sentence)
sentences.append((words, tags))
sentence = []
continue
if line[0] == '-DOCSTART-':
continue
word, tag = line[0], line[-1]
sentence.append((word, tag))
return sentences
@property
def label_mapping(self):
if not self._config.for_preprocessing:
return utils.load_cpickle(self.label_mapping_path)
tag_counts = collections.Counter()
train_tags = set()
for split in ['train', 'dev', 'test']:
for words, tags in self.get_labeled_sentences(split):
if not self._is_token_level:
span_labels = tagging_utils.get_span_labels(tags)
tags = tagging_utils.get_tags(
span_labels, len(words), self._config.label_encoding)
for tag in tags:
if self._task_name == 'depparse':
tag = tag.split('-')[1]
tag_counts[tag] += 1
if split == 'train':
train_tags.add(tag)
if self._task_name == 'ccg':
# for CCG, there are tags in the test sets that aren't in the train set
# all tags not in the train set get mapped to a special label
# the model will never predict this label because it never sees it in the
# training set
not_in_train_tags = []
for tag, count in tag_counts.items():
if tag not in train_tags:
not_in_train_tags.append(tag)
label_mapping = {
label: i for i, label in enumerate(sorted(filter(
lambda t: t not in not_in_train_tags, tag_counts.keys())))
}
n = len(label_mapping)
for tag in not_in_train_tags:
label_mapping[tag] = n
else:
labels = sorted(tag_counts.keys())
if self._task_name == 'depparse':
labels.remove('root')
labels.insert(0, 'root')
label_mapping = {label: i for i, label in enumerate(labels)}
return label_mapping
def _get_examples(self, split):
word_vocab = embeddings.get_word_vocab(self._config)
char_vocab = embeddings.get_char_vocab()
examples = [
TaggingExample(
self._config, self._is_token_level, words, tags,
word_vocab, char_vocab, self.label_mapping, self._task_name)
for words, tags in self.get_labeled_sentences(split)]
if self._config.train_set_percent < 100:
utils.log('using reduced train set ({:}%)'.format(
self._config.train_set_percent))
random.shuffle(examples)
examples = examples[:int(len(examples) *
self._config.train_set_percent / 100.0)]
return examples
class TaggingExample(example.Example):
def __init__(self, config, is_token_level, words, original_tags,
word_vocab, char_vocab, label_mapping, task_name):
super(TaggingExample, self).__init__(words, word_vocab, char_vocab)
if is_token_level:
labels = original_tags
else:
span_labels = tagging_utils.get_span_labels(original_tags)
labels = tagging_utils.get_tags(
span_labels, len(words), config.label_encoding)
if task_name == 'depparse':
self.labels = []
for l in labels:
split = l.split('-')
self.labels.append(
len(label_mapping) * (0 if split[0] == '0' else 1 + int(split[0]))
+ label_mapping[split[1]])
else:
self.labels = [label_mapping[l] for l in labels]
| 5,835 | 35.024691 | 80 | py |
models | models-master/research/cvt_text/task_specific/word_level/tagging_module.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence tagging module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from corpus_processing import minibatching
from model import model_helpers
from model import task_module
class TaggingModule(task_module.SemiSupervisedModule):
def __init__(self, config, task_name, n_classes, inputs,
encoder):
super(TaggingModule, self).__init__()
self.task_name = task_name
self.n_classes = n_classes
self.labels = labels = tf.placeholder(tf.float32, [None, None, None],
name=task_name + '_labels')
class PredictionModule(object):
def __init__(self, name, input_reprs, roll_direction=0, activate=True):
self.name = name
with tf.variable_scope(name + '/predictions'):
projected = model_helpers.project(input_reprs, config.projection_size)
if activate:
projected = tf.nn.relu(projected)
self.logits = tf.layers.dense(projected, n_classes, name='predict')
targets = labels
targets *= (1 - inputs.label_smoothing)
targets += inputs.label_smoothing / n_classes
self.loss = model_helpers.masked_ce_loss(
self.logits, targets, inputs.mask, roll_direction=roll_direction)
primary = PredictionModule('primary',
([encoder.uni_reprs, encoder.bi_reprs]))
ps = [
PredictionModule('full', ([encoder.uni_reprs, encoder.bi_reprs]),
activate=False),
PredictionModule('forwards', [encoder.uni_fw]),
PredictionModule('backwards', [encoder.uni_bw]),
PredictionModule('future', [encoder.uni_fw], roll_direction=1),
PredictionModule('past', [encoder.uni_bw], roll_direction=-1),
]
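    # The auxiliary modules above realize cross-view training: each one sees
    # a restricted view of the encoder ('forwards'/'backwards' use a single
    # direction; 'future'/'past' additionally shift the targets one position
    # via roll_direction, assuming masked_ce_loss rolls the labels by that
    # amount) and, on unlabeled data, is trained to match the primary
    # module's predictions supplied through mb.teacher_predictions.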
self.unsupervised_loss = sum(p.loss for p in ps)
self.supervised_loss = primary.loss
self.probs = tf.nn.softmax(primary.logits)
self.preds = tf.argmax(primary.logits, axis=-1)
def update_feed_dict(self, feed, mb):
if self.task_name in mb.teacher_predictions:
feed[self.labels] = mb.teacher_predictions[self.task_name]
elif mb.task_name != 'unlabeled':
labels = minibatching.build_array(
[[0] + e.labels + [0] for e in mb.examples])
feed[self.labels] = np.eye(self.n_classes)[labels]
| 3,091 | 39.155844 | 80 | py |
models | models-master/research/cvt_text/task_specific/word_level/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/cvt_text/task_specific/word_level/word_level_scorer.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for word-level scorers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from corpus_processing import scorer
class WordLevelScorer(scorer.Scorer):
__metaclass__ = abc.ABCMeta
def __init__(self):
super(WordLevelScorer, self).__init__()
self._total_loss = 0
self._total_words = 0
self._examples = []
self._preds = []
def update(self, examples, predictions, loss):
super(WordLevelScorer, self).update(examples, predictions, loss)
n_words = 0
for example, preds in zip(examples, predictions):
self._examples.append(example)
self._preds.append(list(preds)[1:len(example.words) - 1])
n_words += len(example.words) - 2
self._total_loss += loss * n_words
self._total_words += n_words
def get_loss(self):
return self._total_loss / max(1, self._total_words)
| 1,604 | 31.755102 | 80 | py |
models | models-master/research/cvt_text/task_specific/word_level/depparse_scorer.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dependency parsing evaluation (computes UAS/LAS)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from task_specific.word_level import word_level_scorer
class DepparseScorer(word_level_scorer.WordLevelScorer):
def __init__(self, n_relations, punctuation):
super(DepparseScorer, self).__init__()
self._n_relations = n_relations
self._punctuation = punctuation if punctuation else None
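  # Labels are assumed to jointly encode head and relation as
  #   label = head_block * n_relations + relation_id
  # (see TaggingExample in word_level_data.py), so integer division by
  # n_relations recovers the predicted head for the unlabeled attachment
  # score (UAS), while an exact label match is required for the labeled
  # attachment score (LAS).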
def _get_results(self):
correct_unlabeled, correct_labeled, count = 0, 0, 0
for example, preds in zip(self._examples, self._preds):
for w, y_true, y_pred in zip(example.words[1:-1], example.labels, preds):
if w in self._punctuation:
continue
count += 1
correct_labeled += (1 if y_pred == y_true else 0)
correct_unlabeled += (1 if int(y_pred // self._n_relations) ==
int(y_true // self._n_relations) else 0)
return [
("las", 100.0 * correct_labeled / count),
("uas", 100.0 * correct_unlabeled / count),
("loss", self.get_loss()),
]
| 1,810 | 38.369565 | 80 | py |
models | models-master/research/cvt_text/base/embeddings.py | # coding=utf-8
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for handling word embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import numpy as np
import tensorflow as tf
from base import utils
_CHARS = [
# punctuation
'!', '\'', '#', '$', '%', '&', '"', '(', ')', '*', '+', ',', '-', '.',
'/', '\\', '_', '`', '{', '}', '[', ']', '<', '>', ':', ';', '?', '@',
# digits
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
# letters
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
# special characters
'£', '€', '®', '™', '�', '½', '»', '•', '—', '“', '”', '°', '‘', '’'
]
# words not in GloVe that still should have embeddings
_EXTRA_WORDS = [
# common digit patterns
'0/0', '0/00', '00/00', '0/000',
'00/00/00', '0/00/00', '00/00/0000', '0/00/0000',
'00-00', '00-00-00', '0-00-00', '00-00-0000', '0-00-0000', '0000-00-00',
'00-0-00-0', '00000000', '0:00.000', '00:00.000',
    '0%', '00%', '00.', '0000.', '0.0bn', '0.0m', '0-', '00-',
    # OntoNotes uses **f to represent formulas and -amp- instead of ampersands
'**f', '-amp-'
]
SPECIAL_TOKENS = ['<pad>', '<unk>', '<start>', '<end>', '<missing>']
NUM_CHARS = len(_CHARS) + len(SPECIAL_TOKENS)
PAD, UNK, START, END, MISSING = 0, 1, 2, 3, 4
class Vocabulary(collections.OrderedDict):
def __getitem__(self, w):
return self.get(w, UNK)
@utils.Memoize
def get_char_vocab():
characters = _CHARS
for i, special in enumerate(SPECIAL_TOKENS):
characters.insert(i, special)
return Vocabulary({c: i for i, c in enumerate(characters)})
@utils.Memoize
def get_inv_char_vocab():
return {i: c for c, i in get_char_vocab().items()}
def get_word_vocab(config):
return Vocabulary(utils.load_cpickle(config.word_vocabulary))
def get_word_embeddings(config):
return utils.load_cpickle(config.word_embeddings)
@utils.Memoize
def _punctuation_ids(vocab_path):
vocab = Vocabulary(utils.load_cpickle(vocab_path))
return set(i for w, i in vocab.iteritems() if w in [
'!', '...', '``', '{', '}', '(', ')', '[', ']', '--', '-', ',', '.',
"''", '`', ';', ':', '?'])
def get_punctuation_ids(config):
return _punctuation_ids(config.word_vocabulary)
class PretrainedEmbeddingLoader(object):
def __init__(self, config):
self.config = config
self.vocabulary = {}
self.vectors = []
self.vector_size = config.word_embedding_size
def _add_vector(self, w):
if w not in self.vocabulary:
self.vocabulary[w] = len(self.vectors)
self.vectors.append(np.zeros(self.vector_size, dtype='float32'))
def build(self):
utils.log('loading pretrained embeddings from',
self.config.pretrained_embeddings_file)
for special in SPECIAL_TOKENS:
self._add_vector(special)
for extra in _EXTRA_WORDS:
self._add_vector(extra)
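    # Each line of the pretrained embeddings file is expected to be in GloVe
    # text format, i.e. a token followed by word_embedding_size float values,
    # e.g. (illustrative): "the 0.418 0.24968 -0.41242 ... 0.013441".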
with tf.gfile.GFile(
self.config.pretrained_embeddings_file, 'r') as f:
for i, line in enumerate(f):
if i % 10000 == 0:
utils.log('on line', i)
split = line.decode('utf8').split()
w = normalize_word(split[0])
try:
vec = np.array(map(float, split[1:]), dtype='float32')
if vec.size != self.vector_size:
utils.log('vector for line', i, 'has size', vec.size, 'so skipping')
utils.log(line[:100] + '...')
continue
except:
utils.log('can\'t parse line', i, 'so skipping')
utils.log(line[:100] + '...')
continue
if w not in self.vocabulary:
self.vocabulary[w] = len(self.vectors)
self.vectors.append(vec)
utils.log('writing vectors!')
self._write()
def _write(self):
utils.write_cpickle(np.vstack(self.vectors), self.config.word_embeddings)
utils.write_cpickle(self.vocabulary, self.config.word_vocabulary)
def normalize_chars(w):
if w == '-LRB-':
return '('
elif w == '-RRB-':
return ')'
elif w == '-LCB-':
return '{'
elif w == '-RCB-':
return '}'
elif w == '-LSB-':
return '['
elif w == '-RSB-':
return ']'
return w.replace(r'\/', '/').replace(r'\*', '*')
def normalize_word(w):
return re.sub(r'\d', '0', normalize_chars(w).lower())
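# Worked examples (illustrative):
#   normalize_chars('-LRB-')    -> '('
#   normalize_word('COSTS')     -> 'costs'
#   normalize_word('1,234.56')  -> '0,000.00'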
| 5,180 | 29.839286 | 80 | py |
models | models-master/research/cvt_text/base/utils.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cPickle
import sys
import tensorflow as tf
class Memoize(object):
def __init__(self, f):
self.f = f
self.cache = {}
def __call__(self, *args):
if args not in self.cache:
self.cache[args] = self.f(*args)
return self.cache[args]
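# Illustrative use of Memoize (hypothetical function): results are cached by
# call arguments, so the pickle loaders below only read each file once.
#   @Memoize
#   def slow_square(x):
#     return x * x
#   slow_square(3)  # computed
#   slow_square(3)  # served from the cache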
def load_cpickle(path, memoized=True):
return _load_cpickle_memoize(path) if memoized else _load_cpickle(path)
def _load_cpickle(path):
with tf.gfile.GFile(path, 'r') as f:
return cPickle.load(f)
@Memoize
def _load_cpickle_memoize(path):
return _load_cpickle(path)
def write_cpickle(o, path):
tf.gfile.MakeDirs(path.rsplit('/', 1)[0])
with tf.gfile.GFile(path, 'w') as f:
cPickle.dump(o, f, -1)
def log(*args):
msg = ' '.join(map(str, args))
sys.stdout.write(msg + '\n')
sys.stdout.flush()
def heading(*args):
log()
log(80 * '=')
log(*args)
log(80 * '=')
| 1,687 | 23.463768 | 80 | py |
models | models-master/research/cvt_text/base/configure.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for storing hyperparameters, data locations, etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from os.path import join
import tensorflow as tf
class Config(object):
"""Stores everything needed to train a model."""
def __init__(self, **kwargs):
# general
self.data_dir = './data' # top directory for data (corpora, models, etc.)
self.model_name = 'default_model' # name identifying the current model
# mode
self.mode = 'train' # either "train" or "eval"
self.task_names = ['chunk'] # list of tasks this model will learn
# more than one trains a multi-task model
self.is_semisup = True # whether to use CVT or train purely supervised
self.for_preprocessing = False # is this for the preprocessing script
# embeddings
self.pretrained_embeddings = 'glove.6B.300d.txt' # which pretrained
# embeddings to use
self.word_embedding_size = 300 # size of each word embedding
# encoder
self.use_chars = True # whether to include a character-level cnn
self.char_embedding_size = 50 # size of character embeddings
self.char_cnn_filter_widths = [2, 3, 4] # filter widths for the char cnn
self.char_cnn_n_filters = 100 # number of filters for each filter width
self.unidirectional_sizes = [1024] # size of first Bi-LSTM
self.bidirectional_sizes = [512] # size of second Bi-LSTM
self.projection_size = 512 # projections size for LSTMs and hidden layers
# dependency parsing
self.depparse_projection_size = 128 # size of the representations used in
# the bilinear classifier for parsing
# tagging
self.label_encoding = 'BIOES' # label encoding scheme for entity-level
# tagging tasks
self.label_smoothing = 0.1 # label smoothing rate for tagging tasks
# optimization
self.lr = 0.5 # base learning rate
self.momentum = 0.9 # momentum
self.grad_clip = 1.0 # maximum gradient norm during optimization
self.warm_up_steps = 5000.0 # linearly ramp up the lr for this many steps
self.lr_decay = 0.005 # factor for gradually decaying the lr
# EMA
self.ema_decay = 0.998 # EMA coefficient for averaged model weights
self.ema_test = True # whether to use EMA weights at test time
self.ema_teacher = False # whether to use EMA weights for the teacher model
# regularization
self.labeled_keep_prob = 0.5 # 1 - dropout on labeled examples
self.unlabeled_keep_prob = 0.8 # 1 - dropout on unlabeled examples
# sizing
self.max_sentence_length = 100 # maximum length of unlabeled sentences
self.max_word_length = 20 # maximum length of words for char cnn
self.train_batch_size = 64 # train batch size
self.test_batch_size = 64 # test batch size
self.buckets = [(0, 15), (15, 40), (40, 1000)] # buckets for binning
# sentences by length
# training
self.print_every = 25 # how often to print out training progress
self.eval_dev_every = 500 # how often to evaluate on the dev set
self.eval_train_every = 2000 # how often to evaluate on the train set
self.save_model_every = 1000 # how often to checkpoint the model
# data set
self.train_set_percent = 100 # how much of the train set to use
for k, v in kwargs.iteritems():
if k not in self.__dict__:
raise ValueError("Unknown argument", k)
self.__dict__[k] = v
self.dev_set = self.mode == "train" # whether to evaluate on the dev or
# test set
# locations of various data files
self.raw_data_topdir = join(self.data_dir, 'raw_data')
self.unsupervised_data = join(
self.raw_data_topdir,
'unlabeled_data',
'1-billion-word-language-modeling-benchmark-r13output',
'training-monolingual.tokenized.shuffled')
self.pretrained_embeddings_file = join(
self.raw_data_topdir, 'pretrained_embeddings',
self.pretrained_embeddings)
self.preprocessed_data_topdir = join(self.data_dir, 'preprocessed_data')
self.embeddings_dir = join(self.preprocessed_data_topdir,
self.pretrained_embeddings.rsplit('.', 1)[0])
self.word_vocabulary = join(self.embeddings_dir, 'word_vocabulary.pkl')
self.word_embeddings = join(self.embeddings_dir, 'word_embeddings.pkl')
self.model_dir = join(self.data_dir, "models", self.model_name)
self.checkpoints_dir = join(self.model_dir, 'checkpoints')
self.checkpoint = join(self.checkpoints_dir, 'checkpoint.ckpt')
self.best_model_checkpoints_dir = join(
self.model_dir, 'best_model_checkpoints')
self.best_model_checkpoint = join(
self.best_model_checkpoints_dir, 'checkpoint.ckpt')
self.progress = join(self.checkpoints_dir, 'progress.pkl')
self.summaries_dir = join(self.model_dir, 'summaries')
self.history_file = join(self.model_dir, 'history.pkl')
def write(self):
tf.gfile.MakeDirs(self.model_dir)
with open(join(self.model_dir, 'config.json'), 'w') as f:
f.write(json.dumps(self.__dict__, sort_keys=True, indent=4,
separators=(',', ': ')))
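# Illustrative construction (hypothetical overrides): keyword arguments must
# match attributes defined in __init__, otherwise a ValueError is raised.
#   config = Config(model_name='chunking_model', task_names=['chunk'], lr=0.3)
#   config.write()  # saves config.json under ./data/models/chunking_model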
| 6,100 | 42.578571 | 80 | py |
models | models-master/research/cvt_text/base/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/cvt_text/model/task_module.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for task-specific modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
class SupervisedModule(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
self.supervised_loss = NotImplemented
self.probs = NotImplemented
self.preds = NotImplemented
@abc.abstractmethod
def update_feed_dict(self, feed, mb):
pass
class SemiSupervisedModule(SupervisedModule):
__metaclass__ = abc.ABCMeta
def __init__(self):
super(SemiSupervisedModule, self).__init__()
self.unsupervised_loss = NotImplemented
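# Illustrative concrete module (hypothetical tagging task): a subclass defines
# its own placeholders, fills in supervised_loss / probs / preds, and feeds its
# labels through update_feed_dict, e.g.
#   class TaggingModule(SupervisedModule):
#     def __init__(self, logits, mask):
#       super(TaggingModule, self).__init__()
#       self.labels = tf.placeholder(tf.int32, [None, None], name='labels')
#       self.probs = tf.nn.softmax(logits)
#       self.preds = tf.argmax(logits, axis=-1)
#       self.supervised_loss = model_helpers.masked_ce_loss(
#           logits, self.labels, mask, sparse=True)
#     def update_feed_dict(self, feed, mb):
#       feed[self.labels] = mb.labels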
| 1,317 | 28.288889 | 80 | py |
models | models-master/research/cvt_text/model/shared_inputs.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Placeholders for non-task-specific model inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Inputs(object):
def __init__(self, config):
self._config = config
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
self.label_smoothing = tf.placeholder(tf.float32, name='label_smoothing')
self.lengths = tf.placeholder(tf.int32, shape=[None], name='lengths')
self.mask = tf.placeholder(tf.float32, [None, None], name='mask')
self.words = tf.placeholder(tf.int32, shape=[None, None], name='words')
self.chars = tf.placeholder(tf.int32, shape=[None, None, None],
name='chars')
def create_feed_dict(self, mb, is_training):
cvt = mb.task_name == 'unlabeled'
return {
self.keep_prob: 1.0 if not is_training else
(self._config.unlabeled_keep_prob if cvt else
self._config.labeled_keep_prob),
self.label_smoothing: self._config.label_smoothing
if (is_training and not cvt) else 0.0,
self.lengths: mb.lengths,
self.words: mb.words,
self.chars: mb.chars,
self.mask: mb.mask.astype('float32')
}
| 1,989 | 39.612245 | 80 | py |
models | models-master/research/cvt_text/model/encoder.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CNN-BiLSTM sentence encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from base import embeddings
from model import model_helpers
class Encoder(object):
def __init__(self, config, inputs, pretrained_embeddings):
self._config = config
self._inputs = inputs
self.word_reprs = self._get_word_reprs(pretrained_embeddings)
self.uni_fw, self.uni_bw = self._get_unidirectional_reprs(self.word_reprs)
self.uni_reprs = tf.concat([self.uni_fw, self.uni_bw], axis=-1)
self.bi_fw, self.bi_bw, self.bi_reprs = self._get_bidirectional_reprs(
self.uni_reprs)
def _get_word_reprs(self, pretrained_embeddings):
with tf.variable_scope('word_embeddings'):
word_embedding_matrix = tf.get_variable(
'word_embedding_matrix', initializer=pretrained_embeddings)
word_embeddings = tf.nn.embedding_lookup(
word_embedding_matrix, self._inputs.words)
word_embeddings = tf.nn.dropout(word_embeddings, self._inputs.keep_prob)
word_embeddings *= tf.get_variable('emb_scale', initializer=1.0)
if not self._config.use_chars:
return word_embeddings
with tf.variable_scope('char_embeddings'):
char_embedding_matrix = tf.get_variable(
'char_embeddings',
shape=[embeddings.NUM_CHARS, self._config.char_embedding_size])
char_embeddings = tf.nn.embedding_lookup(char_embedding_matrix,
self._inputs.chars)
shape = tf.shape(char_embeddings)
char_embeddings = tf.reshape(
char_embeddings,
shape=[-1, shape[-2], self._config.char_embedding_size])
char_reprs = []
for filter_width in self._config.char_cnn_filter_widths:
conv = tf.layers.conv1d(
char_embeddings, self._config.char_cnn_n_filters, filter_width)
conv = tf.nn.relu(conv)
conv = tf.nn.dropout(tf.reduce_max(conv, axis=1),
self._inputs.keep_prob)
conv = tf.reshape(conv, shape=[-1, shape[1],
self._config.char_cnn_n_filters])
char_reprs.append(conv)
return tf.concat([word_embeddings] + char_reprs, axis=-1)
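  # Shape sketch (illustrative): for batch size B and sentence length T, the
  # representations above are [B, T, word_embedding_size] without the char CNN,
  # or [B, T, word_embedding_size +
  #     len(char_cnn_filter_widths) * char_cnn_n_filters] with it.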
def _get_unidirectional_reprs(self, word_reprs):
with tf.variable_scope('unidirectional_reprs'):
word_lstm_input_size = (
self._config.word_embedding_size if not self._config.use_chars else
(self._config.word_embedding_size +
len(self._config.char_cnn_filter_widths)
* self._config.char_cnn_n_filters))
word_reprs.set_shape([None, None, word_lstm_input_size])
(outputs_fw, outputs_bw), _ = tf.nn.bidirectional_dynamic_rnn(
model_helpers.multi_lstm_cell(self._config.unidirectional_sizes,
self._inputs.keep_prob,
self._config.projection_size),
model_helpers.multi_lstm_cell(self._config.unidirectional_sizes,
self._inputs.keep_prob,
self._config.projection_size),
word_reprs,
dtype=tf.float32,
sequence_length=self._inputs.lengths,
scope='unilstm'
)
return outputs_fw, outputs_bw
def _get_bidirectional_reprs(self, uni_reprs):
with tf.variable_scope('bidirectional_reprs'):
current_outputs = uni_reprs
outputs_fw, outputs_bw = None, None
for size in self._config.bidirectional_sizes:
(outputs_fw, outputs_bw), _ = tf.nn.bidirectional_dynamic_rnn(
model_helpers.lstm_cell(size, self._inputs.keep_prob,
self._config.projection_size),
model_helpers.lstm_cell(size, self._inputs.keep_prob,
self._config.projection_size),
current_outputs,
dtype=tf.float32,
sequence_length=self._inputs.lengths,
scope='bilstm'
)
current_outputs = tf.concat([outputs_fw, outputs_bw], axis=-1)
return outputs_fw, outputs_bw, current_outputs
| 4,881 | 42.981982 | 80 | py |
models | models-master/research/cvt_text/model/multitask_model.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multi-task and semi-supervised NLP model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from model import encoder
from model import shared_inputs
class Inference(object):
def __init__(self, config, inputs, pretrained_embeddings, tasks):
with tf.variable_scope('encoder'):
self.encoder = encoder.Encoder(config, inputs, pretrained_embeddings)
self.modules = {}
for task in tasks:
with tf.variable_scope(task.name):
self.modules[task.name] = task.get_module(inputs, self.encoder)
class Model(object):
def __init__(self, config, pretrained_embeddings, tasks):
self._config = config
self._tasks = tasks
self._global_step, self._optimizer = self._get_optimizer()
self._inputs = shared_inputs.Inputs(config)
with tf.variable_scope('model', reuse=tf.AUTO_REUSE) as scope:
inference = Inference(config, self._inputs, pretrained_embeddings,
tasks)
self._trainer = inference
self._tester = inference
self._teacher = inference
if config.ema_test or config.ema_teacher:
ema = tf.train.ExponentialMovingAverage(config.ema_decay)
model_vars = tf.get_collection("trainable_variables", "model")
ema_op = ema.apply(model_vars)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, ema_op)
def ema_getter(getter, name, *args, **kwargs):
var = getter(name, *args, **kwargs)
return ema.average(var)
scope.set_custom_getter(ema_getter)
inference_ema = Inference(
config, self._inputs, pretrained_embeddings, tasks)
if config.ema_teacher:
self._teacher = inference_ema
if config.ema_test:
self._tester = inference_ema
self._unlabeled_loss = self._get_consistency_loss(tasks)
self._unlabeled_train_op = self._get_train_op(self._unlabeled_loss)
self._labeled_train_ops = {}
for task in self._tasks:
task_loss = self._trainer.modules[task.name].supervised_loss
self._labeled_train_ops[task.name] = self._get_train_op(task_loss)
def _get_consistency_loss(self, tasks):
return sum([self._trainer.modules[task.name].unsupervised_loss
for task in tasks])
def _get_optimizer(self):
global_step = tf.get_variable('global_step', initializer=0, trainable=False)
warm_up_multiplier = (tf.minimum(tf.to_float(global_step),
self._config.warm_up_steps)
/ self._config.warm_up_steps)
decay_multiplier = 1.0 / (1 + self._config.lr_decay *
tf.sqrt(tf.to_float(global_step)))
lr = self._config.lr * warm_up_multiplier * decay_multiplier
optimizer = tf.train.MomentumOptimizer(lr, self._config.momentum)
return global_step, optimizer
def _get_train_op(self, loss):
grads, vs = zip(*self._optimizer.compute_gradients(loss))
grads, _ = tf.clip_by_global_norm(grads, self._config.grad_clip)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
return self._optimizer.apply_gradients(
zip(grads, vs), global_step=self._global_step)
def _create_feed_dict(self, mb, model, is_training=True):
feed = self._inputs.create_feed_dict(mb, is_training)
if mb.task_name in model.modules:
model.modules[mb.task_name].update_feed_dict(feed, mb)
else:
for module in model.modules.values():
module.update_feed_dict(feed, mb)
return feed
def train_unlabeled(self, sess, mb):
return sess.run([self._unlabeled_train_op, self._unlabeled_loss],
feed_dict=self._create_feed_dict(mb, self._trainer))[1]
def train_labeled(self, sess, mb):
return sess.run([self._labeled_train_ops[mb.task_name],
                     self._trainer.modules[mb.task_name].supervised_loss],
feed_dict=self._create_feed_dict(mb, self._trainer))[1]
def run_teacher(self, sess, mb):
result = sess.run({task.name: self._teacher.modules[task.name].probs
for task in self._tasks},
feed_dict=self._create_feed_dict(mb, self._teacher,
False))
for task_name, probs in result.iteritems():
mb.teacher_predictions[task_name] = probs.astype('float16')
def test(self, sess, mb):
return sess.run(
[self._tester.modules[mb.task_name].supervised_loss,
self._tester.modules[mb.task_name].preds],
feed_dict=self._create_feed_dict(mb, self._tester, False))
def get_global_step(self, sess):
return sess.run(self._global_step)
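# Illustrative training-loop sketch (hypothetical minibatch iterators): training
# roughly alternates supervised steps with CVT consistency steps, e.g.
#   model = Model(config, pretrained_embeddings, tasks)
#   with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     for labeled_mb, unlabeled_mb in zip(labeled_mbs, unlabeled_mbs):
#       supervised_loss = model.train_labeled(sess, labeled_mb)
#       model.run_teacher(sess, unlabeled_mb)   # fills mb.teacher_predictions
#       consistency_loss = model.train_unlabeled(sess, unlabeled_mb)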
| 5,442 | 39.924812 | 80 | py |
models | models-master/research/cvt_text/model/model_helpers.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for building the model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def project(input_layers, size, name='projection'):
return tf.add_n([tf.layers.dense(layer, size, name=name + '_' + str(i))
for i, layer in enumerate(input_layers)])
def lstm_cell(cell_size, keep_prob, num_proj):
return tf.contrib.rnn.DropoutWrapper(
tf.contrib.rnn.LSTMCell(cell_size, num_proj=min(cell_size, num_proj)),
output_keep_prob=keep_prob)
def multi_lstm_cell(cell_sizes, keep_prob, num_proj):
return tf.contrib.rnn.MultiRNNCell([lstm_cell(cell_size, keep_prob, num_proj)
for cell_size in cell_sizes])
def masked_ce_loss(logits, labels, mask, sparse=False, roll_direction=0):
if roll_direction != 0:
labels = _roll(labels, roll_direction, sparse)
mask *= _roll(mask, roll_direction, True)
ce = ((tf.nn.sparse_softmax_cross_entropy_with_logits if sparse
else tf.nn.softmax_cross_entropy_with_logits_v2)
(logits=logits, labels=labels))
return tf.reduce_sum(mask * ce) / tf.to_float(tf.reduce_sum(mask))
def _roll(arr, direction, sparse=False):
if sparse:
return tf.concat([arr[:, direction:], arr[:, :direction]], axis=1)
return tf.concat([arr[:, direction:, :], arr[:, :direction, :]], axis=1)
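# Illustrative note on roll_direction (hypothetical values): a nonzero roll
# shifts the labels so position t is scored against the label at
# t + roll_direction (used for forward/backward language-model-style auxiliary
# predictions); e.g. _roll([[a, b, c]], 1, sparse=True) -> [[b, c, a]].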
| 2,091 | 37.036364 | 80 | py |
models | models-master/research/cvt_text/model/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/deeplab/convert_to_tflite.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to convert a quantized deeplab model to tflite."""
from absl import app
from absl import flags
import numpy as np
from PIL import Image
import tensorflow as tf
flags.DEFINE_string('quantized_graph_def_path', None,
'Path to quantized graphdef.')
flags.DEFINE_string('output_tflite_path', None, 'Output TFlite model path.')
flags.DEFINE_string(
'input_tensor_name', None,
'Input tensor to TFlite model. This usually should be the input tensor to '
'model backbone.'
)
flags.DEFINE_string(
'output_tensor_name', 'ArgMax:0',
'Output tensor name of TFlite model. By default we output the raw semantic '
'label predictions.'
)
flags.DEFINE_string(
'test_image_path', None,
'Path to an image to test the consistency between input graphdef / '
'converted tflite model.'
)
FLAGS = flags.FLAGS
def convert_to_tflite(quantized_graphdef,
backbone_input_tensor,
output_tensor):
"""Helper method to convert quantized deeplab model to TFlite."""
with tf.Graph().as_default() as graph:
tf.graph_util.import_graph_def(quantized_graphdef, name='')
sess = tf.compat.v1.Session()
tflite_input = graph.get_tensor_by_name(backbone_input_tensor)
tflite_output = graph.get_tensor_by_name(output_tensor)
converter = tf.compat.v1.lite.TFLiteConverter.from_session(
sess, [tflite_input], [tflite_output])
converter.inference_type = tf.compat.v1.lite.constants.QUANTIZED_UINT8
input_arrays = converter.get_input_arrays()
converter.quantized_input_stats = {input_arrays[0]: (127.5, 127.5)}
return converter.convert()
def check_tflite_consistency(graph_def, tflite_model, image_path):
"""Runs tflite and frozen graph on same input, check their outputs match."""
# Load tflite model and check input size.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height, width = input_details[0]['shape'][1:3]
# Prepare input image data.
with tf.io.gfile.GFile(image_path, 'rb') as f:
image = Image.open(f)
image = np.asarray(image.convert('RGB').resize((width, height)))
image = np.expand_dims(image, 0)
# Output from tflite model.
interpreter.set_tensor(input_details[0]['index'], image)
interpreter.invoke()
output_tflite = interpreter.get_tensor(output_details[0]['index'])
with tf.Graph().as_default():
tf.graph_util.import_graph_def(graph_def, name='')
with tf.compat.v1.Session() as sess:
# Note here the graph will include preprocessing part of the graph
# (e.g. resize, pad, normalize). Given the input image size is at the
# crop size (backbone input size), resize / pad should be an identity op.
output_graph = sess.run(
FLAGS.output_tensor_name, feed_dict={'ImageTensor:0': image})
print('%.2f%% pixels have matched semantic labels.' % (
100 * np.mean(output_graph == output_tflite)))
def main(unused_argv):
with tf.io.gfile.GFile(FLAGS.quantized_graph_def_path, 'rb') as f:
graph_def = tf.compat.v1.GraphDef.FromString(f.read())
tflite_model = convert_to_tflite(
graph_def, FLAGS.input_tensor_name, FLAGS.output_tensor_name)
if FLAGS.output_tflite_path:
with tf.io.gfile.GFile(FLAGS.output_tflite_path, 'wb') as f:
f.write(tflite_model)
if FLAGS.test_image_path:
check_tflite_consistency(graph_def, tflite_model, FLAGS.test_image_path)
if __name__ == '__main__':
app.run(main)
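# Example invocation (illustrative paths; the input tensor name depends on the
# exported backbone, MobilenetV2 shown here as an assumption):
#   python convert_to_tflite.py \
#     --quantized_graph_def_path=/tmp/frozen_inference_graph.pb \
#     --input_tensor_name=MobilenetV2/MobilenetV2/input:0 \
#     --output_tflite_path=/tmp/deeplab_quantized.tflite \
#     --test_image_path=/tmp/test_image.jpg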
| 4,276 | 36.849558 | 80 | py |
models | models-master/research/deeplab/input_preprocess.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Prepares the data used for DeepLab training/evaluation."""
import tensorflow as tf
from deeplab.core import feature_extractor
from deeplab.core import preprocess_utils
# The probability of flipping the images and labels
# left-right during training
_PROB_OF_FLIP = 0.5
def preprocess_image_and_label(image,
label,
crop_height,
crop_width,
min_resize_value=None,
max_resize_value=None,
resize_factor=None,
min_scale_factor=1.,
max_scale_factor=1.,
scale_factor_step_size=0,
ignore_label=255,
is_training=True,
model_variant=None):
"""Preprocesses the image and label.
Args:
image: Input image.
label: Ground truth annotation label.
crop_height: The height value used to crop the image and label.
crop_width: The width value used to crop the image and label.
min_resize_value: Desired size of the smaller image side.
max_resize_value: Maximum allowed size of the larger image side.
resize_factor: Resized dimensions are multiple of factor plus one.
min_scale_factor: Minimum scale factor value.
max_scale_factor: Maximum scale factor value.
scale_factor_step_size: The step size from min scale factor to max scale
factor. The input is randomly scaled based on the value of
(min_scale_factor, max_scale_factor, scale_factor_step_size).
ignore_label: The label value which will be ignored for training and
evaluation.
is_training: If the preprocessing is used for training or not.
model_variant: Model variant (string) for choosing how to mean-subtract the
images. See feature_extractor.network_map for supported model variants.
Returns:
original_image: Original image (could be resized).
processed_image: Preprocessed image.
label: Preprocessed ground truth segmentation label.
Raises:
ValueError: Ground truth label not provided during training.
"""
if is_training and label is None:
raise ValueError('During training, label must be provided.')
if model_variant is None:
tf.logging.warning('Default mean-subtraction is performed. Please specify '
'a model_variant. See feature_extractor.network_map for '
'supported model variants.')
# Keep reference to original image.
original_image = image
processed_image = tf.cast(image, tf.float32)
if label is not None:
label = tf.cast(label, tf.int32)
# Resize image and label to the desired range.
if min_resize_value or max_resize_value:
[processed_image, label] = (
preprocess_utils.resize_to_range(
image=processed_image,
label=label,
min_size=min_resize_value,
max_size=max_resize_value,
factor=resize_factor,
align_corners=True))
# The `original_image` becomes the resized image.
original_image = tf.identity(processed_image)
# Data augmentation by randomly scaling the inputs.
if is_training:
scale = preprocess_utils.get_random_scale(
min_scale_factor, max_scale_factor, scale_factor_step_size)
processed_image, label = preprocess_utils.randomly_scale_image_and_label(
processed_image, label, scale)
processed_image.set_shape([None, None, 3])
# Pad image and label to have dimensions >= [crop_height, crop_width]
image_shape = tf.shape(processed_image)
image_height = image_shape[0]
image_width = image_shape[1]
target_height = image_height + tf.maximum(crop_height - image_height, 0)
target_width = image_width + tf.maximum(crop_width - image_width, 0)
# Pad image with mean pixel value.
mean_pixel = tf.reshape(
feature_extractor.mean_pixel(model_variant), [1, 1, 3])
processed_image = preprocess_utils.pad_to_bounding_box(
processed_image, 0, 0, target_height, target_width, mean_pixel)
if label is not None:
label = preprocess_utils.pad_to_bounding_box(
label, 0, 0, target_height, target_width, ignore_label)
# Randomly crop the image and label.
if is_training and label is not None:
processed_image, label = preprocess_utils.random_crop(
[processed_image, label], crop_height, crop_width)
processed_image.set_shape([crop_height, crop_width, 3])
if label is not None:
label.set_shape([crop_height, crop_width, 1])
if is_training:
# Randomly left-right flip the image and label.
processed_image, label, _ = preprocess_utils.flip_dim(
[processed_image, label], _PROB_OF_FLIP, dim=1)
return original_image, processed_image, label
| 5,580 | 38.864286 | 80 | py |
models | models-master/research/deeplab/vis.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Segmentation results visualization on a given set of images.
See model.py for more details and usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import numpy as np
from six.moves import range
import tensorflow as tf
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.contrib import training as contrib_training
from deeplab import common
from deeplab import model
from deeplab.datasets import data_generator
from deeplab.utils import save_annotation
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
# Settings for log directories.
flags.DEFINE_string('vis_logdir', None, 'Where to write the event logs.')
flags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.')
# Settings for visualizing the model.
flags.DEFINE_integer('vis_batch_size', 1,
'The number of images in each batch during evaluation.')
flags.DEFINE_list('vis_crop_size', '513,513',
'Crop size [height, width] for visualization.')
flags.DEFINE_integer('eval_interval_secs', 60 * 5,
'How often (in seconds) to run evaluation.')
# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
# one could use different atrous_rates/output_stride during training/evaluation.
flags.DEFINE_multi_integer('atrous_rates', None,
'Atrous rates for atrous spatial pyramid pooling.')
flags.DEFINE_integer('output_stride', 16,
'The ratio of input to output spatial resolution.')
# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test.
flags.DEFINE_multi_float('eval_scales', [1.0],
'The scales to resize images for evaluation.')
# Change to True for adding flipped images during test.
flags.DEFINE_bool('add_flipped_images', False,
'Add flipped images for evaluation or not.')
flags.DEFINE_integer(
'quantize_delay_step', -1,
'Steps to start quantized training. If < 0, will not quantize model.')
# Dataset settings.
flags.DEFINE_string('dataset', 'pascal_voc_seg',
'Name of the segmentation dataset.')
flags.DEFINE_string('vis_split', 'val',
'Which split of the dataset used for visualizing results')
flags.DEFINE_string('dataset_dir', None, 'Where the dataset reside.')
flags.DEFINE_enum('colormap_type', 'pascal', ['pascal', 'cityscapes', 'ade20k'],
'Visualization colormap type.')
flags.DEFINE_boolean('also_save_raw_predictions', False,
'Also save raw predictions.')
flags.DEFINE_integer('max_number_of_iterations', 0,
'Maximum number of visualization iterations. Will loop '
'indefinitely upon nonpositive values.')
# The folder where semantic segmentation predictions are saved.
_SEMANTIC_PREDICTION_SAVE_FOLDER = 'segmentation_results'
# The folder where raw semantic segmentation predictions are saved.
_RAW_SEMANTIC_PREDICTION_SAVE_FOLDER = 'raw_segmentation_results'
# The format to save image.
_IMAGE_FORMAT = '%06d_image'
# The format to save prediction
_PREDICTION_FORMAT = '%06d_prediction'
# To evaluate Cityscapes results on the evaluation server, the labels used
# during training should be mapped to the labels for evaluation.
_CITYSCAPES_TRAIN_ID_TO_EVAL_ID = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 31, 32, 33]
def _convert_train_id_to_eval_id(prediction, train_id_to_eval_id):
"""Converts the predicted label for evaluation.
There are cases where the training labels are not equal to the evaluation
labels. This function is used to perform the conversion so that we could
evaluate the results on the evaluation server.
Args:
prediction: Semantic segmentation prediction.
train_id_to_eval_id: A list mapping from train id to evaluation id.
Returns:
Semantic segmentation prediction whose labels have been changed.
"""
converted_prediction = prediction.copy()
for train_id, eval_id in enumerate(train_id_to_eval_id):
converted_prediction[prediction == train_id] = eval_id
return converted_prediction
def _process_batch(sess, original_images, semantic_predictions, image_names,
image_heights, image_widths, image_id_offset, save_dir,
raw_save_dir, train_id_to_eval_id=None):
"""Evaluates one single batch qualitatively.
Args:
sess: TensorFlow session.
original_images: One batch of original images.
semantic_predictions: One batch of semantic segmentation predictions.
image_names: Image names.
image_heights: Image heights.
image_widths: Image widths.
image_id_offset: Image id offset for indexing images.
save_dir: The directory where the predictions will be saved.
raw_save_dir: The directory where the raw predictions will be saved.
train_id_to_eval_id: A list mapping from train id to eval id.
"""
(original_images,
semantic_predictions,
image_names,
image_heights,
image_widths) = sess.run([original_images, semantic_predictions,
image_names, image_heights, image_widths])
num_image = semantic_predictions.shape[0]
for i in range(num_image):
image_height = np.squeeze(image_heights[i])
image_width = np.squeeze(image_widths[i])
original_image = np.squeeze(original_images[i])
semantic_prediction = np.squeeze(semantic_predictions[i])
crop_semantic_prediction = semantic_prediction[:image_height, :image_width]
# Save image.
save_annotation.save_annotation(
original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),
add_colormap=False)
# Save prediction.
save_annotation.save_annotation(
crop_semantic_prediction, save_dir,
_PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
colormap_type=FLAGS.colormap_type)
if FLAGS.also_save_raw_predictions:
image_filename = os.path.basename(image_names[i])
if train_id_to_eval_id is not None:
crop_semantic_prediction = _convert_train_id_to_eval_id(
crop_semantic_prediction,
train_id_to_eval_id)
save_annotation.save_annotation(
crop_semantic_prediction, raw_save_dir, image_filename,
add_colormap=False)
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Get dataset-dependent information.
dataset = data_generator.Dataset(
dataset_name=FLAGS.dataset,
split_name=FLAGS.vis_split,
dataset_dir=FLAGS.dataset_dir,
batch_size=FLAGS.vis_batch_size,
crop_size=[int(sz) for sz in FLAGS.vis_crop_size],
min_resize_value=FLAGS.min_resize_value,
max_resize_value=FLAGS.max_resize_value,
resize_factor=FLAGS.resize_factor,
model_variant=FLAGS.model_variant,
is_training=False,
should_shuffle=False,
should_repeat=False)
train_id_to_eval_id = None
if dataset.dataset_name == data_generator.get_cityscapes_dataset_name():
tf.logging.info('Cityscapes requires converting train_id to eval_id.')
train_id_to_eval_id = _CITYSCAPES_TRAIN_ID_TO_EVAL_ID
# Prepare for visualization.
tf.gfile.MakeDirs(FLAGS.vis_logdir)
save_dir = os.path.join(FLAGS.vis_logdir, _SEMANTIC_PREDICTION_SAVE_FOLDER)
tf.gfile.MakeDirs(save_dir)
raw_save_dir = os.path.join(
FLAGS.vis_logdir, _RAW_SEMANTIC_PREDICTION_SAVE_FOLDER)
tf.gfile.MakeDirs(raw_save_dir)
tf.logging.info('Visualizing on %s set', FLAGS.vis_split)
with tf.Graph().as_default():
samples = dataset.get_one_shot_iterator().get_next()
model_options = common.ModelOptions(
outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_of_classes},
crop_size=[int(sz) for sz in FLAGS.vis_crop_size],
atrous_rates=FLAGS.atrous_rates,
output_stride=FLAGS.output_stride)
if tuple(FLAGS.eval_scales) == (1.0,):
tf.logging.info('Performing single-scale test.')
predictions = model.predict_labels(
samples[common.IMAGE],
model_options=model_options,
image_pyramid=FLAGS.image_pyramid)
else:
tf.logging.info('Performing multi-scale test.')
if FLAGS.quantize_delay_step >= 0:
raise ValueError(
'Quantize mode is not supported with multi-scale test.')
predictions = model.predict_labels_multi_scale(
samples[common.IMAGE],
model_options=model_options,
eval_scales=FLAGS.eval_scales,
add_flipped_images=FLAGS.add_flipped_images)
predictions = predictions[common.OUTPUT_TYPE]
if FLAGS.min_resize_value and FLAGS.max_resize_value:
# Only support batch_size = 1, since we assume the dimensions of original
# image after tf.squeeze is [height, width, 3].
assert FLAGS.vis_batch_size == 1
# Reverse the resizing and padding operations performed in preprocessing.
# First, we slice the valid regions (i.e., remove padded region) and then
# we resize the predictions back.
original_image = tf.squeeze(samples[common.ORIGINAL_IMAGE])
original_image_shape = tf.shape(original_image)
predictions = tf.slice(
predictions,
[0, 0, 0],
[1, original_image_shape[0], original_image_shape[1]])
resized_shape = tf.to_int32([tf.squeeze(samples[common.HEIGHT]),
tf.squeeze(samples[common.WIDTH])])
predictions = tf.squeeze(
tf.image.resize_images(tf.expand_dims(predictions, 3),
resized_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True), 3)
tf.train.get_or_create_global_step()
if FLAGS.quantize_delay_step >= 0:
contrib_quantize.create_eval_graph()
num_iteration = 0
max_num_iteration = FLAGS.max_number_of_iterations
checkpoints_iterator = contrib_training.checkpoints_iterator(
FLAGS.checkpoint_dir, min_interval_secs=FLAGS.eval_interval_secs)
for checkpoint_path in checkpoints_iterator:
num_iteration += 1
tf.logging.info(
'Starting visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
tf.logging.info('Visualizing with model %s', checkpoint_path)
scaffold = tf.train.Scaffold(init_op=tf.global_variables_initializer())
session_creator = tf.train.ChiefSessionCreator(
scaffold=scaffold,
master=FLAGS.master,
checkpoint_filename_with_path=checkpoint_path)
with tf.train.MonitoredSession(
session_creator=session_creator, hooks=None) as sess:
batch = 0
image_id_offset = 0
while not sess.should_stop():
tf.logging.info('Visualizing batch %d', batch + 1)
_process_batch(sess=sess,
original_images=samples[common.ORIGINAL_IMAGE],
semantic_predictions=predictions,
image_names=samples[common.IMAGE_NAME],
image_heights=samples[common.HEIGHT],
image_widths=samples[common.WIDTH],
image_id_offset=image_id_offset,
save_dir=save_dir,
raw_save_dir=raw_save_dir,
train_id_to_eval_id=train_id_to_eval_id)
image_id_offset += FLAGS.vis_batch_size
batch += 1
tf.logging.info(
'Finished visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
if max_num_iteration > 0 and num_iteration >= max_num_iteration:
break
if __name__ == '__main__':
flags.mark_flag_as_required('checkpoint_dir')
flags.mark_flag_as_required('vis_logdir')
flags.mark_flag_as_required('dataset_dir')
tf.app.run()
| 12,909 | 38.359756 | 80 | py |
models | models-master/research/deeplab/model.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Provides DeepLab model definition and helper functions.
DeepLab is a deep learning system for semantic image segmentation with
the following features:
(1) Atrous convolution to explicitly control the resolution at which
feature responses are computed within Deep Convolutional Neural Networks.
(2) Atrous spatial pyramid pooling (ASPP) to robustly segment objects at
multiple scales with filters at multiple sampling rates and effective
fields-of-views.
(3) ASPP module augmented with image-level feature and batch normalization.
(4) A simple yet effective decoder module to recover the object boundaries.
See the following papers for more details:
"Encoder-Decoder with Atrous Separable Convolution for Semantic Image
Segmentation"
Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, Hartwig Adam.
(https://arxiv.org/abs/1802.02611)
"Rethinking Atrous Convolution for Semantic Image Segmentation,"
Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam
(https://arxiv.org/abs/1706.05587)
"DeepLab: Semantic Image Segmentation with Deep Convolutional Nets,
Atrous Convolution, and Fully Connected CRFs",
Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy,
Alan L Yuille (* equal contribution)
(https://arxiv.org/abs/1606.00915)
"Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected
CRFs"
Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy,
Alan L. Yuille (* equal contribution)
(https://arxiv.org/abs/1412.7062)
"""
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from deeplab.core import dense_prediction_cell
from deeplab.core import feature_extractor
from deeplab.core import utils
slim = contrib_slim
LOGITS_SCOPE_NAME = 'logits'
MERGED_LOGITS_SCOPE = 'merged_logits'
IMAGE_POOLING_SCOPE = 'image_pooling'
ASPP_SCOPE = 'aspp'
CONCAT_PROJECTION_SCOPE = 'concat_projection'
DECODER_SCOPE = 'decoder'
META_ARCHITECTURE_SCOPE = 'meta_architecture'
PROB_SUFFIX = '_prob'
_resize_bilinear = utils.resize_bilinear
scale_dimension = utils.scale_dimension
split_separable_conv2d = utils.split_separable_conv2d
def get_extra_layer_scopes(last_layers_contain_logits_only=False):
"""Gets the scopes for extra layers.
Args:
last_layers_contain_logits_only: Boolean, True if only consider logits as
the last layer (i.e., exclude ASPP module, decoder module and so on)
Returns:
A list of scopes for extra layers.
"""
if last_layers_contain_logits_only:
return [LOGITS_SCOPE_NAME]
else:
return [
LOGITS_SCOPE_NAME,
IMAGE_POOLING_SCOPE,
ASPP_SCOPE,
CONCAT_PROJECTION_SCOPE,
DECODER_SCOPE,
META_ARCHITECTURE_SCOPE,
]
def predict_labels_multi_scale(images,
model_options,
eval_scales=(1.0,),
add_flipped_images=False):
"""Predicts segmentation labels.
Args:
images: A tensor of size [batch, height, width, channels].
model_options: A ModelOptions instance to configure models.
eval_scales: The scales to resize images for evaluation.
add_flipped_images: Add flipped images for evaluation or not.
Returns:
A dictionary with keys specifying the output_type (e.g., semantic
prediction) and values storing Tensors representing predictions (argmax
over channels). Each prediction has size [batch, height, width].
"""
outputs_to_predictions = {
output: []
for output in model_options.outputs_to_num_classes
}
for i, image_scale in enumerate(eval_scales):
with tf.variable_scope(tf.get_variable_scope(), reuse=True if i else None):
outputs_to_scales_to_logits = multi_scale_logits(
images,
model_options=model_options,
image_pyramid=[image_scale],
is_training=False,
fine_tune_batch_norm=False)
if add_flipped_images:
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
outputs_to_scales_to_logits_reversed = multi_scale_logits(
tf.reverse_v2(images, [2]),
model_options=model_options,
image_pyramid=[image_scale],
is_training=False,
fine_tune_batch_norm=False)
for output in sorted(outputs_to_scales_to_logits):
scales_to_logits = outputs_to_scales_to_logits[output]
logits = _resize_bilinear(
scales_to_logits[MERGED_LOGITS_SCOPE],
tf.shape(images)[1:3],
scales_to_logits[MERGED_LOGITS_SCOPE].dtype)
outputs_to_predictions[output].append(
tf.expand_dims(tf.nn.softmax(logits), 4))
if add_flipped_images:
scales_to_logits_reversed = (
outputs_to_scales_to_logits_reversed[output])
logits_reversed = _resize_bilinear(
tf.reverse_v2(scales_to_logits_reversed[MERGED_LOGITS_SCOPE], [2]),
tf.shape(images)[1:3],
scales_to_logits_reversed[MERGED_LOGITS_SCOPE].dtype)
outputs_to_predictions[output].append(
tf.expand_dims(tf.nn.softmax(logits_reversed), 4))
for output in sorted(outputs_to_predictions):
predictions = outputs_to_predictions[output]
# Compute average prediction across different scales and flipped images.
predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4)
outputs_to_predictions[output] = tf.argmax(predictions, 3)
outputs_to_predictions[output + PROB_SUFFIX] = tf.nn.softmax(predictions)
return outputs_to_predictions
def predict_labels(images, model_options, image_pyramid=None):
"""Predicts segmentation labels.
Args:
images: A tensor of size [batch, height, width, channels].
model_options: A ModelOptions instance to configure models.
image_pyramid: Input image scales for multi-scale feature extraction.
Returns:
A dictionary with keys specifying the output_type (e.g., semantic
prediction) and values storing Tensors representing predictions (argmax
over channels). Each prediction has size [batch, height, width].
"""
outputs_to_scales_to_logits = multi_scale_logits(
images,
model_options=model_options,
image_pyramid=image_pyramid,
is_training=False,
fine_tune_batch_norm=False)
predictions = {}
for output in sorted(outputs_to_scales_to_logits):
scales_to_logits = outputs_to_scales_to_logits[output]
logits = scales_to_logits[MERGED_LOGITS_SCOPE]
# There are two ways to obtain the final prediction results: (1) bilinear
# upsampling the logits followed by argmax, or (2) argmax followed by
# nearest neighbor upsampling. The second option may introduce the "blocking
# effect" but is computationally efficient.
if model_options.prediction_with_upsampled_logits:
logits = _resize_bilinear(logits,
tf.shape(images)[1:3],
scales_to_logits[MERGED_LOGITS_SCOPE].dtype)
predictions[output] = tf.argmax(logits, 3)
predictions[output + PROB_SUFFIX] = tf.nn.softmax(logits)
else:
argmax_results = tf.argmax(logits, 3)
argmax_results = tf.image.resize_nearest_neighbor(
tf.expand_dims(argmax_results, 3),
tf.shape(images)[1:3],
align_corners=True,
name='resize_prediction')
predictions[output] = tf.squeeze(argmax_results, 3)
predictions[output + PROB_SUFFIX] = tf.image.resize_bilinear(
tf.nn.softmax(logits),
tf.shape(images)[1:3],
align_corners=True,
name='resize_prob')
return predictions
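# Illustrative single-scale inference sketch (hypothetical options and shapes,
# assuming deeplab.common is imported as in vis.py):
#   model_options = common.ModelOptions(
#       outputs_to_num_classes={common.OUTPUT_TYPE: 21},
#       crop_size=[513, 513], atrous_rates=[6, 12, 18], output_stride=16)
#   predictions = predict_labels(images, model_options)
#   semantic_labels = predictions[common.OUTPUT_TYPE]  # [batch, 513, 513]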
def multi_scale_logits(images,
model_options,
image_pyramid,
weight_decay=0.0001,
is_training=False,
fine_tune_batch_norm=False,
nas_training_hyper_parameters=None):
"""Gets the logits for multi-scale inputs.
The returned logits are all downsampled (due to max-pooling layers)
for both training and evaluation.
Args:
images: A tensor of size [batch, height, width, channels].
model_options: A ModelOptions instance to configure models.
image_pyramid: Input image scales for multi-scale feature extraction.
weight_decay: The weight decay for model variables.
is_training: Is training or not.
fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
nas_training_hyper_parameters: A dictionary storing hyper-parameters for
training nas models. Its keys are:
- `drop_path_keep_prob`: Probability to keep each path in the cell when
training.
- `total_training_steps`: Total training steps to help drop path
probability calculation.
Returns:
outputs_to_scales_to_logits: A map of maps from output_type (e.g.,
semantic prediction) to a dictionary of multi-scale logits names to
logits. For each output_type, the dictionary has keys which
correspond to the scales and values which correspond to the logits.
For example, if `scales` equals [1.0, 1.5], then the keys would
include 'merged_logits', 'logits_1.00' and 'logits_1.50'.
Raises:
ValueError: If model_options doesn't specify crop_size and its
add_image_level_feature = True, since add_image_level_feature requires
crop_size information.
"""
# Setup default values.
if not image_pyramid:
image_pyramid = [1.0]
crop_height = (
model_options.crop_size[0]
if model_options.crop_size else tf.shape(images)[1])
crop_width = (
model_options.crop_size[1]
if model_options.crop_size else tf.shape(images)[2])
if model_options.image_pooling_crop_size:
image_pooling_crop_height = model_options.image_pooling_crop_size[0]
image_pooling_crop_width = model_options.image_pooling_crop_size[1]
# Compute the height, width for the output logits.
if model_options.decoder_output_stride:
logits_output_stride = min(model_options.decoder_output_stride)
else:
logits_output_stride = model_options.output_stride
logits_height = scale_dimension(
crop_height,
max(1.0, max(image_pyramid)) / logits_output_stride)
logits_width = scale_dimension(
crop_width,
max(1.0, max(image_pyramid)) / logits_output_stride)
# Compute the logits for each scale in the image pyramid.
outputs_to_scales_to_logits = {
k: {}
for k in model_options.outputs_to_num_classes
}
num_channels = images.get_shape().as_list()[-1]
for image_scale in image_pyramid:
if image_scale != 1.0:
scaled_height = scale_dimension(crop_height, image_scale)
scaled_width = scale_dimension(crop_width, image_scale)
scaled_crop_size = [scaled_height, scaled_width]
scaled_images = _resize_bilinear(images, scaled_crop_size, images.dtype)
if model_options.crop_size:
scaled_images.set_shape(
[None, scaled_height, scaled_width, num_channels])
# Adjust image_pooling_crop_size accordingly.
scaled_image_pooling_crop_size = None
if model_options.image_pooling_crop_size:
scaled_image_pooling_crop_size = [
scale_dimension(image_pooling_crop_height, image_scale),
scale_dimension(image_pooling_crop_width, image_scale)]
else:
scaled_crop_size = model_options.crop_size
scaled_images = images
scaled_image_pooling_crop_size = model_options.image_pooling_crop_size
updated_options = model_options._replace(
crop_size=scaled_crop_size,
image_pooling_crop_size=scaled_image_pooling_crop_size)
outputs_to_logits = _get_logits(
scaled_images,
updated_options,
weight_decay=weight_decay,
reuse=tf.AUTO_REUSE,
is_training=is_training,
fine_tune_batch_norm=fine_tune_batch_norm,
nas_training_hyper_parameters=nas_training_hyper_parameters)
# Resize the logits to have the same dimension before merging.
for output in sorted(outputs_to_logits):
outputs_to_logits[output] = _resize_bilinear(
outputs_to_logits[output], [logits_height, logits_width],
outputs_to_logits[output].dtype)
# Return when only one input scale.
if len(image_pyramid) == 1:
for output in sorted(model_options.outputs_to_num_classes):
outputs_to_scales_to_logits[output][
MERGED_LOGITS_SCOPE] = outputs_to_logits[output]
return outputs_to_scales_to_logits
# Save logits to the output map.
for output in sorted(model_options.outputs_to_num_classes):
outputs_to_scales_to_logits[output][
'logits_%.2f' % image_scale] = outputs_to_logits[output]
# Merge the logits from all the multi-scale inputs.
for output in sorted(model_options.outputs_to_num_classes):
# Concatenate the multi-scale logits for each output type.
all_logits = [
tf.expand_dims(logits, axis=4)
for logits in outputs_to_scales_to_logits[output].values()
]
all_logits = tf.concat(all_logits, 4)
merge_fn = (
tf.reduce_max
if model_options.merge_method == 'max' else tf.reduce_mean)
outputs_to_scales_to_logits[output][MERGED_LOGITS_SCOPE] = merge_fn(
all_logits, axis=4)
return outputs_to_scales_to_logits
def extract_features(images,
model_options,
weight_decay=0.0001,
reuse=None,
is_training=False,
fine_tune_batch_norm=False,
nas_training_hyper_parameters=None):
"""Extracts features by the particular model_variant.
Args:
images: A tensor of size [batch, height, width, channels].
model_options: A ModelOptions instance to configure models.
weight_decay: The weight decay for model variables.
reuse: Reuse the model variables or not.
is_training: Is training or not.
fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
nas_training_hyper_parameters: A dictionary storing hyper-parameters for
training nas models. Its keys are:
- `drop_path_keep_prob`: Probability to keep each path in the cell when
training.
- `total_training_steps`: Total training steps to help drop path
probability calculation.
Returns:
concat_logits: A tensor of size [batch, feature_height, feature_width,
feature_channels], where feature_height/feature_width are determined by
the images height/width and output_stride.
end_points: A dictionary from components of the network to the corresponding
activation.
"""
features, end_points = feature_extractor.extract_features(
images,
output_stride=model_options.output_stride,
multi_grid=model_options.multi_grid,
model_variant=model_options.model_variant,
depth_multiplier=model_options.depth_multiplier,
divisible_by=model_options.divisible_by,
weight_decay=weight_decay,
reuse=reuse,
is_training=is_training,
preprocessed_images_dtype=model_options.preprocessed_images_dtype,
fine_tune_batch_norm=fine_tune_batch_norm,
nas_architecture_options=model_options.nas_architecture_options,
nas_training_hyper_parameters=nas_training_hyper_parameters,
use_bounded_activation=model_options.use_bounded_activation)
if not model_options.aspp_with_batch_norm:
return features, end_points
else:
if model_options.dense_prediction_cell_config is not None:
tf.logging.info('Using dense prediction cell config.')
dense_prediction_layer = dense_prediction_cell.DensePredictionCell(
config=model_options.dense_prediction_cell_config,
hparams={
'conv_rate_multiplier': 16 // model_options.output_stride,
})
concat_logits = dense_prediction_layer.build_cell(
features,
output_stride=model_options.output_stride,
crop_size=model_options.crop_size,
image_pooling_crop_size=model_options.image_pooling_crop_size,
weight_decay=weight_decay,
reuse=reuse,
is_training=is_training,
fine_tune_batch_norm=fine_tune_batch_norm)
return concat_logits, end_points
else:
# The following codes employ the DeepLabv3 ASPP module. Note that we
# could express the ASPP module as one particular dense prediction
# cell architecture. We do not do so but leave the following codes
# for backward compatibility.
batch_norm_params = utils.get_batch_norm_params(
decay=0.9997,
epsilon=1e-5,
scale=True,
is_training=(is_training and fine_tune_batch_norm),
sync_batch_norm_method=model_options.sync_batch_norm_method)
batch_norm = utils.get_batch_norm_fn(
model_options.sync_batch_norm_method)
activation_fn = (
tf.nn.relu6 if model_options.use_bounded_activation else tf.nn.relu)
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
padding='SAME',
stride=1,
reuse=reuse):
with slim.arg_scope([batch_norm], **batch_norm_params):
depth = model_options.aspp_convs_filters
branch_logits = []
if model_options.add_image_level_feature:
if model_options.crop_size is not None:
image_pooling_crop_size = model_options.image_pooling_crop_size
# If image_pooling_crop_size is not specified, use crop_size.
if image_pooling_crop_size is None:
image_pooling_crop_size = model_options.crop_size
pool_height = scale_dimension(
image_pooling_crop_size[0],
1. / model_options.output_stride)
pool_width = scale_dimension(
image_pooling_crop_size[1],
1. / model_options.output_stride)
image_feature = slim.avg_pool2d(
features, [pool_height, pool_width],
model_options.image_pooling_stride, padding='VALID')
resize_height = scale_dimension(
model_options.crop_size[0],
1. / model_options.output_stride)
resize_width = scale_dimension(
model_options.crop_size[1],
1. / model_options.output_stride)
else:
# If crop_size is None, we simply do global pooling.
pool_height = tf.shape(features)[1]
pool_width = tf.shape(features)[2]
image_feature = tf.reduce_mean(
features, axis=[1, 2], keepdims=True)
resize_height = pool_height
resize_width = pool_width
image_feature_activation_fn = tf.nn.relu
image_feature_normalizer_fn = batch_norm
if model_options.aspp_with_squeeze_and_excitation:
image_feature_activation_fn = tf.nn.sigmoid
if model_options.image_se_uses_qsigmoid:
image_feature_activation_fn = utils.q_sigmoid
image_feature_normalizer_fn = None
image_feature = slim.conv2d(
image_feature, depth, 1,
activation_fn=image_feature_activation_fn,
normalizer_fn=image_feature_normalizer_fn,
scope=IMAGE_POOLING_SCOPE)
image_feature = _resize_bilinear(
image_feature,
[resize_height, resize_width],
image_feature.dtype)
            # If resize_height/resize_width are Tensors, their static size is unknown.
if isinstance(resize_height, tf.Tensor):
resize_height = None
if isinstance(resize_width, tf.Tensor):
resize_width = None
image_feature.set_shape([None, resize_height, resize_width, depth])
if not model_options.aspp_with_squeeze_and_excitation:
branch_logits.append(image_feature)
# Employ a 1x1 convolution.
branch_logits.append(slim.conv2d(features, depth, 1,
scope=ASPP_SCOPE + str(0)))
if model_options.atrous_rates:
# Employ 3x3 convolutions with different atrous rates.
for i, rate in enumerate(model_options.atrous_rates, 1):
scope = ASPP_SCOPE + str(i)
if model_options.aspp_with_separable_conv:
aspp_features = split_separable_conv2d(
features,
filters=depth,
rate=rate,
weight_decay=weight_decay,
scope=scope)
else:
aspp_features = slim.conv2d(
features, depth, 3, rate=rate, scope=scope)
branch_logits.append(aspp_features)
# Merge branch logits.
concat_logits = tf.concat(branch_logits, 3)
if model_options.aspp_with_concat_projection:
concat_logits = slim.conv2d(
concat_logits, depth, 1, scope=CONCAT_PROJECTION_SCOPE)
concat_logits = slim.dropout(
concat_logits,
keep_prob=0.9,
is_training=is_training,
scope=CONCAT_PROJECTION_SCOPE + '_dropout')
if (model_options.add_image_level_feature and
model_options.aspp_with_squeeze_and_excitation):
concat_logits *= image_feature
return concat_logits, end_points
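# A minimal usage sketch for extract_features (illustrative values; the
# remaining ModelOptions fields are populated from the flags in common.py):
#   model_options = common.ModelOptions(
#       outputs_to_num_classes={'semantic': 21},
#       crop_size=[513, 513],
#       atrous_rates=[6, 12, 18],
#       output_stride=16)
#   features, end_points = extract_features(
#       images, model_options, is_training=False)
#   # `features` is the ASPP output with model_options.aspp_convs_filters
#   # channels and a spatial size of roughly crop_size / output_stride.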
def _get_logits(images,
model_options,
weight_decay=0.0001,
reuse=None,
is_training=False,
fine_tune_batch_norm=False,
nas_training_hyper_parameters=None):
"""Gets the logits by atrous/image spatial pyramid pooling.
Args:
images: A tensor of size [batch, height, width, channels].
model_options: A ModelOptions instance to configure models.
weight_decay: The weight decay for model variables.
reuse: Reuse the model variables or not.
is_training: Is training or not.
fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
nas_training_hyper_parameters: A dictionary storing hyper-parameters for
training nas models. Its keys are:
- `drop_path_keep_prob`: Probability to keep each path in the cell when
training.
- `total_training_steps`: Total training steps to help drop path
probability calculation.
Returns:
outputs_to_logits: A map from output_type to logits.
"""
features, end_points = extract_features(
images,
model_options,
weight_decay=weight_decay,
reuse=reuse,
is_training=is_training,
fine_tune_batch_norm=fine_tune_batch_norm,
nas_training_hyper_parameters=nas_training_hyper_parameters)
if model_options.decoder_output_stride:
crop_size = model_options.crop_size
if crop_size is None:
crop_size = [tf.shape(images)[1], tf.shape(images)[2]]
features = refine_by_decoder(
features,
end_points,
crop_size=crop_size,
decoder_output_stride=model_options.decoder_output_stride,
decoder_use_separable_conv=model_options.decoder_use_separable_conv,
decoder_use_sum_merge=model_options.decoder_use_sum_merge,
decoder_filters=model_options.decoder_filters,
decoder_output_is_logits=model_options.decoder_output_is_logits,
model_variant=model_options.model_variant,
weight_decay=weight_decay,
reuse=reuse,
is_training=is_training,
fine_tune_batch_norm=fine_tune_batch_norm,
use_bounded_activation=model_options.use_bounded_activation)
outputs_to_logits = {}
for output in sorted(model_options.outputs_to_num_classes):
if model_options.decoder_output_is_logits:
outputs_to_logits[output] = tf.identity(features,
name=output)
else:
outputs_to_logits[output] = get_branch_logits(
features,
model_options.outputs_to_num_classes[output],
model_options.atrous_rates,
aspp_with_batch_norm=model_options.aspp_with_batch_norm,
kernel_size=model_options.logits_kernel_size,
weight_decay=weight_decay,
reuse=reuse,
scope_suffix=output)
return outputs_to_logits
def refine_by_decoder(features,
end_points,
crop_size=None,
decoder_output_stride=None,
decoder_use_separable_conv=False,
decoder_use_sum_merge=False,
decoder_filters=256,
decoder_output_is_logits=False,
model_variant=None,
weight_decay=0.0001,
reuse=None,
is_training=False,
fine_tune_batch_norm=False,
use_bounded_activation=False,
sync_batch_norm_method='None'):
"""Adds the decoder to obtain sharper segmentation results.
Args:
features: A tensor of size [batch, features_height, features_width,
features_channels].
end_points: A dictionary from components of the network to the corresponding
activation.
crop_size: A tuple [crop_height, crop_width] specifying whole patch crop
size.
decoder_output_stride: A list of integers specifying the output stride of
low-level features used in the decoder module.
decoder_use_separable_conv: Employ separable convolution for decoder or not.
decoder_use_sum_merge: Boolean, decoder uses simple sum merge or not.
decoder_filters: Integer, decoder filter size.
decoder_output_is_logits: Boolean, using decoder output as logits or not.
model_variant: Model variant for feature extraction.
weight_decay: The weight decay for model variables.
reuse: Reuse the model variables or not.
is_training: Is training or not.
fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
use_bounded_activation: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference.
sync_batch_norm_method: String, method used to sync batch norm. Currently
only support `None` (no sync batch norm) and `tpu` (use tpu code to
sync batch norm).
Returns:
Decoder output with size [batch, decoder_height, decoder_width,
decoder_channels].
Raises:
ValueError: If crop_size is None.
"""
if crop_size is None:
raise ValueError('crop_size must be provided when using decoder.')
batch_norm_params = utils.get_batch_norm_params(
decay=0.9997,
epsilon=1e-5,
scale=True,
is_training=(is_training and fine_tune_batch_norm),
sync_batch_norm_method=sync_batch_norm_method)
batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method)
decoder_depth = decoder_filters
projected_filters = 48
if decoder_use_sum_merge:
# When using sum merge, the projected filters must be equal to decoder
# filters.
projected_filters = decoder_filters
if decoder_output_is_logits:
# Overwrite the setting when decoder output is logits.
activation_fn = None
normalizer_fn = None
conv2d_kernel = 1
# Use original conv instead of separable conv.
decoder_use_separable_conv = False
else:
# Default setting when decoder output is not logits.
activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu
normalizer_fn = batch_norm
conv2d_kernel = 3
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
padding='SAME',
stride=1,
reuse=reuse):
with slim.arg_scope([batch_norm], **batch_norm_params):
with tf.variable_scope(DECODER_SCOPE, DECODER_SCOPE, [features]):
decoder_features = features
decoder_stage = 0
scope_suffix = ''
for output_stride in decoder_output_stride:
feature_list = feature_extractor.networks_to_feature_maps[
model_variant][
feature_extractor.DECODER_END_POINTS][output_stride]
# If only one decoder stage, we do not change the scope name in
          # order to maintain backward compatibility.
if decoder_stage:
scope_suffix = '_{}'.format(decoder_stage)
for i, name in enumerate(feature_list):
decoder_features_list = [decoder_features]
# MobileNet and NAS variants use different naming convention.
if ('mobilenet' in model_variant or
model_variant.startswith('mnas') or
model_variant.startswith('nas')):
feature_name = name
else:
feature_name = '{}/{}'.format(
feature_extractor.name_scope[model_variant], name)
decoder_features_list.append(
slim.conv2d(
end_points[feature_name],
projected_filters,
1,
scope='feature_projection' + str(i) + scope_suffix))
# Determine the output size.
decoder_height = scale_dimension(crop_size[0], 1.0 / output_stride)
decoder_width = scale_dimension(crop_size[1], 1.0 / output_stride)
# Resize to decoder_height/decoder_width.
for j, feature in enumerate(decoder_features_list):
decoder_features_list[j] = _resize_bilinear(
feature, [decoder_height, decoder_width], feature.dtype)
h = (None if isinstance(decoder_height, tf.Tensor)
else decoder_height)
w = (None if isinstance(decoder_width, tf.Tensor)
else decoder_width)
decoder_features_list[j].set_shape([None, h, w, None])
if decoder_use_sum_merge:
decoder_features = _decoder_with_sum_merge(
decoder_features_list,
decoder_depth,
conv2d_kernel=conv2d_kernel,
decoder_use_separable_conv=decoder_use_separable_conv,
weight_decay=weight_decay,
scope_suffix=scope_suffix)
else:
if not decoder_use_separable_conv:
scope_suffix = str(i) + scope_suffix
decoder_features = _decoder_with_concat_merge(
decoder_features_list,
decoder_depth,
decoder_use_separable_conv=decoder_use_separable_conv,
weight_decay=weight_decay,
scope_suffix=scope_suffix)
decoder_stage += 1
return decoder_features
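# A rough usage sketch for refine_by_decoder (illustrative values; the
# decoder_output_stride=[4] choice mirrors the xception_65 guidance in
# common.py):
#   refined_features = refine_by_decoder(
#       features,
#       end_points,
#       crop_size=[513, 513],
#       decoder_output_stride=[4],
#       decoder_use_separable_conv=True,
#       model_variant='xception_65',
#       is_training=False)
#   # The result is resized to roughly crop_size / 4 and fused with the
#   # low-level activations listed in feature_extractor.DECODER_END_POINTS.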
def _decoder_with_sum_merge(decoder_features_list,
decoder_depth,
conv2d_kernel=3,
decoder_use_separable_conv=True,
weight_decay=0.0001,
scope_suffix=''):
"""Decoder with sum to merge features.
Args:
decoder_features_list: A list of decoder features.
decoder_depth: Integer, the filters used in the convolution.
conv2d_kernel: Integer, the convolution kernel size.
decoder_use_separable_conv: Boolean, use separable conv or not.
weight_decay: Weight decay for the model variables.
scope_suffix: String, used in the scope suffix.
Returns:
decoder features merged with sum.
Raises:
RuntimeError: If decoder_features_list have length not equal to 2.
"""
if len(decoder_features_list) != 2:
raise RuntimeError('Expect decoder_features has length 2.')
# Only apply one convolution when decoder use sum merge.
if decoder_use_separable_conv:
decoder_features = split_separable_conv2d(
decoder_features_list[0],
filters=decoder_depth,
rate=1,
weight_decay=weight_decay,
scope='decoder_split_sep_conv0'+scope_suffix) + decoder_features_list[1]
else:
decoder_features = slim.conv2d(
decoder_features_list[0],
decoder_depth,
conv2d_kernel,
scope='decoder_conv0'+scope_suffix) + decoder_features_list[1]
return decoder_features
def _decoder_with_concat_merge(decoder_features_list,
decoder_depth,
decoder_use_separable_conv=True,
weight_decay=0.0001,
scope_suffix=''):
"""Decoder with concatenation to merge features.
This decoder method applies two convolutions to smooth the features obtained
by concatenating the input decoder_features_list.
This decoder module is proposed in the DeepLabv3+ paper.
Args:
decoder_features_list: A list of decoder features.
decoder_depth: Integer, the filters used in the convolution.
decoder_use_separable_conv: Boolean, use separable conv or not.
weight_decay: Weight decay for the model variables.
scope_suffix: String, used in the scope suffix.
Returns:
decoder features merged with concatenation.
"""
if decoder_use_separable_conv:
decoder_features = split_separable_conv2d(
tf.concat(decoder_features_list, 3),
filters=decoder_depth,
rate=1,
weight_decay=weight_decay,
scope='decoder_conv0'+scope_suffix)
decoder_features = split_separable_conv2d(
decoder_features,
filters=decoder_depth,
rate=1,
weight_decay=weight_decay,
scope='decoder_conv1'+scope_suffix)
else:
num_convs = 2
decoder_features = slim.repeat(
tf.concat(decoder_features_list, 3),
num_convs,
slim.conv2d,
decoder_depth,
3,
scope='decoder_conv'+scope_suffix)
return decoder_features
def get_branch_logits(features,
num_classes,
atrous_rates=None,
aspp_with_batch_norm=False,
kernel_size=1,
weight_decay=0.0001,
reuse=None,
scope_suffix=''):
"""Gets the logits from each model's branch.
The underlying model is branched out in the last layer when atrous
spatial pyramid pooling is employed, and all branches are sum-merged
to form the final logits.
Args:
features: A float tensor of shape [batch, height, width, channels].
num_classes: Number of classes to predict.
atrous_rates: A list of atrous convolution rates for last layer.
aspp_with_batch_norm: Use batch normalization layers for ASPP.
kernel_size: Kernel size for convolution.
weight_decay: Weight decay for the model variables.
reuse: Reuse model variables or not.
scope_suffix: Scope suffix for the model variables.
Returns:
Merged logits with shape [batch, height, width, num_classes].
Raises:
ValueError: Upon invalid input kernel_size value.
"""
# When using batch normalization with ASPP, ASPP has been applied before
# in extract_features, and thus we simply apply 1x1 convolution here.
if aspp_with_batch_norm or atrous_rates is None:
if kernel_size != 1:
raise ValueError('Kernel size must be 1 when atrous_rates is None or '
                       'using aspp_with_batch_norm. Got %d.' % kernel_size)
atrous_rates = [1]
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
reuse=reuse):
with tf.variable_scope(LOGITS_SCOPE_NAME, LOGITS_SCOPE_NAME, [features]):
branch_logits = []
for i, rate in enumerate(atrous_rates):
scope = scope_suffix
if i:
scope += '_%d' % i
branch_logits.append(
slim.conv2d(
features,
num_classes,
kernel_size=kernel_size,
rate=rate,
activation_fn=None,
normalizer_fn=None,
scope=scope))
return tf.add_n(branch_logits)
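# An illustrative DeepLabv2-style call (an assumed configuration without ASPP
# batch norm): three parallel 3x3 atrous branches are created and sum-merged
# by the tf.add_n above.
#   logits = get_branch_logits(
#       features,
#       num_classes=21,
#       atrous_rates=[6, 12, 18],
#       aspp_with_batch_norm=False,
#       kernel_size=3)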
| 37,051 | 39.627193 | 80 | py |
models | models-master/research/deeplab/export_model.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports trained model to TensorFlow frozen graph."""
import os
import tensorflow as tf
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.python.tools import freeze_graph
from deeplab import common
from deeplab import input_preprocess
from deeplab import model
slim = tf.contrib.slim
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path')
flags.DEFINE_string('export_path', None,
'Path to output Tensorflow frozen graph.')
flags.DEFINE_integer('num_classes', 21, 'Number of classes.')
flags.DEFINE_multi_integer('crop_size', [513, 513],
'Crop size [height, width].')
# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
# one could use different atrous_rates/output_stride during training/evaluation.
flags.DEFINE_multi_integer('atrous_rates', None,
'Atrous rates for atrous spatial pyramid pooling.')
flags.DEFINE_integer('output_stride', 8,
'The ratio of input to output spatial resolution.')
# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale inference.
flags.DEFINE_multi_float('inference_scales', [1.0],
'The scales to resize images for inference.')
flags.DEFINE_bool('add_flipped_images', False,
'Add flipped images during inference or not.')
flags.DEFINE_integer(
'quantize_delay_step', -1,
'Steps to start quantized training. If < 0, will not quantize model.')
flags.DEFINE_bool('save_inference_graph', False,
'Save inference graph in text proto.')
# Input name of the exported model.
_INPUT_NAME = 'ImageTensor'
# Output name of the exported predictions.
_OUTPUT_NAME = 'SemanticPredictions'
_RAW_OUTPUT_NAME = 'RawSemanticPredictions'
# Output name of the exported probabilities.
_OUTPUT_PROB_NAME = 'SemanticProbabilities'
_RAW_OUTPUT_PROB_NAME = 'RawSemanticProbabilities'
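# Example invocation (hypothetical paths; the atrous_rates/output_stride values
# follow the comment above for an xception_65 checkpoint trained with
# output_stride=16):
#   python deeplab/export_model.py \
#     --model_variant=xception_65 \
#     --atrous_rates=6 --atrous_rates=12 --atrous_rates=18 \
#     --output_stride=16 \
#     --crop_size=513 --crop_size=513 \
#     --num_classes=21 \
#     --checkpoint_path=/tmp/train_logdir/model.ckpt-30000 \
#     --export_path=/tmp/frozen_inference_graph.pb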
def _create_input_tensors():
"""Creates and prepares input tensors for DeepLab model.
This method creates a 4-D uint8 image tensor 'ImageTensor' with shape
[1, None, None, 3]. The actual input tensor name to use during inference is
'ImageTensor:0'.
Returns:
image: Preprocessed 4-D float32 tensor with shape [1, crop_height,
crop_width, 3].
original_image_size: Original image shape tensor [height, width].
resized_image_size: Resized image shape tensor [height, width].
"""
# input_preprocess takes 4-D image tensor as input.
input_image = tf.placeholder(tf.uint8, [1, None, None, 3], name=_INPUT_NAME)
original_image_size = tf.shape(input_image)[1:3]
# Squeeze the dimension in axis=0 since `preprocess_image_and_label` assumes
# image to be 3-D.
image = tf.squeeze(input_image, axis=0)
resized_image, image, _ = input_preprocess.preprocess_image_and_label(
image,
label=None,
crop_height=FLAGS.crop_size[0],
crop_width=FLAGS.crop_size[1],
min_resize_value=FLAGS.min_resize_value,
max_resize_value=FLAGS.max_resize_value,
resize_factor=FLAGS.resize_factor,
is_training=False,
model_variant=FLAGS.model_variant)
resized_image_size = tf.shape(resized_image)[:2]
# Expand the dimension in axis=0, since the following operations assume the
# image to be 4-D.
image = tf.expand_dims(image, 0)
return image, original_image_size, resized_image_size
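# A rough sketch of running the exported frozen graph (TF1-style session API;
# the tensor names come from the constants defined above; the path and input
# array are hypothetical):
#   graph_def = tf.GraphDef()
#   with tf.gfile.GFile('/tmp/frozen_inference_graph.pb', 'rb') as f:
#     graph_def.ParseFromString(f.read())
#   with tf.Graph().as_default() as graph:
#     tf.import_graph_def(graph_def, name='')
#     with tf.Session(graph=graph) as sess:
#       # `image` is a uint8 numpy array of shape [1, height, width, 3].
#       seg_map = sess.run('SemanticPredictions:0',
#                          feed_dict={'ImageTensor:0': image})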
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info('Prepare to export model to: %s', FLAGS.export_path)
with tf.Graph().as_default():
image, image_size, resized_image_size = _create_input_tensors()
model_options = common.ModelOptions(
outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes},
crop_size=FLAGS.crop_size,
atrous_rates=FLAGS.atrous_rates,
output_stride=FLAGS.output_stride)
if tuple(FLAGS.inference_scales) == (1.0,):
tf.logging.info('Exported model performs single-scale inference.')
predictions = model.predict_labels(
image,
model_options=model_options,
image_pyramid=FLAGS.image_pyramid)
else:
tf.logging.info('Exported model performs multi-scale inference.')
if FLAGS.quantize_delay_step >= 0:
raise ValueError(
'Quantize mode is not supported with multi-scale test.')
predictions = model.predict_labels_multi_scale(
image,
model_options=model_options,
eval_scales=FLAGS.inference_scales,
add_flipped_images=FLAGS.add_flipped_images)
raw_predictions = tf.identity(
tf.cast(predictions[common.OUTPUT_TYPE], tf.float32),
_RAW_OUTPUT_NAME)
raw_probabilities = tf.identity(
predictions[common.OUTPUT_TYPE + model.PROB_SUFFIX],
_RAW_OUTPUT_PROB_NAME)
# Crop the valid regions from the predictions.
semantic_predictions = raw_predictions[
:, :resized_image_size[0], :resized_image_size[1]]
semantic_probabilities = raw_probabilities[
:, :resized_image_size[0], :resized_image_size[1]]
# Resize back the prediction to the original image size.
def _resize_label(label, label_size):
# Expand dimension of label to [1, height, width, 1] for resize operation.
label = tf.expand_dims(label, 3)
resized_label = tf.image.resize_images(
label,
label_size,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(resized_label, 3), tf.int32)
semantic_predictions = _resize_label(semantic_predictions, image_size)
semantic_predictions = tf.identity(semantic_predictions, name=_OUTPUT_NAME)
semantic_probabilities = tf.image.resize_bilinear(
semantic_probabilities, image_size, align_corners=True,
name=_OUTPUT_PROB_NAME)
if FLAGS.quantize_delay_step >= 0:
contrib_quantize.create_eval_graph()
saver = tf.train.Saver(tf.all_variables())
dirname = os.path.dirname(FLAGS.export_path)
tf.gfile.MakeDirs(dirname)
graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)
freeze_graph.freeze_graph_with_def_protos(
graph_def,
saver.as_saver_def(),
FLAGS.checkpoint_path,
_OUTPUT_NAME + ',' + _OUTPUT_PROB_NAME,
restore_op_name=None,
filename_tensor_name=None,
output_graph=FLAGS.export_path,
clear_devices=True,
initializer_nodes=None)
if FLAGS.save_inference_graph:
tf.train.write_graph(graph_def, dirname, 'inference_graph.pbtxt')
if __name__ == '__main__':
flags.mark_flag_as_required('checkpoint_path')
flags.mark_flag_as_required('export_path')
tf.app.run()
| 7,548 | 36.371287 | 80 | py |
models | models-master/research/deeplab/common_test.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for common.py."""
import copy
import tensorflow as tf
from deeplab import common
class CommonTest(tf.test.TestCase):
def testOutputsToNumClasses(self):
num_classes = 21
model_options = common.ModelOptions(
outputs_to_num_classes={common.OUTPUT_TYPE: num_classes})
self.assertEqual(model_options.outputs_to_num_classes[common.OUTPUT_TYPE],
num_classes)
def testDeepcopy(self):
num_classes = 21
model_options = common.ModelOptions(
outputs_to_num_classes={common.OUTPUT_TYPE: num_classes})
model_options_new = copy.deepcopy(model_options)
self.assertEqual((model_options_new.
outputs_to_num_classes[common.OUTPUT_TYPE]),
num_classes)
num_classes_new = 22
model_options_new.outputs_to_num_classes[common.OUTPUT_TYPE] = (
num_classes_new)
self.assertEqual(model_options.outputs_to_num_classes[common.OUTPUT_TYPE],
num_classes)
self.assertEqual((model_options_new.
outputs_to_num_classes[common.OUTPUT_TYPE]),
num_classes_new)
if __name__ == '__main__':
tf.test.main()
| 1,872 | 34.339623 | 80 | py |
models | models-master/research/deeplab/common.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides flags that are common to scripts.
Common flags from train/eval/vis/export_model.py are collected in this script.
"""
import collections
import copy
import json
import tensorflow as tf
flags = tf.app.flags
# Flags for input preprocessing.
flags.DEFINE_integer('min_resize_value', None,
'Desired size of the smaller image side.')
flags.DEFINE_integer('max_resize_value', None,
'Maximum allowed size of the larger image side.')
flags.DEFINE_integer('resize_factor', None,
'Resized dimensions are multiple of factor plus one.')
flags.DEFINE_boolean('keep_aspect_ratio', True,
'Keep aspect ratio after resizing or not.')
# Model dependent flags.
flags.DEFINE_integer('logits_kernel_size', 1,
'The kernel size for the convolutional kernel that '
'generates logits.')
# When using 'mobilenet_v2', we set atrous_rates = decoder_output_stride = None.
# When using 'xception_65' or 'resnet_v1' model variants, we set
# atrous_rates = [6, 12, 18] (output stride 16) and decoder_output_stride = 4.
# See core/feature_extractor.py for supported model variants.
flags.DEFINE_string('model_variant', 'mobilenet_v2', 'DeepLab model variant.')
flags.DEFINE_multi_float('image_pyramid', None,
'Input scales for multi-scale feature extraction.')
flags.DEFINE_boolean('add_image_level_feature', True,
'Add image level feature.')
flags.DEFINE_list(
'image_pooling_crop_size', None,
'Image pooling crop size [height, width] used in the ASPP module. When '
    'value is None, the model performs image pooling with "crop_size". This '
    'flag is useful when one wants to use different image pooling sizes.')
flags.DEFINE_list(
'image_pooling_stride', '1,1',
'Image pooling stride [height, width] used in the ASPP image pooling. ')
flags.DEFINE_boolean('aspp_with_batch_norm', True,
'Use batch norm parameters for ASPP or not.')
flags.DEFINE_boolean('aspp_with_separable_conv', True,
'Use separable convolution for ASPP or not.')
# Defaults to None. Set multi_grid = [1, 2, 4] when using provided
# 'resnet_v1_{50,101}_beta' checkpoints.
flags.DEFINE_multi_integer('multi_grid', None,
'Employ a hierarchy of atrous rates for ResNet.')
flags.DEFINE_float('depth_multiplier', 1.0,
'Multiplier for the depth (number of channels) for all '
'convolution ops used in MobileNet.')
flags.DEFINE_integer('divisible_by', None,
                     'An integer that ensures the number of channels in each '
                     'layer is divisible by this value. Used in MobileNet.')
# For `xception_65`, use decoder_output_stride = 4. For `mobilenet_v2`, use
# decoder_output_stride = None.
flags.DEFINE_list('decoder_output_stride', None,
'Comma-separated list of strings with the number specifying '
                  'output stride of low-level features at each network level. '
                  'Current semantic segmentation implementation assumes at '
                  'most one output stride (i.e., either None or a list with '
                  'only one element).')
flags.DEFINE_boolean('decoder_use_separable_conv', True,
'Employ separable convolution for decoder or not.')
flags.DEFINE_enum('merge_method', 'max', ['max', 'avg'],
'Scheme to merge multi scale features.')
flags.DEFINE_boolean(
'prediction_with_upsampled_logits', True,
'When performing prediction, there are two options: (1) bilinear '
'upsampling the logits followed by softmax, or (2) softmax followed by '
'bilinear upsampling.')
flags.DEFINE_string(
'dense_prediction_cell_json',
'',
'A JSON file that specifies the dense prediction cell.')
flags.DEFINE_integer(
'nas_stem_output_num_conv_filters', 20,
'Number of filters of the stem output tensor in NAS models.')
flags.DEFINE_bool('nas_use_classification_head', False,
'Use image classification head for NAS model variants.')
flags.DEFINE_bool('nas_remove_os32_stride', False,
'Remove the stride in the output stride 32 branch.')
flags.DEFINE_bool('use_bounded_activation', False,
'Whether or not to use bounded activations. Bounded '
'activations better lend themselves to quantized inference.')
flags.DEFINE_boolean('aspp_with_concat_projection', True,
'ASPP with concat projection.')
flags.DEFINE_boolean('aspp_with_squeeze_and_excitation', False,
'ASPP with squeeze and excitation.')
flags.DEFINE_integer('aspp_convs_filters', 256, 'ASPP convolution filters.')
flags.DEFINE_boolean('decoder_use_sum_merge', False,
                     'Decoder uses simple sum merge.')
flags.DEFINE_integer('decoder_filters', 256, 'Decoder filters.')
flags.DEFINE_boolean('decoder_output_is_logits', False,
'Use decoder output as logits or not.')
flags.DEFINE_boolean('image_se_uses_qsigmoid', False, 'Use q-sigmoid.')
flags.DEFINE_multi_float(
'label_weights', None,
'A list of label weights, each element represents the weight for the label '
'of its index, for example, label_weights = [0.1, 0.5] means the weight '
'for label 0 is 0.1 and the weight for label 1 is 0.5. If set as None, all '
'the labels have the same weight 1.0.')
flags.DEFINE_float('batch_norm_decay', 0.9997, 'Batchnorm decay.')
FLAGS = flags.FLAGS
# Constants
# Perform semantic segmentation predictions.
OUTPUT_TYPE = 'semantic'
# Semantic segmentation item names.
LABELS_CLASS = 'labels_class'
IMAGE = 'image'
HEIGHT = 'height'
WIDTH = 'width'
IMAGE_NAME = 'image_name'
LABEL = 'label'
ORIGINAL_IMAGE = 'original_image'
# Test set name.
TEST_SET = 'test'
class ModelOptions(
collections.namedtuple('ModelOptions', [
'outputs_to_num_classes',
'crop_size',
'atrous_rates',
'output_stride',
'preprocessed_images_dtype',
'merge_method',
'add_image_level_feature',
'image_pooling_crop_size',
'image_pooling_stride',
'aspp_with_batch_norm',
'aspp_with_separable_conv',
'multi_grid',
'decoder_output_stride',
'decoder_use_separable_conv',
'logits_kernel_size',
'model_variant',
'depth_multiplier',
'divisible_by',
'prediction_with_upsampled_logits',
'dense_prediction_cell_config',
'nas_architecture_options',
'use_bounded_activation',
'aspp_with_concat_projection',
'aspp_with_squeeze_and_excitation',
'aspp_convs_filters',
'decoder_use_sum_merge',
'decoder_filters',
'decoder_output_is_logits',
'image_se_uses_qsigmoid',
'label_weights',
'sync_batch_norm_method',
'batch_norm_decay',
])):
"""Immutable class to hold model options."""
__slots__ = ()
def __new__(cls,
outputs_to_num_classes,
crop_size=None,
atrous_rates=None,
output_stride=8,
preprocessed_images_dtype=tf.float32):
"""Constructor to set default values.
Args:
outputs_to_num_classes: A dictionary from output type to the number of
classes. For example, for the task of semantic segmentation with 21
semantic classes, we would have outputs_to_num_classes['semantic'] = 21.
crop_size: A tuple [crop_height, crop_width].
atrous_rates: A list of atrous convolution rates for ASPP.
output_stride: The ratio of input to output spatial resolution.
preprocessed_images_dtype: The type after the preprocessing function.
Returns:
A new ModelOptions instance.
"""
dense_prediction_cell_config = None
if FLAGS.dense_prediction_cell_json:
with tf.gfile.Open(FLAGS.dense_prediction_cell_json, 'r') as f:
dense_prediction_cell_config = json.load(f)
decoder_output_stride = None
if FLAGS.decoder_output_stride:
decoder_output_stride = [
int(x) for x in FLAGS.decoder_output_stride]
if sorted(decoder_output_stride, reverse=True) != decoder_output_stride:
        raise ValueError('Decoder output stride needs to be sorted in '
                         'descending order.')
image_pooling_crop_size = None
if FLAGS.image_pooling_crop_size:
image_pooling_crop_size = [int(x) for x in FLAGS.image_pooling_crop_size]
image_pooling_stride = [1, 1]
if FLAGS.image_pooling_stride:
image_pooling_stride = [int(x) for x in FLAGS.image_pooling_stride]
label_weights = FLAGS.label_weights
if label_weights is None:
label_weights = 1.0
nas_architecture_options = {
'nas_stem_output_num_conv_filters': (
FLAGS.nas_stem_output_num_conv_filters),
'nas_use_classification_head': FLAGS.nas_use_classification_head,
'nas_remove_os32_stride': FLAGS.nas_remove_os32_stride,
}
return super(ModelOptions, cls).__new__(
cls, outputs_to_num_classes, crop_size, atrous_rates, output_stride,
preprocessed_images_dtype,
FLAGS.merge_method,
FLAGS.add_image_level_feature,
image_pooling_crop_size,
image_pooling_stride,
FLAGS.aspp_with_batch_norm,
FLAGS.aspp_with_separable_conv,
FLAGS.multi_grid,
decoder_output_stride,
FLAGS.decoder_use_separable_conv,
FLAGS.logits_kernel_size,
FLAGS.model_variant,
FLAGS.depth_multiplier,
FLAGS.divisible_by,
FLAGS.prediction_with_upsampled_logits,
dense_prediction_cell_config,
nas_architecture_options,
FLAGS.use_bounded_activation,
FLAGS.aspp_with_concat_projection,
FLAGS.aspp_with_squeeze_and_excitation,
FLAGS.aspp_convs_filters,
FLAGS.decoder_use_sum_merge,
FLAGS.decoder_filters,
FLAGS.decoder_output_is_logits,
FLAGS.image_se_uses_qsigmoid,
label_weights,
'None',
FLAGS.batch_norm_decay)
def __deepcopy__(self, memo):
return ModelOptions(copy.deepcopy(self.outputs_to_num_classes),
self.crop_size,
self.atrous_rates,
self.output_stride,
self.preprocessed_images_dtype)
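# A minimal usage sketch (illustrative values): only the task-specific
# arguments are passed explicitly; the remaining fields are filled in from
# the flags above.
#   model_options = ModelOptions(
#       outputs_to_num_classes={OUTPUT_TYPE: 21},
#       crop_size=[513, 513],
#       atrous_rates=[6, 12, 18],
#       output_stride=16)
#   # e.g. model_options.model_variant mirrors FLAGS.model_variant.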
| 11,156 | 36.692568 | 80 | py |
models | models-master/research/deeplab/eval.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation script for the DeepLab model.
See model.py for more details and usage.
"""
import numpy as np
import six
import tensorflow as tf
from tensorflow.contrib import metrics as contrib_metrics
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.contrib import tfprof as contrib_tfprof
from tensorflow.contrib import training as contrib_training
from deeplab import common
from deeplab import model
from deeplab.datasets import data_generator
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
# Settings for log directories.
flags.DEFINE_string('eval_logdir', None, 'Where to write the event logs.')
flags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.')
# Settings for evaluating the model.
flags.DEFINE_integer('eval_batch_size', 1,
'The number of images in each batch during evaluation.')
flags.DEFINE_list('eval_crop_size', '513,513',
'Image crop size [height, width] for evaluation.')
flags.DEFINE_integer('eval_interval_secs', 60 * 5,
'How often (in seconds) to run evaluation.')
# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
# one could use different atrous_rates/output_stride during training/evaluation.
flags.DEFINE_multi_integer('atrous_rates', None,
'Atrous rates for atrous spatial pyramid pooling.')
flags.DEFINE_integer('output_stride', 16,
'The ratio of input to output spatial resolution.')
# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test.
flags.DEFINE_multi_float('eval_scales', [1.0],
'The scales to resize images for evaluation.')
# Change to True for adding flipped images during test.
flags.DEFINE_bool('add_flipped_images', False,
'Add flipped images for evaluation or not.')
flags.DEFINE_integer(
'quantize_delay_step', -1,
'Steps to start quantized training. If < 0, will not quantize model.')
# Dataset settings.
flags.DEFINE_string('dataset', 'pascal_voc_seg',
'Name of the segmentation dataset.')
flags.DEFINE_string('eval_split', 'val',
                    'Which split of the dataset to use for evaluation.')
flags.DEFINE_string('dataset_dir', None, 'Where the dataset resides.')
flags.DEFINE_integer('max_number_of_evaluations', 0,
'Maximum number of eval iterations. Will loop '
'indefinitely upon nonpositive values.')
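# Example invocation (hypothetical paths; single-scale evaluation of an
# xception_65 model trained with output_stride=16 on PASCAL VOC):
#   python deeplab/eval.py \
#     --model_variant=xception_65 \
#     --atrous_rates=6 --atrous_rates=12 --atrous_rates=18 \
#     --output_stride=16 \
#     --decoder_output_stride=4 \
#     --eval_crop_size=513,513 \
#     --dataset=pascal_voc_seg \
#     --checkpoint_dir=/tmp/train_logdir \
#     --eval_logdir=/tmp/eval_logdir \
#     --dataset_dir=/tmp/pascal_voc_seg/tfrecord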
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
dataset = data_generator.Dataset(
dataset_name=FLAGS.dataset,
split_name=FLAGS.eval_split,
dataset_dir=FLAGS.dataset_dir,
batch_size=FLAGS.eval_batch_size,
crop_size=[int(sz) for sz in FLAGS.eval_crop_size],
min_resize_value=FLAGS.min_resize_value,
max_resize_value=FLAGS.max_resize_value,
resize_factor=FLAGS.resize_factor,
model_variant=FLAGS.model_variant,
num_readers=2,
is_training=False,
should_shuffle=False,
should_repeat=False)
tf.gfile.MakeDirs(FLAGS.eval_logdir)
tf.logging.info('Evaluating on %s set', FLAGS.eval_split)
with tf.Graph().as_default():
samples = dataset.get_one_shot_iterator().get_next()
model_options = common.ModelOptions(
outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_of_classes},
crop_size=[int(sz) for sz in FLAGS.eval_crop_size],
atrous_rates=FLAGS.atrous_rates,
output_stride=FLAGS.output_stride)
# Set shape in order for tf.contrib.tfprof.model_analyzer to work properly.
samples[common.IMAGE].set_shape(
[FLAGS.eval_batch_size,
int(FLAGS.eval_crop_size[0]),
int(FLAGS.eval_crop_size[1]),
3])
if tuple(FLAGS.eval_scales) == (1.0,):
tf.logging.info('Performing single-scale test.')
predictions = model.predict_labels(samples[common.IMAGE], model_options,
image_pyramid=FLAGS.image_pyramid)
else:
tf.logging.info('Performing multi-scale test.')
if FLAGS.quantize_delay_step >= 0:
raise ValueError(
'Quantize mode is not supported with multi-scale test.')
predictions = model.predict_labels_multi_scale(
samples[common.IMAGE],
model_options=model_options,
eval_scales=FLAGS.eval_scales,
add_flipped_images=FLAGS.add_flipped_images)
predictions = predictions[common.OUTPUT_TYPE]
predictions = tf.reshape(predictions, shape=[-1])
labels = tf.reshape(samples[common.LABEL], shape=[-1])
weights = tf.to_float(tf.not_equal(labels, dataset.ignore_label))
# Set ignore_label regions to label 0, because metrics.mean_iou requires
    # range of labels = [0, dataset.num_of_classes). Note the ignore_label regions
# are not evaluated since the corresponding regions contain weights = 0.
labels = tf.where(
tf.equal(labels, dataset.ignore_label), tf.zeros_like(labels), labels)
predictions_tag = 'miou'
for eval_scale in FLAGS.eval_scales:
predictions_tag += '_' + str(eval_scale)
if FLAGS.add_flipped_images:
predictions_tag += '_flipped'
# Define the evaluation metric.
metric_map = {}
num_classes = dataset.num_of_classes
metric_map['eval/%s_overall' % predictions_tag] = tf.metrics.mean_iou(
labels=labels, predictions=predictions, num_classes=num_classes,
weights=weights)
# IoU for each class.
one_hot_predictions = tf.one_hot(predictions, num_classes)
one_hot_predictions = tf.reshape(one_hot_predictions, [-1, num_classes])
one_hot_labels = tf.one_hot(labels, num_classes)
one_hot_labels = tf.reshape(one_hot_labels, [-1, num_classes])
for c in range(num_classes):
predictions_tag_c = '%s_class_%d' % (predictions_tag, c)
tp, tp_op = tf.metrics.true_positives(
labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c],
weights=weights)
fp, fp_op = tf.metrics.false_positives(
labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c],
weights=weights)
fn, fn_op = tf.metrics.false_negatives(
labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c],
weights=weights)
tp_fp_fn_op = tf.group(tp_op, fp_op, fn_op)
iou = tf.where(tf.greater(tp + fn, 0.0),
tp / (tp + fn + fp),
tf.constant(np.NaN))
metric_map['eval/%s' % predictions_tag_c] = (iou, tp_fp_fn_op)
(metrics_to_values,
metrics_to_updates) = contrib_metrics.aggregate_metric_map(metric_map)
summary_ops = []
for metric_name, metric_value in six.iteritems(metrics_to_values):
op = tf.summary.scalar(metric_name, metric_value)
op = tf.Print(op, [metric_value], metric_name)
summary_ops.append(op)
summary_op = tf.summary.merge(summary_ops)
summary_hook = contrib_training.SummaryAtEndHook(
log_dir=FLAGS.eval_logdir, summary_op=summary_op)
hooks = [summary_hook]
num_eval_iters = None
if FLAGS.max_number_of_evaluations > 0:
num_eval_iters = FLAGS.max_number_of_evaluations
if FLAGS.quantize_delay_step >= 0:
contrib_quantize.create_eval_graph()
contrib_tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=contrib_tfprof.model_analyzer
.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
contrib_tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS)
contrib_training.evaluate_repeatedly(
checkpoint_dir=FLAGS.checkpoint_dir,
master=FLAGS.master,
eval_ops=list(metrics_to_updates.values()),
max_number_of_evaluations=num_eval_iters,
hooks=hooks,
eval_interval_secs=FLAGS.eval_interval_secs)
if __name__ == '__main__':
flags.mark_flag_as_required('checkpoint_dir')
flags.mark_flag_as_required('eval_logdir')
flags.mark_flag_as_required('dataset_dir')
tf.app.run()
| 8,972 | 38.355263 | 80 | py |
models | models-master/research/deeplab/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/deeplab/train.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for the DeepLab model.
See model.py for more details and usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow as tf
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.contrib import tfprof as contrib_tfprof
from deeplab import common
from deeplab import model
from deeplab.datasets import data_generator
from deeplab.utils import train_utils
from deployment import model_deploy
slim = tf.contrib.slim
flags = tf.app.flags
FLAGS = flags.FLAGS
# Settings for multi-GPUs/multi-replicas training.
flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy.')
flags.DEFINE_boolean('clone_on_cpu', False, 'Use CPUs to deploy clones.')
flags.DEFINE_integer('num_replicas', 1, 'Number of worker replicas.')
flags.DEFINE_integer('startup_delay_steps', 15,
'Number of training steps between replicas startup.')
flags.DEFINE_integer(
'num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then '
'the parameters are handled locally by the worker.')
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
flags.DEFINE_integer('task', 0, 'The task ID.')
# Settings for logging.
flags.DEFINE_string('train_logdir', None,
'Where the checkpoint and logs are stored.')
flags.DEFINE_integer('log_steps', 10,
'Display logging information at every log_steps.')
flags.DEFINE_integer('save_interval_secs', 1200,
'How often, in seconds, we save the model to disk.')
flags.DEFINE_integer('save_summaries_secs', 600,
'How often, in seconds, we compute the summaries.')
flags.DEFINE_boolean(
'save_summaries_images', False,
'Save sample inputs, labels, and semantic predictions as '
'images to summary.')
# Settings for profiling.
flags.DEFINE_string('profile_logdir', None,
'Where the profile files are stored.')
# Settings for training strategy.
flags.DEFINE_enum('optimizer', 'momentum', ['momentum', 'adam'],
'Which optimizer to use.')
# Momentum optimizer flags
flags.DEFINE_enum('learning_policy', 'poly', ['poly', 'step'],
'Learning rate policy for training.')
# Use 0.007 when training on PASCAL augmented training set, train_aug. When
# fine-tuning on PASCAL trainval set, use learning rate=0.0001.
flags.DEFINE_float('base_learning_rate', .0001,
'The base learning rate for model training.')
flags.DEFINE_float('decay_steps', 0.0,
'Decay steps for polynomial learning rate schedule.')
flags.DEFINE_float('end_learning_rate', 0.0,
'End learning rate for polynomial learning rate schedule.')
flags.DEFINE_float('learning_rate_decay_factor', 0.1,
'The rate to decay the base learning rate.')
flags.DEFINE_integer('learning_rate_decay_step', 2000,
'Decay the base learning rate at a fixed step.')
flags.DEFINE_float('learning_power', 0.9,
'The power value used in the poly learning policy.')
flags.DEFINE_integer('training_number_of_steps', 30000,
'The number of steps used for training')
flags.DEFINE_float('momentum', 0.9, 'The momentum value to use')
# Adam optimizer flags
flags.DEFINE_float('adam_learning_rate', 0.001,
'Learning rate for the adam optimizer.')
flags.DEFINE_float('adam_epsilon', 1e-08, 'Adam optimizer epsilon.')
# When fine_tune_batch_norm=True, use at least batch size larger than 12
# (batch size more than 16 is better). Otherwise, one could use smaller batch
# size and set fine_tune_batch_norm=False.
flags.DEFINE_integer('train_batch_size', 8,
'The number of images in each batch during training.')
# For weight_decay, use 0.00004 for MobileNet-V2 or Xception model variants.
# Use 0.0001 for ResNet model variants.
flags.DEFINE_float('weight_decay', 0.00004,
'The value of the weight decay for training.')
flags.DEFINE_list('train_crop_size', '513,513',
'Image crop size [height, width] during training.')
flags.DEFINE_float(
'last_layer_gradient_multiplier', 1.0,
'The gradient multiplier for last layers, which is used to '
'boost the gradient of last layers if the value > 1.')
flags.DEFINE_boolean('upsample_logits', True,
'Upsample logits during training.')
# Hyper-parameters for NAS training strategy.
flags.DEFINE_float(
'drop_path_keep_prob', 1.0,
'Probability to keep each path in the NAS cell when training.')
# Settings for fine-tuning the network.
flags.DEFINE_string('tf_initial_checkpoint', None,
'The initial checkpoint in tensorflow format.')
# Set to False if one does not want to re-use the trained classifier weights.
flags.DEFINE_boolean('initialize_last_layer', True,
'Initialize the last layer.')
flags.DEFINE_boolean('last_layers_contain_logits_only', False,
'Only consider logits as last layers or not.')
flags.DEFINE_integer('slow_start_step', 0,
'Training model with small learning rate for few steps.')
flags.DEFINE_float('slow_start_learning_rate', 1e-4,
'Learning rate employed during slow start.')
# Set to True if one wants to fine-tune the batch norm parameters in DeepLabv3.
# Set to False and use small batch size to save GPU memory.
flags.DEFINE_boolean('fine_tune_batch_norm', True,
'Fine tune the batch norm parameters or not.')
flags.DEFINE_float('min_scale_factor', 0.5,
                   'Minimum scale factor for data augmentation.')
flags.DEFINE_float('max_scale_factor', 2.,
'Maximum scale factor for data augmentation.')
flags.DEFINE_float('scale_factor_step_size', 0.25,
'Scale factor step size for data augmentation.')
# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
# one could use different atrous_rates/output_stride during training/evaluation.
flags.DEFINE_multi_integer('atrous_rates', None,
'Atrous rates for atrous spatial pyramid pooling.')
flags.DEFINE_integer('output_stride', 16,
'The ratio of input to output spatial resolution.')
# Hard example mining related flags.
flags.DEFINE_integer(
'hard_example_mining_step', 0,
'The training step in which exact hard example mining kicks off. Note we '
'gradually reduce the mining percent to the specified '
'top_k_percent_pixels. For example, if hard_example_mining_step=100K and '
'top_k_percent_pixels=0.25, then mining percent will gradually reduce from '
'100% to 25% until 100K steps after which we only mine top 25% pixels.')
flags.DEFINE_float(
'top_k_percent_pixels', 1.0,
'The top k percent pixels (in terms of the loss values) used to compute '
'loss during training. This is useful for hard pixel mining.')
# Quantization setting.
flags.DEFINE_integer(
'quantize_delay_step', -1,
'Steps to start quantized training. If < 0, will not quantize model.')
# Dataset settings.
flags.DEFINE_string('dataset', 'pascal_voc_seg',
'Name of the segmentation dataset.')
flags.DEFINE_string('train_split', 'train',
                    'Which split of the dataset to use for training.')
flags.DEFINE_string('dataset_dir', None, 'Where the dataset resides.')
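# Example invocation (hypothetical paths; the hyper-parameters follow the
# comments above for fine-tuning xception_65 on PASCAL VOC with a batch size
# large enough to also fine-tune the batch norm parameters):
#   python deeplab/train.py \
#     --model_variant=xception_65 \
#     --atrous_rates=6 --atrous_rates=12 --atrous_rates=18 \
#     --output_stride=16 \
#     --decoder_output_stride=4 \
#     --train_crop_size=513,513 \
#     --train_batch_size=16 \
#     --fine_tune_batch_norm=true \
#     --training_number_of_steps=30000 \
#     --tf_initial_checkpoint=/tmp/xception_65/model.ckpt \
#     --train_logdir=/tmp/train_logdir \
#     --dataset_dir=/tmp/pascal_voc_seg/tfrecord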
def _build_deeplab(iterator, outputs_to_num_classes, ignore_label):
"""Builds a clone of DeepLab.
Args:
iterator: An iterator of type tf.data.Iterator for images and labels.
outputs_to_num_classes: A map from output type to the number of classes. For
example, for the task of semantic segmentation with 21 semantic classes,
we would have outputs_to_num_classes['semantic'] = 21.
ignore_label: Ignore label.
"""
samples = iterator.get_next()
# Add name to input and label nodes so we can add to summary.
samples[common.IMAGE] = tf.identity(samples[common.IMAGE], name=common.IMAGE)
samples[common.LABEL] = tf.identity(samples[common.LABEL], name=common.LABEL)
model_options = common.ModelOptions(
outputs_to_num_classes=outputs_to_num_classes,
crop_size=[int(sz) for sz in FLAGS.train_crop_size],
atrous_rates=FLAGS.atrous_rates,
output_stride=FLAGS.output_stride)
outputs_to_scales_to_logits = model.multi_scale_logits(
samples[common.IMAGE],
model_options=model_options,
image_pyramid=FLAGS.image_pyramid,
weight_decay=FLAGS.weight_decay,
is_training=True,
fine_tune_batch_norm=FLAGS.fine_tune_batch_norm,
nas_training_hyper_parameters={
'drop_path_keep_prob': FLAGS.drop_path_keep_prob,
'total_training_steps': FLAGS.training_number_of_steps,
})
# Add name to graph node so we can add to summary.
output_type_dict = outputs_to_scales_to_logits[common.OUTPUT_TYPE]
output_type_dict[model.MERGED_LOGITS_SCOPE] = tf.identity(
output_type_dict[model.MERGED_LOGITS_SCOPE], name=common.OUTPUT_TYPE)
for output, num_classes in six.iteritems(outputs_to_num_classes):
train_utils.add_softmax_cross_entropy_loss_for_each_scale(
outputs_to_scales_to_logits[output],
samples[common.LABEL],
num_classes,
ignore_label,
loss_weight=model_options.label_weights,
upsample_logits=FLAGS.upsample_logits,
hard_example_mining_step=FLAGS.hard_example_mining_step,
top_k_percent_pixels=FLAGS.top_k_percent_pixels,
scope=output)
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Set up deployment (i.e., multi-GPUs and/or multi-replicas).
config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.num_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
# Split the batch across GPUs.
assert FLAGS.train_batch_size % config.num_clones == 0, (
      'Training batch size not divisible by number of clones (GPUs).')
clone_batch_size = FLAGS.train_batch_size // config.num_clones
tf.gfile.MakeDirs(FLAGS.train_logdir)
tf.logging.info('Training on %s set', FLAGS.train_split)
with tf.Graph().as_default() as graph:
with tf.device(config.inputs_device()):
dataset = data_generator.Dataset(
dataset_name=FLAGS.dataset,
split_name=FLAGS.train_split,
dataset_dir=FLAGS.dataset_dir,
batch_size=clone_batch_size,
crop_size=[int(sz) for sz in FLAGS.train_crop_size],
min_resize_value=FLAGS.min_resize_value,
max_resize_value=FLAGS.max_resize_value,
resize_factor=FLAGS.resize_factor,
min_scale_factor=FLAGS.min_scale_factor,
max_scale_factor=FLAGS.max_scale_factor,
scale_factor_step_size=FLAGS.scale_factor_step_size,
model_variant=FLAGS.model_variant,
num_readers=4,
is_training=True,
should_shuffle=True,
should_repeat=True)
# Create the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = tf.train.get_or_create_global_step()
# Define the model and create clones.
model_fn = _build_deeplab
model_args = (dataset.get_one_shot_iterator(), {
common.OUTPUT_TYPE: dataset.num_of_classes
}, dataset.ignore_label)
clones = model_deploy.create_clones(config, model_fn, args=model_args)
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
first_clone_scope = config.clone_scope(0)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Add summaries for model variables.
for model_var in tf.model_variables():
summaries.add(tf.summary.histogram(model_var.op.name, model_var))
# Add summaries for images, labels, semantic predictions
if FLAGS.save_summaries_images:
summary_image = graph.get_tensor_by_name(
('%s/%s:0' % (first_clone_scope, common.IMAGE)).strip('/'))
summaries.add(
tf.summary.image('samples/%s' % common.IMAGE, summary_image))
first_clone_label = graph.get_tensor_by_name(
('%s/%s:0' % (first_clone_scope, common.LABEL)).strip('/'))
# Scale up summary image pixel values for better visualization.
pixel_scaling = max(1, 255 // dataset.num_of_classes)
summary_label = tf.cast(first_clone_label * pixel_scaling, tf.uint8)
summaries.add(
tf.summary.image('samples/%s' % common.LABEL, summary_label))
first_clone_output = graph.get_tensor_by_name(
('%s/%s:0' % (first_clone_scope, common.OUTPUT_TYPE)).strip('/'))
predictions = tf.expand_dims(tf.argmax(first_clone_output, 3), -1)
summary_predictions = tf.cast(predictions * pixel_scaling, tf.uint8)
summaries.add(
tf.summary.image(
'samples/%s' % common.OUTPUT_TYPE, summary_predictions))
# Add summaries for losses.
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Build the optimizer based on the device specification.
with tf.device(config.optimizer_device()):
learning_rate = train_utils.get_model_learning_rate(
FLAGS.learning_policy,
FLAGS.base_learning_rate,
FLAGS.learning_rate_decay_step,
FLAGS.learning_rate_decay_factor,
FLAGS.training_number_of_steps,
FLAGS.learning_power,
FLAGS.slow_start_step,
FLAGS.slow_start_learning_rate,
decay_steps=FLAGS.decay_steps,
end_learning_rate=FLAGS.end_learning_rate)
summaries.add(tf.summary.scalar('learning_rate', learning_rate))
if FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate=FLAGS.adam_learning_rate, epsilon=FLAGS.adam_epsilon)
else:
raise ValueError('Unknown optimizer')
if FLAGS.quantize_delay_step >= 0:
if FLAGS.num_clones > 1:
raise ValueError('Quantization doesn\'t support multi-clone yet.')
contrib_quantize.create_training_graph(
quant_delay=FLAGS.quantize_delay_step)
startup_delay_steps = FLAGS.task * FLAGS.startup_delay_steps
with tf.device(config.variables_device()):
total_loss, grads_and_vars = model_deploy.optimize_clones(
clones, optimizer)
total_loss = tf.check_numerics(total_loss, 'Loss is inf or nan.')
summaries.add(tf.summary.scalar('total_loss', total_loss))
# Modify the gradients for biases and last layer variables.
last_layers = model.get_extra_layer_scopes(
FLAGS.last_layers_contain_logits_only)
grad_mult = train_utils.get_model_gradient_multipliers(
last_layers, FLAGS.last_layer_gradient_multiplier)
if grad_mult:
grads_and_vars = slim.learning.multiply_gradients(
grads_and_vars, grad_mult)
# Create gradient update op.
grad_updates = optimizer.apply_gradients(
grads_and_vars, global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(
tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries))
# Soft placement allows placing on CPU ops without GPU implementation.
session_config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
# Start the training.
profile_dir = FLAGS.profile_logdir
if profile_dir is not None:
tf.gfile.MakeDirs(profile_dir)
with contrib_tfprof.ProfileContext(
enabled=profile_dir is not None, profile_dir=profile_dir):
init_fn = None
if FLAGS.tf_initial_checkpoint:
init_fn = train_utils.get_model_init_fn(
FLAGS.train_logdir,
FLAGS.tf_initial_checkpoint,
FLAGS.initialize_last_layer,
last_layers,
ignore_missing_vars=True)
slim.learning.train(
train_tensor,
logdir=FLAGS.train_logdir,
log_every_n_steps=FLAGS.log_steps,
master=FLAGS.master,
number_of_steps=FLAGS.training_number_of_steps,
is_chief=(FLAGS.task == 0),
session_config=session_config,
startup_delay_steps=startup_delay_steps,
init_fn=init_fn,
summary_op=summary_op,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs)
if __name__ == '__main__':
flags.mark_flag_as_required('train_logdir')
flags.mark_flag_as_required('dataset_dir')
tf.app.run()
| 18,288 | 38.331183 | 80 | py |
models | models-master/research/deeplab/model_test.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DeepLab model and some helper functions."""
import tensorflow as tf
from deeplab import common
from deeplab import model
class DeeplabModelTest(tf.test.TestCase):
def testWrongDeepLabVariant(self):
model_options = common.ModelOptions([])._replace(
model_variant='no_such_variant')
with self.assertRaises(ValueError):
model._get_logits(images=[], model_options=model_options)
def testBuildDeepLabv2(self):
batch_size = 2
crop_size = [41, 41]
# Test with two image_pyramids.
image_pyramids = [[1], [0.5, 1]]
# Test two model variants.
model_variants = ['xception_65', 'mobilenet_v2']
# Test with two output_types.
outputs_to_num_classes = {'semantic': 3,
'direction': 2}
expected_endpoints = [['merged_logits'],
['merged_logits',
'logits_0.50',
'logits_1.00']]
expected_num_logits = [1, 3]
for model_variant in model_variants:
model_options = common.ModelOptions(outputs_to_num_classes)._replace(
add_image_level_feature=False,
aspp_with_batch_norm=False,
aspp_with_separable_conv=False,
model_variant=model_variant)
for i, image_pyramid in enumerate(image_pyramids):
g = tf.Graph()
with g.as_default():
with self.test_session(graph=g):
inputs = tf.random_uniform(
(batch_size, crop_size[0], crop_size[1], 3))
outputs_to_scales_to_logits = model.multi_scale_logits(
inputs, model_options, image_pyramid=image_pyramid)
# Check computed results for each output type.
for output in outputs_to_num_classes:
scales_to_logits = outputs_to_scales_to_logits[output]
self.assertListEqual(sorted(scales_to_logits.keys()),
sorted(expected_endpoints[i]))
# Expected number of logits = len(image_pyramid) + 1, since the
# last logits is merged from all the scales.
self.assertEqual(len(scales_to_logits), expected_num_logits[i])
def testForwardpassDeepLabv3plus(self):
crop_size = [33, 33]
outputs_to_num_classes = {'semantic': 3}
model_options = common.ModelOptions(
outputs_to_num_classes,
crop_size,
output_stride=16
)._replace(
add_image_level_feature=True,
aspp_with_batch_norm=True,
logits_kernel_size=1,
decoder_output_stride=[4],
model_variant='mobilenet_v2') # Employ MobileNetv2 for fast test.
g = tf.Graph()
with g.as_default():
with self.test_session(graph=g) as sess:
inputs = tf.random_uniform(
(1, crop_size[0], crop_size[1], 3))
outputs_to_scales_to_logits = model.multi_scale_logits(
inputs,
model_options,
image_pyramid=[1.0])
sess.run(tf.global_variables_initializer())
outputs_to_scales_to_logits = sess.run(outputs_to_scales_to_logits)
# Check computed results for each output type.
for output in outputs_to_num_classes:
scales_to_logits = outputs_to_scales_to_logits[output]
# Expect only one output.
self.assertEqual(len(scales_to_logits), 1)
for logits in scales_to_logits.values():
self.assertTrue(logits.any())
def testBuildDeepLabWithDensePredictionCell(self):
batch_size = 1
crop_size = [33, 33]
outputs_to_num_classes = {'semantic': 2}
expected_endpoints = ['merged_logits']
dense_prediction_cell_config = [
{'kernel': 3, 'rate': [1, 6], 'op': 'conv', 'input': -1},
{'kernel': 3, 'rate': [18, 15], 'op': 'conv', 'input': 0},
]
model_options = common.ModelOptions(
outputs_to_num_classes,
crop_size,
output_stride=16)._replace(
aspp_with_batch_norm=True,
model_variant='mobilenet_v2',
dense_prediction_cell_config=dense_prediction_cell_config)
g = tf.Graph()
with g.as_default():
with self.test_session(graph=g):
inputs = tf.random_uniform(
(batch_size, crop_size[0], crop_size[1], 3))
outputs_to_scales_to_model_results = model.multi_scale_logits(
inputs,
model_options,
image_pyramid=[1.0])
for output in outputs_to_num_classes:
scales_to_model_results = outputs_to_scales_to_model_results[output]
self.assertListEqual(
list(scales_to_model_results), expected_endpoints)
self.assertEqual(len(scales_to_model_results), 1)
if __name__ == '__main__':
tf.test.main()
| 5,456 | 35.624161 | 80 | py |
models | models-master/research/deeplab/deprecated/segmentation_dataset.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data from semantic segmentation datasets.
The SegmentationDataset class provides both images and annotations (semantic
segmentation and/or instance segmentation) for TensorFlow. Currently, we
support the following datasets:
1. PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/voc2012/).
PASCAL VOC 2012 semantic segmentation dataset annotates 20 foreground objects
(e.g., bike, person, and so on) and leaves all the other semantic classes as
one background class. The dataset contains 1464, 1449, and 1456 annotated
images for the training, validation and test respectively.
2. Cityscapes dataset (https://www.cityscapes-dataset.com)
The Cityscapes dataset contains 19 semantic labels (such as road, person, car,
and so on) for urban street scenes.
3. ADE20K dataset (http://groups.csail.mit.edu/vision/datasets/ADE20K)
The ADE20K dataset contains 150 semantic labels for both urban street scenes
and indoor scenes.
References:
M. Everingham, S. M. A. Eslami, L. V. Gool, C. K. I. Williams, J. Winn,
and A. Zisserman, The pascal visual object classes challenge a retrospective.
IJCV, 2014.
M. Cordts, M. Omran, S. Ramos, T. Rehfeld, M. Enzweiler, R. Benenson,
U. Franke, S. Roth, and B. Schiele, "The cityscapes dataset for semantic urban
scene understanding," In Proc. of CVPR, 2016.
B. Zhou, H. Zhao, X. Puig, S. Fidler, A. Barriuso, A. Torralba, "Scene Parsing
through ADE20K dataset", In Proc. of CVPR, 2017.
"""
import collections
import os.path
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
dataset = slim.dataset
tfexample_decoder = slim.tfexample_decoder
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying height and width.',
    'labels_class': ('A semantic segmentation label whose size matches the '
                     'image. Its values range from 0 (background) to '
                     'num_classes.'),
}
# Named tuple to describe the dataset properties.
DatasetDescriptor = collections.namedtuple(
'DatasetDescriptor',
['splits_to_sizes', # Splits of the dataset into training, val, and test.
'num_classes', # Number of semantic classes, including the background
# class (if exists). For example, there are 20
# foreground classes + 1 background class in the PASCAL
# VOC 2012 dataset. Thus, we set num_classes=21.
'ignore_label', # Ignore label value.
]
)
_CITYSCAPES_INFORMATION = DatasetDescriptor(
splits_to_sizes={
'train_fine': 2975,
'val_fine': 500,
},
num_classes=19,
ignore_label=255,
)
_PASCAL_VOC_SEG_INFORMATION = DatasetDescriptor(
splits_to_sizes={
'train': 1464,
'train_aug': 10582,
'trainval': 2913,
'val': 1449,
},
num_classes=21,
ignore_label=255,
)
# These numbers (i.e., the 'train'/'val' split sizes) seem to have to be hard
# coded. You are required to figure them out for your own training/testing
# setup.
_ADE20K_INFORMATION = DatasetDescriptor(
splits_to_sizes={
'train': 20210, # num of samples in images/training
'val': 2000, # num of samples in images/validation
},
num_classes=151,
ignore_label=0,
)
_DATASETS_INFORMATION = {
'cityscapes': _CITYSCAPES_INFORMATION,
'pascal_voc_seg': _PASCAL_VOC_SEG_INFORMATION,
'ade20k': _ADE20K_INFORMATION,
}
# Default file pattern of TFRecord of TensorFlow Example.
_FILE_PATTERN = '%s-*'
def get_cityscapes_dataset_name():
return 'cityscapes'
def get_dataset(dataset_name, split_name, dataset_dir):
"""Gets an instance of slim Dataset.
Args:
dataset_name: Dataset name.
split_name: A train/val Split name.
dataset_dir: The directory of the dataset sources.
Returns:
An instance of slim Dataset.
Raises:
ValueError: if the dataset_name or split_name is not recognized.
"""
if dataset_name not in _DATASETS_INFORMATION:
raise ValueError('The specified dataset is not supported yet.')
splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes
if split_name not in splits_to_sizes:
raise ValueError('data split name %s not recognized' % split_name)
# Prepare the variables for different datasets.
num_classes = _DATASETS_INFORMATION[dataset_name].num_classes
ignore_label = _DATASETS_INFORMATION[dataset_name].ignore_label
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Specify how the TF-Examples are decoded.
keys_to_features = {
'image/encoded': tf.FixedLenFeature(
(), tf.string, default_value=''),
'image/filename': tf.FixedLenFeature(
(), tf.string, default_value=''),
'image/format': tf.FixedLenFeature(
(), tf.string, default_value='jpeg'),
'image/height': tf.FixedLenFeature(
(), tf.int64, default_value=0),
'image/width': tf.FixedLenFeature(
(), tf.int64, default_value=0),
'image/segmentation/class/encoded': tf.FixedLenFeature(
(), tf.string, default_value=''),
'image/segmentation/class/format': tf.FixedLenFeature(
(), tf.string, default_value='png'),
}
items_to_handlers = {
'image': tfexample_decoder.Image(
image_key='image/encoded',
format_key='image/format',
channels=3),
'image_name': tfexample_decoder.Tensor('image/filename'),
'height': tfexample_decoder.Tensor('image/height'),
'width': tfexample_decoder.Tensor('image/width'),
'labels_class': tfexample_decoder.Image(
image_key='image/segmentation/class/encoded',
format_key='image/segmentation/class/format',
channels=1),
}
decoder = tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
return dataset.Dataset(
data_sources=file_pattern,
reader=tf.TFRecordReader,
decoder=decoder,
num_samples=splits_to_sizes[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
ignore_label=ignore_label,
num_classes=num_classes,
name=dataset_name,
multi_label=True)
| 6,847 | 33.069652 | 80 | py |
models | models-master/research/deeplab/deprecated/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/deeplab/evaluation/parsing_covering.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the Parsing Covering metric.
Parsing Covering is a region-based metric for evaluating the task of
image parsing, aka panoptic segmentation.
Please see the paper for details:
"DeeperLab: Single-Shot Image Parser", Tien-Ju Yang, Maxwell D. Collins,
Yukun Zhu, Jyh-Jing Hwang, Ting Liu, Xiao Zhang, Vivienne Sze,
George Papandreou, Liang-Chieh Chen. arXiv: 1902.05093, 2019.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import prettytable
import six
from deeplab.evaluation import base_metric
class ParsingCovering(base_metric.SegmentationMetric):
r"""Metric class for Parsing Covering.
Computes segmentation covering metric introduced in (Arbelaez, et al., 2010)
with extension to handle multi-class semantic labels (a.k.a. parsing
covering). Specifically, segmentation covering (SC) is defined in Eq. (8) in
(Arbelaez et al., 2010) as:
SC(c) = \sum_{R\in S}(|R| * \max_{R'\in S'}O(R,R')) / \sum_{R\in S}|R|,
where S are the groundtruth instance regions and S' are the predicted
instance regions. The parsing covering is simply:
PC = \sum_{c=1}^{C}SC(c) / C,
where C is the number of classes.
"""
def __init__(self,
num_categories,
ignored_label,
max_instances_per_category,
offset,
normalize_by_image_size=True):
"""Initialization for ParsingCovering.
Args:
      num_categories: The number of segmentation categories (or "classes") in
        the dataset.
ignored_label: A category id that is ignored in evaluation, e.g. the void
label as defined in COCO panoptic segmentation dataset.
max_instances_per_category: The maximum number of instances for each
category. Used in ensuring unique instance labels.
offset: The maximum number of unique labels. This is used, by multiplying
the ground-truth labels, to generate unique ids for individual regions
of overlap between groundtruth and predicted segments.
normalize_by_image_size: Whether to normalize groundtruth instance region
areas by image size. If True, groundtruth instance areas and weighted
        IoUs will be divided by the size of the corresponding image before
        being accumulated across the dataset.
"""
super(ParsingCovering, self).__init__(num_categories, ignored_label,
max_instances_per_category, offset)
self.normalize_by_image_size = normalize_by_image_size
def compare_and_accumulate(
self, groundtruth_category_array, groundtruth_instance_array,
predicted_category_array, predicted_instance_array):
"""See base class."""
# Allocate intermediate data structures.
max_ious = np.zeros([self.num_categories, self.max_instances_per_category],
dtype=np.float64)
gt_areas = np.zeros([self.num_categories, self.max_instances_per_category],
dtype=np.float64)
pred_areas = np.zeros(
[self.num_categories, self.max_instances_per_category],
dtype=np.float64)
# This is a dictionary in the format:
# {(category, gt_instance): [(pred_instance, intersection_area)]}.
intersections = collections.defaultdict(list)
# First, combine the category and instance labels so that every unique
# value for (category, instance) is assigned a unique integer label.
pred_segment_id = self._naively_combine_labels(predicted_category_array,
predicted_instance_array)
gt_segment_id = self._naively_combine_labels(groundtruth_category_array,
groundtruth_instance_array)
# Next, combine the groundtruth and predicted labels. Dividing up the pixels
# based on which groundtruth segment and which predicted segment they belong
# to, this will assign a different 32-bit integer label to each choice
# of (groundtruth segment, predicted segment), encoded as
# gt_segment_id * offset + pred_segment_id.
intersection_id_array = (
gt_segment_id.astype(np.uint32) * self.offset +
pred_segment_id.astype(np.uint32))
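    # Illustrative example of the encoding (not in the original code): with
    # max_instances_per_category=256 and offset=256*256, a groundtruth pixel
    # labeled (category=3, instance=2) gets gt_segment_id = 3 * 256 + 2 = 770;
    # if the prediction there is (category=3, instance=5), pred_segment_id =
    # 3 * 256 + 5 = 773, and the combined id is 770 * 65536 + 773.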
# For every combination of (groundtruth segment, predicted segment) with a
# non-empty intersection, this counts the number of pixels in that
# intersection.
intersection_ids, intersection_areas = np.unique(
intersection_id_array, return_counts=True)
# Find areas of all groundtruth and predicted instances, as well as of their
# intersections.
for intersection_id, intersection_area in six.moves.zip(
intersection_ids, intersection_areas):
gt_segment_id = intersection_id // self.offset
gt_category = gt_segment_id // self.max_instances_per_category
if gt_category == self.ignored_label:
continue
gt_instance = gt_segment_id % self.max_instances_per_category
gt_areas[gt_category, gt_instance] += intersection_area
pred_segment_id = intersection_id % self.offset
pred_category = pred_segment_id // self.max_instances_per_category
pred_instance = pred_segment_id % self.max_instances_per_category
pred_areas[pred_category, pred_instance] += intersection_area
if pred_category != gt_category:
continue
intersections[gt_category, gt_instance].append((pred_instance,
intersection_area))
# Find maximum IoU for every groundtruth instance.
for gt_label, instance_intersections in six.iteritems(intersections):
category, gt_instance = gt_label
gt_area = gt_areas[category, gt_instance]
ious = []
for pred_instance, intersection_area in instance_intersections:
pred_area = pred_areas[category, pred_instance]
union = gt_area + pred_area - intersection_area
ious.append(intersection_area / union)
max_ious[category, gt_instance] = max(ious)
# Normalize groundtruth instance areas by image size if necessary.
if self.normalize_by_image_size:
gt_areas /= groundtruth_category_array.size
# Compute per-class weighted IoUs and areas summed over all groundtruth
# instances.
self.weighted_iou_per_class += np.sum(max_ious * gt_areas, axis=-1)
self.gt_area_per_class += np.sum(gt_areas, axis=-1)
return self.result()
def result_per_category(self):
"""See base class."""
return base_metric.realdiv_maybe_zero(self.weighted_iou_per_class,
self.gt_area_per_class)
def _valid_categories(self):
"""Categories with a "valid" value for the metric, have > 0 instances.
We will ignore the `ignore_label` class and other classes which have
groundtruth area of 0.
Returns:
Boolean array of shape `[num_categories]`.
"""
valid_categories = np.not_equal(self.gt_area_per_class, 0)
if self.ignored_label >= 0 and self.ignored_label < self.num_categories:
valid_categories[self.ignored_label] = False
return valid_categories
def detailed_results(self, is_thing=None):
"""See base class."""
valid_categories = self._valid_categories()
# If known, break down which categories are valid _and_ things/stuff.
category_sets = collections.OrderedDict()
category_sets['All'] = valid_categories
if is_thing is not None:
category_sets['Things'] = np.logical_and(valid_categories, is_thing)
category_sets['Stuff'] = np.logical_and(valid_categories,
np.logical_not(is_thing))
covering_per_class = self.result_per_category()
results = {}
for category_set_name, in_category_set in six.iteritems(category_sets):
if np.any(in_category_set):
results[category_set_name] = {
'pc': np.mean(covering_per_class[in_category_set]),
# The number of valid categories in this subset.
'n': np.sum(in_category_set.astype(np.int32)),
}
else:
results[category_set_name] = {'pc': 0, 'n': 0}
return results
def print_detailed_results(self, is_thing=None, print_digits=3):
"""See base class."""
results = self.detailed_results(is_thing=is_thing)
tab = prettytable.PrettyTable()
tab.add_column('', [], align='l')
for fieldname in ['PC', 'N']:
tab.add_column(fieldname, [], align='r')
for category_set, subset_results in six.iteritems(results):
data_cols = [
round(subset_results['pc'], print_digits) * 100, subset_results['n']
]
tab.add_row([category_set] + data_cols)
print(tab)
def result(self):
"""See base class."""
covering_per_class = self.result_per_category()
valid_categories = self._valid_categories()
if not np.any(valid_categories):
return 0.
return np.mean(covering_per_class[valid_categories])
def merge(self, other_instance):
"""See base class."""
self.weighted_iou_per_class += other_instance.weighted_iou_per_class
self.gt_area_per_class += other_instance.gt_area_per_class
def reset(self):
"""See base class."""
self.weighted_iou_per_class = np.zeros(
self.num_categories, dtype=np.float64)
self.gt_area_per_class = np.zeros(self.num_categories, dtype=np.float64)
| 10,118 | 39.967611 | 80 | py |
models | models-master/research/deeplab/evaluation/eval_coco_format.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Computes evaluation metrics on groundtruth and predictions in COCO format.
The Common Objects in Context (COCO) dataset defines a format for specifying
combined semantic and instance segmentations as "panoptic" segmentations. This
is done with the combination of JSON and image files as specified at:
http://cocodataset.org/#format-results
where the JSON file specifies the overall structure of the result,
including the categories for each annotation, and the images specify the image
region for each annotation in that image by its ID.
This script computes additional metrics such as Parsing Covering on datasets and
predictions in this format. An implementation of Panoptic Quality is also
provided for convenience.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
from PIL import Image
import utils as panopticapi_utils
import six
from deeplab.evaluation import panoptic_quality
from deeplab.evaluation import parsing_covering
FLAGS = flags.FLAGS
flags.DEFINE_string(
'gt_json_file', None,
    'Path to a JSON file giving ground-truth annotations in COCO format.')
flags.DEFINE_string('pred_json_file', None,
'Path to a JSON file for the predictions to evaluate.')
flags.DEFINE_string(
'gt_folder', None,
'Folder containing panoptic-format ID images to match ground-truth '
'annotations to image regions.')
flags.DEFINE_string('pred_folder', None,
'Folder containing ID images for predictions.')
flags.DEFINE_enum(
'metric', 'pq', ['pq', 'pc'], 'Shorthand name of a metric to compute. '
'Supported values are:\n'
'Panoptic Quality (pq)\n'
'Parsing Covering (pc)')
flags.DEFINE_integer(
'num_categories', 201,
'The number of segmentation categories (or "classes") in the dataset.')
flags.DEFINE_integer(
'ignored_label', 0,
'A category id that is ignored in evaluation, e.g. the void label as '
'defined in COCO panoptic segmentation dataset.')
flags.DEFINE_integer(
'max_instances_per_category', 256,
'The maximum number of instances for each category. Used in ensuring '
'unique instance labels.')
flags.DEFINE_integer('intersection_offset', None,
'The maximum number of unique labels.')
flags.DEFINE_bool(
'normalize_by_image_size', True,
'Whether to normalize groundtruth instance region areas by image size. If '
'True, groundtruth instance areas and weighted IoUs will be divided by the '
'size of the corresponding image before accumulated across the dataset. '
'Only used for Parsing Covering (pc) evaluation.')
flags.DEFINE_integer(
'num_workers', 0, 'If set to a positive number, will spawn child processes '
'to compute parts of the metric in parallel by splitting '
'the images between the workers. If set to -1, will use '
'the value of multiprocessing.cpu_count().')
flags.DEFINE_integer('print_digits', 3,
'Number of significant digits to print in metrics.')
def _build_metric(metric,
num_categories,
ignored_label,
max_instances_per_category,
intersection_offset=None,
normalize_by_image_size=True):
"""Creates a metric aggregator objet of the given name."""
if metric == 'pq':
logging.warning('One should check Panoptic Quality results against the '
'official COCO API code. Small numerical differences '
'(< 0.1%) can be magnified by rounding.')
return panoptic_quality.PanopticQuality(num_categories, ignored_label,
max_instances_per_category,
intersection_offset)
elif metric == 'pc':
return parsing_covering.ParsingCovering(
num_categories, ignored_label, max_instances_per_category,
intersection_offset, normalize_by_image_size)
else:
raise ValueError('No implementation for metric "%s"' % metric)
def _matched_annotations(gt_json, pred_json):
"""Yields a set of (groundtruth, prediction) image annotation pairs.."""
image_id_to_pred_ann = {
annotation['image_id']: annotation
for annotation in pred_json['annotations']
}
for gt_ann in gt_json['annotations']:
image_id = gt_ann['image_id']
pred_ann = image_id_to_pred_ann[image_id]
yield gt_ann, pred_ann
def _open_panoptic_id_image(image_path):
"""Loads a COCO-format panoptic ID image from file."""
return panopticapi_utils.rgb2id(
np.array(Image.open(image_path), dtype=np.uint32))
def _split_panoptic(ann_json, id_array, ignored_label, allow_crowds):
"""Given the COCO JSON and ID map, splits into categories and instances."""
category = np.zeros(id_array.shape, np.uint16)
instance = np.zeros(id_array.shape, np.uint16)
next_instance_id = collections.defaultdict(int)
# Skip instance label 0 for ignored label. That is reserved for void.
next_instance_id[ignored_label] = 1
for segment_info in ann_json['segments_info']:
if allow_crowds and segment_info['iscrowd']:
category_id = ignored_label
else:
category_id = segment_info['category_id']
mask = np.equal(id_array, segment_info['id'])
category[mask] = category_id
instance[mask] = next_instance_id[category_id]
next_instance_id[category_id] += 1
return category, instance
def _category_and_instance_from_annotation(ann_json, folder, ignored_label,
allow_crowds):
"""Given the COCO JSON annotations, finds maps of categories and instances."""
panoptic_id_image = _open_panoptic_id_image(
os.path.join(folder, ann_json['file_name']))
return _split_panoptic(ann_json, panoptic_id_image, ignored_label,
allow_crowds)
def _compute_metric(metric_aggregator, gt_folder, pred_folder,
annotation_pairs):
"""Iterates over matched annotation pairs and computes a metric over them."""
for gt_ann, pred_ann in annotation_pairs:
# We only expect "iscrowd" to appear in the ground-truth, and not in model
# output. In predicted JSON it is simply ignored, as done in official code.
gt_category, gt_instance = _category_and_instance_from_annotation(
gt_ann, gt_folder, metric_aggregator.ignored_label, True)
pred_category, pred_instance = _category_and_instance_from_annotation(
pred_ann, pred_folder, metric_aggregator.ignored_label, False)
metric_aggregator.compare_and_accumulate(gt_category, gt_instance,
pred_category, pred_instance)
return metric_aggregator
def _iterate_work_queue(work_queue):
"""Creates an iterable that retrieves items from a queue until one is None."""
task = work_queue.get(block=True)
while task is not None:
yield task
task = work_queue.get(block=True)
def _run_metrics_worker(metric_aggregator, gt_folder, pred_folder, work_queue,
result_queue):
result = _compute_metric(metric_aggregator, gt_folder, pred_folder,
_iterate_work_queue(work_queue))
result_queue.put(result, block=True)
def _is_thing_array(categories_json, ignored_label):
"""is_thing[category_id] is a bool on if category is "thing" or "stuff"."""
is_thing_dict = {}
for category_json in categories_json:
is_thing_dict[category_json['id']] = bool(category_json['isthing'])
# Check our assumption that the category ids are consecutive.
# Usually metrics should be able to handle this case, but adding a warning
# here.
max_category_id = max(six.iterkeys(is_thing_dict))
if len(is_thing_dict) != max_category_id + 1:
seen_ids = six.viewkeys(is_thing_dict)
all_ids = set(six.moves.range(max_category_id + 1))
unseen_ids = all_ids.difference(seen_ids)
if unseen_ids != {ignored_label}:
logging.warning(
'Nonconsecutive category ids or no category JSON specified for ids: '
'%s', unseen_ids)
is_thing_array = np.zeros(max_category_id + 1)
for category_id, is_thing in six.iteritems(is_thing_dict):
is_thing_array[category_id] = is_thing
return is_thing_array
def eval_coco_format(gt_json_file,
pred_json_file,
gt_folder=None,
pred_folder=None,
metric='pq',
num_categories=201,
ignored_label=0,
max_instances_per_category=256,
intersection_offset=None,
normalize_by_image_size=True,
num_workers=0,
print_digits=3):
"""Top-level code to compute metrics on a COCO-format result.
Note that the default values are set for COCO panoptic segmentation dataset,
and thus the users may want to change it for their own dataset evaluation.
Args:
gt_json_file: Path to a JSON file giving ground-truth annotations in COCO
format.
pred_json_file: Path to a JSON file for the predictions to evaluate.
gt_folder: Folder containing panoptic-format ID images to match ground-truth
annotations to image regions.
pred_folder: Folder containing ID images for predictions.
metric: Name of a metric to compute.
num_categories: The number of segmentation categories (or "classes") in the
dataset.
ignored_label: A category id that is ignored in evaluation, e.g. the "void"
label as defined in the COCO panoptic segmentation dataset.
max_instances_per_category: The maximum number of instances for each
category. Used in ensuring unique instance labels.
intersection_offset: The maximum number of unique labels.
normalize_by_image_size: Whether to normalize groundtruth instance region
areas by image size. If True, groundtruth instance areas and weighted IoUs
      will be divided by the size of the corresponding image before being
      accumulated across the dataset. Only used for Parsing Covering (pc)
      evaluation.
num_workers: If set to a positive number, will spawn child processes to
compute parts of the metric in parallel by splitting the images between
the workers. If set to -1, will use the value of
multiprocessing.cpu_count().
print_digits: Number of significant digits to print in summary of computed
metrics.
Returns:
    A dictionary of detailed results for the metric, keyed by category subset
    ('All', plus 'Things' and 'Stuff' when category metadata is available), as
    produced by the metric aggregator's detailed_results() method.
"""
with open(gt_json_file, 'r') as gt_json_fo:
gt_json = json.load(gt_json_fo)
with open(pred_json_file, 'r') as pred_json_fo:
pred_json = json.load(pred_json_fo)
if gt_folder is None:
gt_folder = gt_json_file.replace('.json', '')
if pred_folder is None:
pred_folder = pred_json_file.replace('.json', '')
if intersection_offset is None:
intersection_offset = (num_categories + 1) * max_instances_per_category
metric_aggregator = _build_metric(
metric, num_categories, ignored_label, max_instances_per_category,
intersection_offset, normalize_by_image_size)
if num_workers == -1:
logging.info('Attempting to get the CPU count to set # workers.')
num_workers = multiprocessing.cpu_count()
if num_workers > 0:
logging.info('Computing metric in parallel with %d workers.', num_workers)
work_queue = multiprocessing.Queue()
result_queue = multiprocessing.Queue()
workers = []
worker_args = (metric_aggregator, gt_folder, pred_folder, work_queue,
result_queue)
for _ in six.moves.range(num_workers):
workers.append(
multiprocessing.Process(target=_run_metrics_worker, args=worker_args))
for worker in workers:
worker.start()
for ann_pair in _matched_annotations(gt_json, pred_json):
work_queue.put(ann_pair, block=True)
    # Will cause each worker to return a result and terminate upon receiving a
# None task.
for _ in six.moves.range(num_workers):
work_queue.put(None, block=True)
# Retrieve results.
for _ in six.moves.range(num_workers):
metric_aggregator.merge(result_queue.get(block=True))
for worker in workers:
worker.join()
else:
logging.info('Computing metric in a single process.')
annotation_pairs = _matched_annotations(gt_json, pred_json)
_compute_metric(metric_aggregator, gt_folder, pred_folder, annotation_pairs)
is_thing = _is_thing_array(gt_json['categories'], ignored_label)
metric_aggregator.print_detailed_results(
is_thing=is_thing, print_digits=print_digits)
return metric_aggregator.detailed_results(is_thing=is_thing)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
eval_coco_format(FLAGS.gt_json_file, FLAGS.pred_json_file, FLAGS.gt_folder,
FLAGS.pred_folder, FLAGS.metric, FLAGS.num_categories,
FLAGS.ignored_label, FLAGS.max_instances_per_category,
FLAGS.intersection_offset, FLAGS.normalize_by_image_size,
FLAGS.num_workers, FLAGS.print_digits)
if __name__ == '__main__':
flags.mark_flags_as_required(
['gt_json_file', 'gt_folder', 'pred_json_file', 'pred_folder'])
app.run(main)
| 14,106 | 40.613569 | 80 | py |
models | models-master/research/deeplab/evaluation/panoptic_quality.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the Panoptic Quality metric.
Panoptic Quality is an instance-based metric for evaluating the task of
image parsing, aka panoptic segmentation.
Please see the paper for details:
"Panoptic Segmentation", Alexander Kirillov, Kaiming He, Ross Girshick,
Carsten Rother and Piotr Dollar. arXiv:1801.00868, 2018.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import prettytable
import six
from deeplab.evaluation import base_metric
def _ids_to_counts(id_array):
"""Given a numpy array, a mapping from each unique entry to its count."""
ids, counts = np.unique(id_array, return_counts=True)
return dict(six.moves.zip(ids, counts))
class PanopticQuality(base_metric.SegmentationMetric):
"""Metric class for Panoptic Quality.
"Panoptic Segmentation" by Alexander Kirillov, Kaiming He, Ross Girshick,
Carsten Rother, Piotr Dollar.
https://arxiv.org/abs/1801.00868
"""
def compare_and_accumulate(
self, groundtruth_category_array, groundtruth_instance_array,
predicted_category_array, predicted_instance_array):
"""See base class."""
# First, combine the category and instance labels so that every unique
# value for (category, instance) is assigned a unique integer label.
pred_segment_id = self._naively_combine_labels(predicted_category_array,
predicted_instance_array)
gt_segment_id = self._naively_combine_labels(groundtruth_category_array,
groundtruth_instance_array)
# Pre-calculate areas for all groundtruth and predicted segments.
gt_segment_areas = _ids_to_counts(gt_segment_id)
pred_segment_areas = _ids_to_counts(pred_segment_id)
# We assume there is only one void segment and it has instance id = 0.
void_segment_id = self.ignored_label * self.max_instances_per_category
    # There may be other ignored groundtruth segments with instance id > 0; find
# those ids using the unique segment ids extracted with the area computation
# above.
ignored_segment_ids = {
gt_segment_id for gt_segment_id in six.iterkeys(gt_segment_areas)
if (gt_segment_id //
self.max_instances_per_category) == self.ignored_label
}
# Next, combine the groundtruth and predicted labels. Dividing up the pixels
# based on which groundtruth segment and which predicted segment they belong
# to, this will assign a different 32-bit integer label to each choice
# of (groundtruth segment, predicted segment), encoded as
# gt_segment_id * offset + pred_segment_id.
intersection_id_array = (
gt_segment_id.astype(np.uint32) * self.offset +
pred_segment_id.astype(np.uint32))
# For every combination of (groundtruth segment, predicted segment) with a
# non-empty intersection, this counts the number of pixels in that
# intersection.
intersection_areas = _ids_to_counts(intersection_id_array)
# Helper function that computes the area of the overlap between a predicted
# segment and the ground-truth void/ignored segment.
def prediction_void_overlap(pred_segment_id):
void_intersection_id = void_segment_id * self.offset + pred_segment_id
return intersection_areas.get(void_intersection_id, 0)
# Compute overall ignored overlap.
def prediction_ignored_overlap(pred_segment_id):
total_ignored_overlap = 0
for ignored_segment_id in ignored_segment_ids:
intersection_id = ignored_segment_id * self.offset + pred_segment_id
total_ignored_overlap += intersection_areas.get(intersection_id, 0)
return total_ignored_overlap
# Sets that are populated with which segments groundtruth/predicted segments
# have been matched with overlapping predicted/groundtruth segments
# respectively.
gt_matched = set()
pred_matched = set()
# Calculate IoU per pair of intersecting segments of the same category.
for intersection_id, intersection_area in six.iteritems(intersection_areas):
gt_segment_id = intersection_id // self.offset
pred_segment_id = intersection_id % self.offset
gt_category = gt_segment_id // self.max_instances_per_category
pred_category = pred_segment_id // self.max_instances_per_category
if gt_category != pred_category:
continue
# Union between the groundtruth and predicted segments being compared does
# not include the portion of the predicted segment that consists of
# groundtruth "void" pixels.
union = (
gt_segment_areas[gt_segment_id] +
pred_segment_areas[pred_segment_id] - intersection_area -
prediction_void_overlap(pred_segment_id))
iou = intersection_area / union
if iou > 0.5:
self.tp_per_class[gt_category] += 1
self.iou_per_class[gt_category] += iou
gt_matched.add(gt_segment_id)
pred_matched.add(pred_segment_id)
# Count false negatives for each category.
for gt_segment_id in six.iterkeys(gt_segment_areas):
if gt_segment_id in gt_matched:
continue
category = gt_segment_id // self.max_instances_per_category
# Failing to detect a void segment is not a false negative.
if category == self.ignored_label:
continue
self.fn_per_class[category] += 1
# Count false positives for each category.
for pred_segment_id in six.iterkeys(pred_segment_areas):
if pred_segment_id in pred_matched:
continue
      # A false positive is not penalized if it is mostly ignored in the
# groundtruth.
if (prediction_ignored_overlap(pred_segment_id) /
pred_segment_areas[pred_segment_id]) > 0.5:
continue
category = pred_segment_id // self.max_instances_per_category
self.fp_per_class[category] += 1
return self.result()
def _valid_categories(self):
"""Categories with a "valid" value for the metric, have > 0 instances.
We will ignore the `ignore_label` class and other classes which have
`tp + fn + fp = 0`.
Returns:
Boolean array of shape `[num_categories]`.
"""
valid_categories = np.not_equal(
self.tp_per_class + self.fn_per_class + self.fp_per_class, 0)
if self.ignored_label >= 0 and self.ignored_label < self.num_categories:
valid_categories[self.ignored_label] = False
return valid_categories
def detailed_results(self, is_thing=None):
"""See base class."""
valid_categories = self._valid_categories()
# If known, break down which categories are valid _and_ things/stuff.
category_sets = collections.OrderedDict()
category_sets['All'] = valid_categories
if is_thing is not None:
category_sets['Things'] = np.logical_and(valid_categories, is_thing)
category_sets['Stuff'] = np.logical_and(valid_categories,
np.logical_not(is_thing))
# Compute individual per-class metrics that constitute factors of PQ.
sq = base_metric.realdiv_maybe_zero(self.iou_per_class, self.tp_per_class)
rq = base_metric.realdiv_maybe_zero(
self.tp_per_class,
self.tp_per_class + 0.5 * self.fn_per_class + 0.5 * self.fp_per_class)
pq = np.multiply(sq, rq)
# Assemble detailed results dictionary.
results = {}
for category_set_name, in_category_set in six.iteritems(category_sets):
if np.any(in_category_set):
results[category_set_name] = {
'pq': np.mean(pq[in_category_set]),
'sq': np.mean(sq[in_category_set]),
'rq': np.mean(rq[in_category_set]),
# The number of categories in this subset.
'n': np.sum(in_category_set.astype(np.int32)),
}
else:
results[category_set_name] = {'pq': 0, 'sq': 0, 'rq': 0, 'n': 0}
return results
def result_per_category(self):
"""See base class."""
sq = base_metric.realdiv_maybe_zero(self.iou_per_class, self.tp_per_class)
rq = base_metric.realdiv_maybe_zero(
self.tp_per_class,
self.tp_per_class + 0.5 * self.fn_per_class + 0.5 * self.fp_per_class)
return np.multiply(sq, rq)
def print_detailed_results(self, is_thing=None, print_digits=3):
"""See base class."""
results = self.detailed_results(is_thing=is_thing)
tab = prettytable.PrettyTable()
tab.add_column('', [], align='l')
for fieldname in ['PQ', 'SQ', 'RQ', 'N']:
tab.add_column(fieldname, [], align='r')
for category_set, subset_results in six.iteritems(results):
data_cols = [
round(subset_results[col_key], print_digits) * 100
for col_key in ['pq', 'sq', 'rq']
]
data_cols += [subset_results['n']]
tab.add_row([category_set] + data_cols)
print(tab)
def result(self):
"""See base class."""
pq_per_class = self.result_per_category()
valid_categories = self._valid_categories()
if not np.any(valid_categories):
return 0.
return np.mean(pq_per_class[valid_categories])
def merge(self, other_instance):
"""See base class."""
self.iou_per_class += other_instance.iou_per_class
self.tp_per_class += other_instance.tp_per_class
self.fn_per_class += other_instance.fn_per_class
self.fp_per_class += other_instance.fp_per_class
def reset(self):
"""See base class."""
self.iou_per_class = np.zeros(self.num_categories, dtype=np.float64)
self.tp_per_class = np.zeros(self.num_categories, dtype=np.float64)
self.fn_per_class = np.zeros(self.num_categories, dtype=np.float64)
self.fp_per_class = np.zeros(self.num_categories, dtype=np.float64)
| 10,446 | 39.180769 | 80 | py |
models | models-master/research/deeplab/evaluation/streaming_metrics_test.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for segmentation "streaming" metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import six
import tensorflow as tf
from deeplab.evaluation import streaming_metrics
from deeplab.evaluation import test_utils
# See the definition of the color names at:
# https://en.wikipedia.org/wiki/Web_colors.
_CLASS_COLOR_MAP = {
(0, 0, 0): 0,
(0, 0, 255): 1, # Person (blue).
(255, 0, 0): 2, # Bear (red).
(0, 255, 0): 3, # Tree (lime).
(255, 0, 255): 4, # Bird (fuchsia).
(0, 255, 255): 5, # Sky (aqua).
(255, 255, 0): 6, # Cat (yellow).
}
class StreamingPanopticQualityTest(tf.test.TestCase):
def test_streaming_metric_on_single_image(self):
offset = 256 * 256
instance_class_map = {
0: 0,
47: 1,
97: 1,
133: 1,
150: 1,
174: 1,
198: 2,
215: 1,
244: 1,
255: 1,
}
gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map(
'team_gt_instance.png', instance_class_map)
pred_classes = test_utils.read_segmentation_with_rgb_color_map(
'team_pred_class.png', _CLASS_COLOR_MAP)
pred_instances = test_utils.read_test_image(
'team_pred_instance.png', mode='L')
gt_class_tensor = tf.placeholder(tf.uint16)
gt_instance_tensor = tf.placeholder(tf.uint16)
pred_class_tensor = tf.placeholder(tf.uint16)
pred_instance_tensor = tf.placeholder(tf.uint16)
qualities, update_pq = streaming_metrics.streaming_panoptic_quality(
gt_class_tensor,
gt_instance_tensor,
pred_class_tensor,
pred_instance_tensor,
num_classes=3,
max_instances_per_category=256,
ignored_label=0,
offset=offset)
pq, sq, rq, total_tp, total_fn, total_fp = tf.unstack(qualities, 6, axis=0)
feed_dict = {
gt_class_tensor: gt_classes,
gt_instance_tensor: gt_instances,
pred_class_tensor: pred_classes,
pred_instance_tensor: pred_instances
}
with self.session() as sess:
sess.run(tf.local_variables_initializer())
sess.run(update_pq, feed_dict=feed_dict)
(result_pq, result_sq, result_rq, result_total_tp, result_total_fn,
result_total_fp) = sess.run([pq, sq, rq, total_tp, total_fn, total_fp],
feed_dict=feed_dict)
np.testing.assert_array_almost_equal(
result_pq, [2.06104, 0.7024, 0.54069], decimal=4)
np.testing.assert_array_almost_equal(
result_sq, [2.06104, 0.7526, 0.54069], decimal=4)
np.testing.assert_array_almost_equal(result_rq, [1., 0.9333, 1.], decimal=4)
np.testing.assert_array_almost_equal(
result_total_tp, [1., 7., 1.], decimal=4)
np.testing.assert_array_almost_equal(
result_total_fn, [0., 1., 0.], decimal=4)
np.testing.assert_array_almost_equal(
result_total_fp, [0., 0., 0.], decimal=4)
def test_streaming_metric_on_multiple_images(self):
num_classes = 7
offset = 256 * 256
bird_gt_instance_class_map = {
92: 5,
176: 3,
255: 4,
}
cat_gt_instance_class_map = {
0: 0,
255: 6,
}
team_gt_instance_class_map = {
0: 0,
47: 1,
97: 1,
133: 1,
150: 1,
174: 1,
198: 2,
215: 1,
244: 1,
255: 1,
}
test_image = collections.namedtuple(
'TestImage',
['gt_class_map', 'gt_path', 'pred_inst_path', 'pred_class_path'])
test_images = [
test_image(bird_gt_instance_class_map, 'bird_gt.png',
'bird_pred_instance.png', 'bird_pred_class.png'),
test_image(cat_gt_instance_class_map, 'cat_gt.png',
'cat_pred_instance.png', 'cat_pred_class.png'),
test_image(team_gt_instance_class_map, 'team_gt_instance.png',
'team_pred_instance.png', 'team_pred_class.png'),
]
gt_classes = []
gt_instances = []
pred_classes = []
pred_instances = []
for test_image in test_images:
(image_gt_instances,
image_gt_classes) = test_utils.panoptic_segmentation_with_class_map(
test_image.gt_path, test_image.gt_class_map)
gt_classes.append(image_gt_classes)
gt_instances.append(image_gt_instances)
pred_classes.append(
test_utils.read_segmentation_with_rgb_color_map(
test_image.pred_class_path, _CLASS_COLOR_MAP))
pred_instances.append(
test_utils.read_test_image(test_image.pred_inst_path, mode='L'))
gt_class_tensor = tf.placeholder(tf.uint16)
gt_instance_tensor = tf.placeholder(tf.uint16)
pred_class_tensor = tf.placeholder(tf.uint16)
pred_instance_tensor = tf.placeholder(tf.uint16)
qualities, update_pq = streaming_metrics.streaming_panoptic_quality(
gt_class_tensor,
gt_instance_tensor,
pred_class_tensor,
pred_instance_tensor,
num_classes=num_classes,
max_instances_per_category=256,
ignored_label=0,
offset=offset)
pq, sq, rq, total_tp, total_fn, total_fp = tf.unstack(qualities, 6, axis=0)
with self.session() as sess:
sess.run(tf.local_variables_initializer())
for pred_class, pred_instance, gt_class, gt_instance in six.moves.zip(
pred_classes, pred_instances, gt_classes, gt_instances):
sess.run(
update_pq,
feed_dict={
gt_class_tensor: gt_class,
gt_instance_tensor: gt_instance,
pred_class_tensor: pred_class,
pred_instance_tensor: pred_instance
})
(result_pq, result_sq, result_rq, result_total_tp, result_total_fn,
result_total_fp) = sess.run(
[pq, sq, rq, total_tp, total_fn, total_fp],
feed_dict={
gt_class_tensor: 0,
gt_instance_tensor: 0,
pred_class_tensor: 0,
pred_instance_tensor: 0
})
np.testing.assert_array_almost_equal(
result_pq,
[4.3107, 0.7024, 0.54069, 0.745353, 0.85768, 0.99107, 0.77410],
decimal=4)
np.testing.assert_array_almost_equal(
result_sq, [5.3883, 0.7526, 0.5407, 0.7454, 0.8577, 0.9911, 0.7741],
decimal=4)
np.testing.assert_array_almost_equal(
result_rq, [0.8, 0.9333, 1., 1., 1., 1., 1.], decimal=4)
np.testing.assert_array_almost_equal(
result_total_tp, [2., 7., 1., 1., 1., 1., 1.], decimal=4)
np.testing.assert_array_almost_equal(
result_total_fn, [0., 1., 0., 0., 0., 0., 0.], decimal=4)
np.testing.assert_array_almost_equal(
result_total_fp, [1., 0., 0., 0., 0., 0., 0.], decimal=4)
class StreamingParsingCoveringTest(tf.test.TestCase):
def test_streaming_metric_on_single_image(self):
offset = 256 * 256
instance_class_map = {
0: 0,
47: 1,
97: 1,
133: 1,
150: 1,
174: 1,
198: 2,
215: 1,
244: 1,
255: 1,
}
gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map(
'team_gt_instance.png', instance_class_map)
pred_classes = test_utils.read_segmentation_with_rgb_color_map(
'team_pred_class.png', _CLASS_COLOR_MAP)
pred_instances = test_utils.read_test_image(
'team_pred_instance.png', mode='L')
gt_class_tensor = tf.placeholder(tf.uint16)
gt_instance_tensor = tf.placeholder(tf.uint16)
pred_class_tensor = tf.placeholder(tf.uint16)
pred_instance_tensor = tf.placeholder(tf.uint16)
coverings, update_ops = streaming_metrics.streaming_parsing_covering(
gt_class_tensor,
gt_instance_tensor,
pred_class_tensor,
pred_instance_tensor,
num_classes=3,
max_instances_per_category=256,
ignored_label=0,
offset=offset,
normalize_by_image_size=False)
(per_class_coverings, per_class_weighted_ious, per_class_gt_areas) = (
tf.unstack(coverings, num=3, axis=0))
feed_dict = {
gt_class_tensor: gt_classes,
gt_instance_tensor: gt_instances,
pred_class_tensor: pred_classes,
pred_instance_tensor: pred_instances
}
with self.session() as sess:
sess.run(tf.local_variables_initializer())
sess.run(update_ops, feed_dict=feed_dict)
(result_per_class_coverings, result_per_class_weighted_ious,
result_per_class_gt_areas) = (
sess.run([
per_class_coverings,
per_class_weighted_ious,
per_class_gt_areas,
],
feed_dict=feed_dict))
np.testing.assert_array_almost_equal(
result_per_class_coverings, [0.0, 0.7009696912, 0.5406896552],
decimal=4)
np.testing.assert_array_almost_equal(
result_per_class_weighted_ious, [0.0, 39864.14634, 3136], decimal=4)
np.testing.assert_array_equal(result_per_class_gt_areas, [0, 56870, 5800])
def test_streaming_metric_on_multiple_images(self):
"""Tests streaming parsing covering metric."""
num_classes = 7
offset = 256 * 256
bird_gt_instance_class_map = {
92: 5,
176: 3,
255: 4,
}
cat_gt_instance_class_map = {
0: 0,
255: 6,
}
team_gt_instance_class_map = {
0: 0,
47: 1,
97: 1,
133: 1,
150: 1,
174: 1,
198: 2,
215: 1,
244: 1,
255: 1,
}
test_image = collections.namedtuple(
'TestImage',
['gt_class_map', 'gt_path', 'pred_inst_path', 'pred_class_path'])
test_images = [
test_image(bird_gt_instance_class_map, 'bird_gt.png',
'bird_pred_instance.png', 'bird_pred_class.png'),
test_image(cat_gt_instance_class_map, 'cat_gt.png',
'cat_pred_instance.png', 'cat_pred_class.png'),
test_image(team_gt_instance_class_map, 'team_gt_instance.png',
'team_pred_instance.png', 'team_pred_class.png'),
]
gt_classes = []
gt_instances = []
pred_classes = []
pred_instances = []
for test_image in test_images:
(image_gt_instances,
image_gt_classes) = test_utils.panoptic_segmentation_with_class_map(
test_image.gt_path, test_image.gt_class_map)
gt_classes.append(image_gt_classes)
gt_instances.append(image_gt_instances)
pred_instances.append(
test_utils.read_test_image(test_image.pred_inst_path, mode='L'))
pred_classes.append(
test_utils.read_segmentation_with_rgb_color_map(
test_image.pred_class_path, _CLASS_COLOR_MAP))
gt_class_tensor = tf.placeholder(tf.uint16)
gt_instance_tensor = tf.placeholder(tf.uint16)
pred_class_tensor = tf.placeholder(tf.uint16)
pred_instance_tensor = tf.placeholder(tf.uint16)
coverings, update_ops = streaming_metrics.streaming_parsing_covering(
gt_class_tensor,
gt_instance_tensor,
pred_class_tensor,
pred_instance_tensor,
num_classes=num_classes,
max_instances_per_category=256,
ignored_label=0,
offset=offset,
normalize_by_image_size=False)
(per_class_coverings, per_class_weighted_ious, per_class_gt_areas) = (
tf.unstack(coverings, num=3, axis=0))
with self.session() as sess:
sess.run(tf.local_variables_initializer())
for pred_class, pred_instance, gt_class, gt_instance in six.moves.zip(
pred_classes, pred_instances, gt_classes, gt_instances):
sess.run(
update_ops,
feed_dict={
gt_class_tensor: gt_class,
gt_instance_tensor: gt_instance,
pred_class_tensor: pred_class,
pred_instance_tensor: pred_instance
})
(result_per_class_coverings, result_per_class_weighted_ious,
result_per_class_gt_areas) = (
sess.run(
[
per_class_coverings,
per_class_weighted_ious,
per_class_gt_areas,
],
feed_dict={
gt_class_tensor: 0,
gt_instance_tensor: 0,
pred_class_tensor: 0,
pred_instance_tensor: 0
}))
np.testing.assert_array_almost_equal(
result_per_class_coverings, [
0.0,
0.7009696912,
0.5406896552,
0.7453531599,
0.8576779026,
0.9910687881,
0.7741046032,
],
decimal=4)
np.testing.assert_array_almost_equal(
result_per_class_weighted_ious, [
0.0,
39864.14634,
3136,
1177.657993,
2498.41573,
33366.31289,
26671,
],
decimal=4)
np.testing.assert_array_equal(result_per_class_gt_areas, [
0.0,
56870,
5800,
1580,
2913,
33667,
34454,
])
def test_streaming_metric_on_multiple_images_normalize_by_size(self):
"""Tests streaming parsing covering metric with image size normalization."""
num_classes = 7
offset = 256 * 256
bird_gt_instance_class_map = {
92: 5,
176: 3,
255: 4,
}
cat_gt_instance_class_map = {
0: 0,
255: 6,
}
team_gt_instance_class_map = {
0: 0,
47: 1,
97: 1,
133: 1,
150: 1,
174: 1,
198: 2,
215: 1,
244: 1,
255: 1,
}
test_image = collections.namedtuple(
'TestImage',
['gt_class_map', 'gt_path', 'pred_inst_path', 'pred_class_path'])
test_images = [
test_image(bird_gt_instance_class_map, 'bird_gt.png',
'bird_pred_instance.png', 'bird_pred_class.png'),
test_image(cat_gt_instance_class_map, 'cat_gt.png',
'cat_pred_instance.png', 'cat_pred_class.png'),
test_image(team_gt_instance_class_map, 'team_gt_instance.png',
'team_pred_instance.png', 'team_pred_class.png'),
]
gt_classes = []
gt_instances = []
pred_classes = []
pred_instances = []
for test_image in test_images:
(image_gt_instances,
image_gt_classes) = test_utils.panoptic_segmentation_with_class_map(
test_image.gt_path, test_image.gt_class_map)
gt_classes.append(image_gt_classes)
gt_instances.append(image_gt_instances)
pred_instances.append(
test_utils.read_test_image(test_image.pred_inst_path, mode='L'))
pred_classes.append(
test_utils.read_segmentation_with_rgb_color_map(
test_image.pred_class_path, _CLASS_COLOR_MAP))
gt_class_tensor = tf.placeholder(tf.uint16)
gt_instance_tensor = tf.placeholder(tf.uint16)
pred_class_tensor = tf.placeholder(tf.uint16)
pred_instance_tensor = tf.placeholder(tf.uint16)
coverings, update_ops = streaming_metrics.streaming_parsing_covering(
gt_class_tensor,
gt_instance_tensor,
pred_class_tensor,
pred_instance_tensor,
num_classes=num_classes,
max_instances_per_category=256,
ignored_label=0,
offset=offset,
normalize_by_image_size=True)
(per_class_coverings, per_class_weighted_ious, per_class_gt_areas) = (
tf.unstack(coverings, num=3, axis=0))
with self.session() as sess:
sess.run(tf.local_variables_initializer())
for pred_class, pred_instance, gt_class, gt_instance in six.moves.zip(
pred_classes, pred_instances, gt_classes, gt_instances):
sess.run(
update_ops,
feed_dict={
gt_class_tensor: gt_class,
gt_instance_tensor: gt_instance,
pred_class_tensor: pred_class,
pred_instance_tensor: pred_instance
})
(result_per_class_coverings, result_per_class_weighted_ious,
result_per_class_gt_areas) = (
sess.run(
[
per_class_coverings,
per_class_weighted_ious,
per_class_gt_areas,
],
feed_dict={
gt_class_tensor: 0,
gt_instance_tensor: 0,
pred_class_tensor: 0,
pred_instance_tensor: 0
}))
np.testing.assert_array_almost_equal(
result_per_class_coverings, [
0.0,
0.7009696912,
0.5406896552,
0.7453531599,
0.8576779026,
0.9910687881,
0.7741046032,
],
decimal=4)
np.testing.assert_array_almost_equal(
result_per_class_weighted_ious, [
0.0,
0.5002088756,
0.03935002196,
0.03086105851,
0.06547211033,
0.8743792686,
0.2549565051,
],
decimal=4)
np.testing.assert_array_almost_equal(
result_per_class_gt_areas, [
0.0,
0.7135955832,
0.07277746408,
0.04140461216,
0.07633647799,
0.8822589099,
0.3293566581,
],
decimal=4)
if __name__ == '__main__':
tf.test.main()
| 18,218 | 32.125455 | 80 | py |
models | models-master/research/deeplab/evaluation/panoptic_quality_test.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Panoptic Quality metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
import six
from deeplab.evaluation import panoptic_quality
from deeplab.evaluation import test_utils
# See the definition of the color names at:
# https://en.wikipedia.org/wiki/Web_colors.
_CLASS_COLOR_MAP = {
(0, 0, 0): 0,
(0, 0, 255): 1, # Person (blue).
(255, 0, 0): 2, # Bear (red).
(0, 255, 0): 3, # Tree (lime).
(255, 0, 255): 4, # Bird (fuchsia).
(0, 255, 255): 5, # Sky (aqua).
(255, 255, 0): 6, # Cat (yellow).
}
class PanopticQualityTest(absltest.TestCase):
def test_perfect_match(self):
categories = np.zeros([6, 6], np.uint16)
instances = np.array([
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 1, 1, 1],
[1, 2, 1, 1, 1, 1],
],
dtype=np.uint16)
pq = panoptic_quality.PanopticQuality(
num_categories=1,
ignored_label=2,
max_instances_per_category=16,
offset=16)
pq.compare_and_accumulate(categories, instances, categories, instances)
np.testing.assert_array_equal(pq.iou_per_class, [2.0])
np.testing.assert_array_equal(pq.tp_per_class, [2])
np.testing.assert_array_equal(pq.fn_per_class, [0])
np.testing.assert_array_equal(pq.fp_per_class, [0])
np.testing.assert_array_equal(pq.result_per_category(), [1.0])
self.assertEqual(pq.result(), 1.0)
def test_totally_wrong(self):
det_categories = np.array([
[0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype=np.uint16)
gt_categories = 1 - det_categories
instances = np.zeros([6, 6], np.uint16)
pq = panoptic_quality.PanopticQuality(
num_categories=2,
ignored_label=2,
max_instances_per_category=1,
offset=16)
pq.compare_and_accumulate(gt_categories, instances, det_categories,
instances)
np.testing.assert_array_equal(pq.iou_per_class, [0.0, 0.0])
np.testing.assert_array_equal(pq.tp_per_class, [0, 0])
np.testing.assert_array_equal(pq.fn_per_class, [1, 1])
np.testing.assert_array_equal(pq.fp_per_class, [1, 1])
np.testing.assert_array_equal(pq.result_per_category(), [0.0, 0.0])
self.assertEqual(pq.result(), 0.0)
def test_matches_by_iou(self):
good_det_labels = np.array(
[
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
gt_labels = np.array(
[
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
pq = panoptic_quality.PanopticQuality(
num_categories=1,
ignored_label=2,
max_instances_per_category=16,
offset=16)
pq.compare_and_accumulate(
np.zeros_like(gt_labels), gt_labels, np.zeros_like(good_det_labels),
good_det_labels)
# iou(1, 1) = 28/30
# iou(2, 2) = 6/8
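    # Per-class PQ is sum(IoU) / (TP + 0.5 * FN + 0.5 * FP); with two true
    # positives and no FN/FP this is (28/30 + 6/8) / 2.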
np.testing.assert_array_almost_equal(pq.iou_per_class, [28 / 30 + 6 / 8])
np.testing.assert_array_equal(pq.tp_per_class, [2])
np.testing.assert_array_equal(pq.fn_per_class, [0])
np.testing.assert_array_equal(pq.fp_per_class, [0])
self.assertAlmostEqual(pq.result(), (28 / 30 + 6 / 8) / 2)
bad_det_labels = np.array(
[
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
pq.reset()
pq.compare_and_accumulate(
np.zeros_like(gt_labels), gt_labels, np.zeros_like(bad_det_labels),
bad_det_labels)
# iou(1, 1) = 27/32
np.testing.assert_array_almost_equal(pq.iou_per_class, [27 / 32])
np.testing.assert_array_equal(pq.tp_per_class, [1])
np.testing.assert_array_equal(pq.fn_per_class, [1])
np.testing.assert_array_equal(pq.fp_per_class, [1])
self.assertAlmostEqual(pq.result(), (27 / 32) * (1 / 2))
def test_wrong_instances(self):
categories = np.array([
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 1, 2, 2],
[1, 2, 2, 1, 2, 2],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
predicted_instances = np.array([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype=np.uint16)
groundtruth_instances = np.zeros([6, 6], dtype=np.uint16)
pq = panoptic_quality.PanopticQuality(
num_categories=3,
ignored_label=0,
max_instances_per_category=10,
offset=100)
pq.compare_and_accumulate(categories, groundtruth_instances, categories,
predicted_instances)
np.testing.assert_array_equal(pq.iou_per_class, [0.0, 1.0, 0.0])
np.testing.assert_array_equal(pq.tp_per_class, [0, 1, 0])
np.testing.assert_array_equal(pq.fn_per_class, [0, 0, 1])
np.testing.assert_array_equal(pq.fp_per_class, [0, 0, 2])
np.testing.assert_array_equal(pq.result_per_category(), [0, 1, 0])
self.assertAlmostEqual(pq.result(), 0.5)
def test_instance_order_is_arbitrary(self):
categories = np.array([
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 1, 2, 2],
[1, 2, 2, 1, 2, 2],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
predicted_instances = np.array([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype=np.uint16)
groundtruth_instances = np.array([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype=np.uint16)
pq = panoptic_quality.PanopticQuality(
num_categories=3,
ignored_label=0,
max_instances_per_category=10,
offset=100)
pq.compare_and_accumulate(categories, groundtruth_instances, categories,
predicted_instances)
np.testing.assert_array_equal(pq.iou_per_class, [0.0, 1.0, 2.0])
np.testing.assert_array_equal(pq.tp_per_class, [0, 1, 2])
np.testing.assert_array_equal(pq.fn_per_class, [0, 0, 0])
np.testing.assert_array_equal(pq.fp_per_class, [0, 0, 0])
np.testing.assert_array_equal(pq.result_per_category(), [0, 1, 1])
self.assertAlmostEqual(pq.result(), 1.0)
def test_matches_expected(self):
pred_classes = test_utils.read_segmentation_with_rgb_color_map(
'team_pred_class.png', _CLASS_COLOR_MAP)
pred_instances = test_utils.read_test_image(
'team_pred_instance.png', mode='L')
instance_class_map = {
0: 0,
47: 1,
97: 1,
133: 1,
150: 1,
174: 1,
198: 2,
215: 1,
244: 1,
255: 1,
}
gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map(
'team_gt_instance.png', instance_class_map)
pq = panoptic_quality.PanopticQuality(
num_categories=3,
ignored_label=0,
max_instances_per_category=256,
offset=256 * 256)
pq.compare_and_accumulate(gt_classes, gt_instances, pred_classes,
pred_instances)
np.testing.assert_array_almost_equal(
pq.iou_per_class, [2.06104, 5.26827, 0.54069], decimal=4)
np.testing.assert_array_equal(pq.tp_per_class, [1, 7, 1])
np.testing.assert_array_equal(pq.fn_per_class, [0, 1, 0])
np.testing.assert_array_equal(pq.fp_per_class, [0, 0, 0])
np.testing.assert_array_almost_equal(pq.result_per_category(),
[2.061038, 0.702436, 0.54069])
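    # The overall result is consistent with averaging the per-category values
    # of the two non-ignored categories: (0.702436 + 0.54069) / 2 ~= 0.62156.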
self.assertAlmostEqual(pq.result(), 0.62156287)
def test_merge_accumulates_all_across_instances(self):
categories = np.zeros([6, 6], np.uint16)
good_det_labels = np.array([
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
gt_labels = np.array([
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
good_pq = panoptic_quality.PanopticQuality(
num_categories=1,
ignored_label=2,
max_instances_per_category=16,
offset=16)
for _ in six.moves.range(2):
good_pq.compare_and_accumulate(categories, gt_labels, categories,
good_det_labels)
bad_det_labels = np.array([
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
bad_pq = panoptic_quality.PanopticQuality(
num_categories=1,
ignored_label=2,
max_instances_per_category=16,
offset=16)
for _ in six.moves.range(2):
bad_pq.compare_and_accumulate(categories, gt_labels, categories,
bad_det_labels)
good_pq.merge(bad_pq)
np.testing.assert_array_almost_equal(
good_pq.iou_per_class, [2 * (28 / 30 + 6 / 8) + 2 * (27 / 32)])
np.testing.assert_array_equal(good_pq.tp_per_class, [2 * 2 + 2])
np.testing.assert_array_equal(good_pq.fn_per_class, [2])
np.testing.assert_array_equal(good_pq.fp_per_class, [2])
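    # After the merge: PQ = total IoU / (TP + 0.5 * FN + 0.5 * FP)
    #                     = 5.054167 / (6 + 1 + 1) ~= 0.631771.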
self.assertAlmostEqual(good_pq.result(), 0.63177083)
if __name__ == '__main__':
absltest.main()
| 11,247 | 32.376855 | 80 | py |
models | models-master/research/deeplab/evaluation/parsing_covering_test.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Parsing Covering metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from deeplab.evaluation import parsing_covering
from deeplab.evaluation import test_utils
# See the definition of the color names at:
# https://en.wikipedia.org/wiki/Web_colors.
_CLASS_COLOR_MAP = {
(0, 0, 0): 0,
(0, 0, 255): 1, # Person (blue).
(255, 0, 0): 2, # Bear (red).
(0, 255, 0): 3, # Tree (lime).
(255, 0, 255): 4, # Bird (fuchsia).
(0, 255, 255): 5, # Sky (aqua).
(255, 255, 0): 6, # Cat (yellow).
}
class ParsingCoveringTest(absltest.TestCase):
def test_perfect_match(self):
categories = np.zeros([6, 6], np.uint16)
instances = np.array([
[2, 2, 2, 2, 2, 2],
[2, 4, 4, 4, 4, 2],
[2, 4, 4, 4, 4, 2],
[2, 4, 4, 4, 4, 2],
[2, 4, 4, 2, 2, 2],
[2, 4, 2, 2, 2, 2],
],
dtype=np.uint16)
pc = parsing_covering.ParsingCovering(
num_categories=3,
ignored_label=2,
max_instances_per_category=2,
offset=16,
normalize_by_image_size=False)
pc.compare_and_accumulate(categories, instances, categories, instances)
np.testing.assert_array_equal(pc.weighted_iou_per_class, [0.0, 21.0, 0.0])
np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 21.0, 0.0])
np.testing.assert_array_equal(pc.result_per_category(), [0.0, 1.0, 0.0])
self.assertEqual(pc.result(), 1.0)
def test_totally_wrong(self):
categories = np.zeros([6, 6], np.uint16)
gt_instances = np.array([
[0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype=np.uint16)
pred_instances = 1 - gt_instances
pc = parsing_covering.ParsingCovering(
num_categories=2,
ignored_label=0,
max_instances_per_category=1,
offset=16,
normalize_by_image_size=False)
pc.compare_and_accumulate(categories, gt_instances, categories,
pred_instances)
np.testing.assert_array_equal(pc.weighted_iou_per_class, [0.0, 0.0])
np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 10.0])
np.testing.assert_array_equal(pc.result_per_category(), [0.0, 0.0])
self.assertEqual(pc.result(), 0.0)
def test_matches_expected(self):
pred_classes = test_utils.read_segmentation_with_rgb_color_map(
'team_pred_class.png', _CLASS_COLOR_MAP)
pred_instances = test_utils.read_test_image(
'team_pred_instance.png', mode='L')
instance_class_map = {
0: 0,
47: 1,
97: 1,
133: 1,
150: 1,
174: 1,
198: 2,
215: 1,
244: 1,
255: 1,
}
gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map(
'team_gt_instance.png', instance_class_map)
pc = parsing_covering.ParsingCovering(
num_categories=3,
ignored_label=0,
max_instances_per_category=256,
offset=256 * 256,
normalize_by_image_size=False)
pc.compare_and_accumulate(gt_classes, gt_instances, pred_classes,
pred_instances)
np.testing.assert_array_almost_equal(
pc.weighted_iou_per_class, [0.0, 39864.14634, 3136], decimal=4)
np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 56870, 5800])
np.testing.assert_array_almost_equal(
pc.result_per_category(), [0.0, 0.70097, 0.54069], decimal=4)
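    # The overall covering is consistent with averaging the per-category
    # coverings of the classes that have non-zero ground-truth area:
    # (0.70097 + 0.54069) / 2 ~= 0.62083.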
self.assertAlmostEqual(pc.result(), 0.6208296732)
def test_matches_expected_normalize_by_size(self):
pred_classes = test_utils.read_segmentation_with_rgb_color_map(
'team_pred_class.png', _CLASS_COLOR_MAP)
pred_instances = test_utils.read_test_image(
'team_pred_instance.png', mode='L')
instance_class_map = {
0: 0,
47: 1,
97: 1,
133: 1,
150: 1,
174: 1,
198: 2,
215: 1,
244: 1,
255: 1,
}
gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map(
'team_gt_instance.png', instance_class_map)
pc = parsing_covering.ParsingCovering(
num_categories=3,
ignored_label=0,
max_instances_per_category=256,
offset=256 * 256,
normalize_by_image_size=True)
pc.compare_and_accumulate(gt_classes, gt_instances, pred_classes,
pred_instances)
np.testing.assert_array_almost_equal(
pc.weighted_iou_per_class, [0.0, 0.5002088756, 0.03935002196],
decimal=4)
np.testing.assert_array_almost_equal(
pc.gt_area_per_class, [0.0, 0.7135955832, 0.07277746408], decimal=4)
# Note that the per-category and overall PCs are identical to those without
# normalization in the previous test, because we only have a single image.
np.testing.assert_array_almost_equal(
pc.result_per_category(), [0.0, 0.70097, 0.54069], decimal=4)
self.assertAlmostEqual(pc.result(), 0.6208296732)
if __name__ == '__main__':
absltest.main()
| 5,968 | 33.304598 | 80 | py |
models | models-master/research/deeplab/evaluation/base_metric.py | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the top-level interface for evaluating segmentations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
_EPSILON = 1e-10
def realdiv_maybe_zero(x, y):
"""Element-wise x / y where y may contain zeros, for those returns 0 too."""
return np.where(
np.less(np.abs(y), _EPSILON), np.zeros_like(x), np.divide(x, y))
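# For example, realdiv_maybe_zero(np.array([1., 2.]), np.array([0., 4.]))
# evaluates to [0., 0.5]: entries with a (near-)zero denominator become 0
# rather than inf or nan.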
@six.add_metaclass(abc.ABCMeta)
class SegmentationMetric(object):
"""Abstract base class for computers of segmentation metrics.
Subclasses will implement both:
1. Comparing the predicted segmentation for an image with the groundtruth.
2. Computing the final metric over a set of images.
These are often done as separate steps, due to the need to accumulate
intermediate values other than the metric itself across images, computing the
actual metric value only on these accumulations after all the images have been
compared.
A simple usage would be:
metric = MetricImplementation(...)
for <image>, <groundtruth> in evaluation_set:
<prediction> = run_segmentation(<image>)
metric.compare_and_accumulate(<prediction>, <groundtruth>)
print(metric.result())
"""
def __init__(self, num_categories, ignored_label, max_instances_per_category,
offset):
"""Base initialization for SegmentationMetric.
Args:
num_categories: The number of segmentation categories (or "classes" in the
dataset.
ignored_label: A category id that is ignored in evaluation, e.g. the void
label as defined in COCO panoptic segmentation dataset.
max_instances_per_category: The maximum number of instances for each
category. Used in ensuring unique instance labels.
offset: The maximum number of unique labels. This is used, by multiplying
the ground-truth labels, to generate unique ids for individual regions
of overlap between groundtruth and predicted segments.
"""
self.num_categories = num_categories
self.ignored_label = ignored_label
self.max_instances_per_category = max_instances_per_category
self.offset = offset
self.reset()
def _naively_combine_labels(self, category_array, instance_array):
"""Naively creates a combined label array from categories and instances."""
return (category_array.astype(np.uint32) * self.max_instances_per_category +
instance_array.astype(np.uint32))
@abc.abstractmethod
def compare_and_accumulate(
self, groundtruth_category_array, groundtruth_instance_array,
predicted_category_array, predicted_instance_array):
"""Compares predicted segmentation with groundtruth, accumulates its metric.
It is not assumed that instance ids are unique across different categories.
See for example combine_semantic_and_instance_predictions.py in official
PanopticAPI evaluation code for issues to consider when fusing category
and instance labels.
Instances ids of the ignored category have the meaning that id 0 is "void"
and remaining ones are crowd instances.
Args:
groundtruth_category_array: A 2D numpy uint16 array of groundtruth
per-pixel category labels.
groundtruth_instance_array: A 2D numpy uint16 array of groundtruth
instance labels.
predicted_category_array: A 2D numpy uint16 array of predicted per-pixel
category labels.
predicted_instance_array: A 2D numpy uint16 array of predicted instance
labels.
Returns:
The value of the metric over all comparisons done so far, including this
one, as a float scalar.
"""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def result(self):
"""Computes the metric over all comparisons done so far."""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def detailed_results(self, is_thing=None):
"""Computes and returns the detailed final metric results.
Args:
is_thing: A boolean array of length `num_categories`. The entry
`is_thing[category_id]` is True iff that category is a "thing" category
instead of "stuff."
Returns:
A dictionary with a breakdown of metrics and/or metric factors by things,
stuff, and all categories.
"""
raise NotImplementedError('Not implemented in subclasses.')
@abc.abstractmethod
def result_per_category(self):
"""For supported metrics, return individual per-category metric values.
Returns:
A numpy array of shape `[self.num_categories]`, where index `i` is the
metrics value over only that category.
"""
raise NotImplementedError('Not implemented in subclass.')
def print_detailed_results(self, is_thing=None, print_digits=3):
"""Prints out a detailed breakdown of metric results.
Args:
is_thing: A boolean array of length num_categories.
`is_thing[category_id]` will say whether that category is a "thing"
rather than "stuff."
print_digits: Number of significant digits to print in computed metrics.
"""
raise NotImplementedError('Not implemented in subclass.')
@abc.abstractmethod
def merge(self, other_instance):
"""Combines the accumulated results of another instance into self.
The following two cases should put `metric_a` into an equivalent state.
Case 1 (with merge):
metric_a = MetricsSubclass(...)
metric_a.compare_and_accumulate(<comparison 1>)
metric_a.compare_and_accumulate(<comparison 2>)
metric_b = MetricsSubclass(...)
metric_b.compare_and_accumulate(<comparison 3>)
metric_b.compare_and_accumulate(<comparison 4>)
metric_a.merge(metric_b)
Case 2 (without merge):
metric_a = MetricsSubclass(...)
metric_a.compare_and_accumulate(<comparison 1>)
metric_a.compare_and_accumulate(<comparison 2>)
metric_a.compare_and_accumulate(<comparison 3>)
metric_a.compare_and_accumulate(<comparison 4>)
Args:
other_instance: Another compatible instance of the same metric subclass.
"""
raise NotImplementedError('Not implemented in subclass.')
@abc.abstractmethod
def reset(self):
"""Resets the accumulation to the metric class's state at initialization.
Note that this function will be called in SegmentationMetric.__init__.
"""
raise NotImplementedError('Must be implemented in subclasses.')
| 7,185 | 36.427083 | 80 | py |