repo (string, lengths 2-99) | file (string, lengths 13-225) | code (string, lengths 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (stringclasses, 1 value)
---|---|---|---|---|---|---|
models | models-master/research/deeplab/evaluation/streaming_metrics.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code to compute segmentation in a "streaming" pattern in Tensorflow.
These aggregate the metric over examples of the evaluation set. Each example is
assumed to be fed in in a stream, and the metric implementation accumulates
across them.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from deeplab.evaluation import panoptic_quality
from deeplab.evaluation import parsing_covering
_EPSILON = 1e-10
def _realdiv_maybe_zero(x, y):
"""Support tf.realdiv(x, y) where y may contain zeros."""
return tf.where(tf.less(y, _EPSILON), tf.zeros_like(x), tf.realdiv(x, y))
def _running_total(value, shape, name=None):
"""Maintains a running total of tensor `value` between calls."""
with tf.variable_scope(name, 'running_total', [value]):
total_var = tf.get_variable(
'total',
shape,
value.dtype,
initializer=tf.zeros_initializer(),
trainable=False,
collections=[
tf.GraphKeys.LOCAL_VARIABLES, tf.GraphKeys.METRIC_VARIABLES
])
updated_total = tf.assign_add(total_var, value, use_locking=True)
return total_var, updated_total
def _panoptic_quality_helper(
groundtruth_category_array, groundtruth_instance_array,
predicted_category_array, predicted_instance_array, num_classes,
max_instances_per_category, ignored_label, offset):
"""Helper function to compute panoptic quality."""
pq = panoptic_quality.PanopticQuality(num_classes, ignored_label,
max_instances_per_category, offset)
pq.compare_and_accumulate(groundtruth_category_array,
groundtruth_instance_array,
predicted_category_array, predicted_instance_array)
return pq.iou_per_class, pq.tp_per_class, pq.fn_per_class, pq.fp_per_class
def streaming_panoptic_quality(groundtruth_categories,
groundtruth_instances,
predicted_categories,
predicted_instances,
num_classes,
max_instances_per_category,
ignored_label,
offset,
name=None):
"""Aggregates the panoptic metric across calls with different input tensors.
See tf.metrics.* functions for comparable functionality and usage.
Args:
groundtruth_categories: A 2D uint16 tensor of groundtruth category labels.
groundtruth_instances: A 2D uint16 tensor of groundtruth instance labels.
predicted_categories: A 2D uint16 tensor of predicted category labels.
predicted_instances: A 2D uint16 tensor of predicted instance labels.
num_classes: Number of classes in the dataset as an integer.
max_instances_per_category: The maximum number of instances for each class
as an integer or integer tensor.
ignored_label: The class id to be ignored in evaluation as an integer or
integer tensor.
offset: The maximum number of unique labels as an integer or integer tensor.
name: An optional variable_scope name.
Returns:
qualities: A tensor of shape `[6, num_classes]`, where (1) panoptic quality,
(2) segmentation quality, (3) recognition quality, (4) total_tp,
(5) total_fn and (6) total_fp are saved in the respective rows.
update_ops: List of operations that update the running overall panoptic
quality.
Raises:
RuntimeError: If eager execution is enabled.
"""
if tf.executing_eagerly():
raise RuntimeError('Cannot aggregate when eager execution is enabled.')
input_args = [
tf.convert_to_tensor(groundtruth_categories, tf.uint16),
tf.convert_to_tensor(groundtruth_instances, tf.uint16),
tf.convert_to_tensor(predicted_categories, tf.uint16),
tf.convert_to_tensor(predicted_instances, tf.uint16),
tf.convert_to_tensor(num_classes, tf.int32),
tf.convert_to_tensor(max_instances_per_category, tf.int32),
tf.convert_to_tensor(ignored_label, tf.int32),
tf.convert_to_tensor(offset, tf.int32),
]
return_types = [
tf.float64,
tf.float64,
tf.float64,
tf.float64,
]
with tf.variable_scope(name, 'streaming_panoptic_quality', input_args):
panoptic_results = tf.py_func(
_panoptic_quality_helper, input_args, return_types, stateful=False)
iou, tp, fn, fp = tuple(panoptic_results)
total_iou, updated_iou = _running_total(
iou, [num_classes], name='iou_total')
total_tp, updated_tp = _running_total(tp, [num_classes], name='tp_total')
total_fn, updated_fn = _running_total(fn, [num_classes], name='fn_total')
total_fp, updated_fp = _running_total(fp, [num_classes], name='fp_total')
update_ops = [updated_iou, updated_tp, updated_fn, updated_fp]
sq = _realdiv_maybe_zero(total_iou, total_tp)
rq = _realdiv_maybe_zero(total_tp,
total_tp + 0.5 * total_fn + 0.5 * total_fp)
pq = tf.multiply(sq, rq)
qualities = tf.stack([pq, sq, rq, total_tp, total_fn, total_fp], axis=0)
return qualities, update_ops
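# --- Illustrative usage sketch (not part of the original module) ---
# The streaming metric follows the tf.metrics.* pattern: build the value and
# update ops once, run the update ops for every example in the evaluation set,
# then fetch the aggregated result. The dataset constants (19 classes, 256
# instances per class, ignored label 255) and the `feed_fn` callback below are
# hypothetical placeholders, not values prescribed by this module.
def _example_streaming_panoptic_quality_usage(gt_categories, gt_instances,
                                              pred_categories, pred_instances,
                                              num_examples, feed_fn):
  """Hypothetical helper showing the accumulate-then-read evaluation loop."""
  qualities, update_ops = streaming_panoptic_quality(
      gt_categories, gt_instances, pred_categories, pred_instances,
      num_classes=19, max_instances_per_category=256, ignored_label=255,
      offset=256 * 256)
  with tf.Session() as sess:
    # The running totals are local (metric) variables, so initialize those.
    sess.run(tf.local_variables_initializer())
    for i in range(num_examples):
      sess.run(update_ops, feed_dict=feed_fn(i))
    # Rows of `qualities`: pq, sq, rq, total_tp, total_fn, total_fp per class.
    return sess.run(qualities)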
def _parsing_covering_helper(
groundtruth_category_array, groundtruth_instance_array,
predicted_category_array, predicted_instance_array, num_classes,
max_instances_per_category, ignored_label, offset, normalize_by_image_size):
"""Helper function to compute parsing covering."""
pc = parsing_covering.ParsingCovering(num_classes, ignored_label,
max_instances_per_category, offset,
normalize_by_image_size)
pc.compare_and_accumulate(groundtruth_category_array,
groundtruth_instance_array,
predicted_category_array, predicted_instance_array)
return pc.weighted_iou_per_class, pc.gt_area_per_class
def streaming_parsing_covering(groundtruth_categories,
groundtruth_instances,
predicted_categories,
predicted_instances,
num_classes,
max_instances_per_category,
ignored_label,
offset,
normalize_by_image_size=True,
name=None):
"""Aggregates the covering across calls with different input tensors.
See tf.metrics.* functions for comparable functionality and usage.
Args:
groundtruth_categories: A 2D uint16 tensor of groundtruth category labels.
groundtruth_instances: A 2D uint16 tensor of groundtruth instance labels.
predicted_categories: A 2D uint16 tensor of predicted category labels.
predicted_instances: A 2D uint16 tensor of predicted instance labels.
num_classes: Number of classes in the dataset as an integer.
max_instances_per_category: The maximum number of instances for each class
as an integer or integer tensor.
ignored_label: The class id to be ignored in evaluation as an integer or
integer tensor.
offset: The maximum number of unique labels as an integer or integer tensor.
normalize_by_image_size: Whether to normalize groundtruth region areas by
image size. If True, groundtruth instance areas and weighted IoUs will be
      divided by the size of the corresponding image before being accumulated
      across the dataset.
name: An optional variable_scope name.
Returns:
coverings: A tensor of shape `[3, num_classes]`, where (1) per class
coverings, (2) per class sum of weighted IoUs, and (3) per class sum of
      groundtruth region areas are saved in the respective rows.
update_ops: List of operations that update the running overall parsing
covering.
Raises:
RuntimeError: If eager execution is enabled.
"""
if tf.executing_eagerly():
raise RuntimeError('Cannot aggregate when eager execution is enabled.')
input_args = [
tf.convert_to_tensor(groundtruth_categories, tf.uint16),
tf.convert_to_tensor(groundtruth_instances, tf.uint16),
tf.convert_to_tensor(predicted_categories, tf.uint16),
tf.convert_to_tensor(predicted_instances, tf.uint16),
tf.convert_to_tensor(num_classes, tf.int32),
tf.convert_to_tensor(max_instances_per_category, tf.int32),
tf.convert_to_tensor(ignored_label, tf.int32),
tf.convert_to_tensor(offset, tf.int32),
tf.convert_to_tensor(normalize_by_image_size, tf.bool),
]
return_types = [
tf.float64,
tf.float64,
]
with tf.variable_scope(name, 'streaming_parsing_covering', input_args):
covering_results = tf.py_func(
_parsing_covering_helper, input_args, return_types, stateful=False)
weighted_iou_per_class, gt_area_per_class = tuple(covering_results)
total_weighted_iou_per_class, updated_weighted_iou_per_class = (
_running_total(
weighted_iou_per_class, [num_classes],
name='weighted_iou_per_class_total'))
total_gt_area_per_class, updated_gt_area_per_class = _running_total(
gt_area_per_class, [num_classes], name='gt_area_per_class_total')
covering_per_class = _realdiv_maybe_zero(total_weighted_iou_per_class,
total_gt_area_per_class)
coverings = tf.stack([
covering_per_class,
total_weighted_iou_per_class,
total_gt_area_per_class,
],
axis=0)
update_ops = [updated_weighted_iou_per_class, updated_gt_area_per_class]
return coverings, update_ops
| 10,413 | 42.211618 | 80 | py |
models | models-master/research/deeplab/evaluation/test_utils_test.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for test_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from deeplab.evaluation import test_utils
class TestUtilsTest(absltest.TestCase):
def test_read_test_image(self):
image_array = test_utils.read_test_image('team_pred_class.png')
self.assertSequenceEqual(image_array.shape, (231, 345, 4))
def test_reads_segmentation_with_color_map(self):
rgb_to_semantic_label = {(0, 0, 0): 0, (0, 0, 255): 1, (255, 0, 0): 23}
labels = test_utils.read_segmentation_with_rgb_color_map(
'team_pred_class.png', rgb_to_semantic_label)
input_image = test_utils.read_test_image('team_pred_class.png')
np.testing.assert_array_equal(
labels == 0,
np.logical_and(input_image[:, :, 0] == 0, input_image[:, :, 2] == 0))
np.testing.assert_array_equal(labels == 1, input_image[:, :, 2] == 255)
np.testing.assert_array_equal(labels == 23, input_image[:, :, 0] == 255)
def test_reads_gt_segmentation(self):
instance_label_to_semantic_label = {
0: 0,
47: 1,
97: 1,
133: 1,
150: 1,
174: 1,
198: 23,
215: 1,
244: 1,
255: 1,
}
instances, classes = test_utils.panoptic_segmentation_with_class_map(
'team_gt_instance.png', instance_label_to_semantic_label)
expected_label_shape = (231, 345)
self.assertSequenceEqual(instances.shape, expected_label_shape)
self.assertSequenceEqual(classes.shape, expected_label_shape)
np.testing.assert_array_equal(instances == 0, classes == 0)
np.testing.assert_array_equal(instances == 198, classes == 23)
np.testing.assert_array_equal(
np.logical_and(instances != 0, instances != 198), classes == 1)
if __name__ == '__main__':
absltest.main()
| 2,605 | 33.746667 | 80 | py |
models | models-master/research/deeplab/evaluation/__init__.py | | 0 | 0 | 0 | py |
models | models-master/research/deeplab/evaluation/eval_coco_format_test.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for eval_coco_format script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
from absl.testing import absltest
import evaluation as panopticapi_eval
from deeplab.evaluation import eval_coco_format
_TEST_DIR = 'deeplab/evaluation/testdata'
FLAGS = flags.FLAGS
class EvalCocoFormatTest(absltest.TestCase):
def test_compare_pq_with_reference_eval(self):
sample_data_dir = os.path.join(_TEST_DIR)
gt_json_file = os.path.join(sample_data_dir, 'coco_gt.json')
gt_folder = os.path.join(sample_data_dir, 'coco_gt')
pred_json_file = os.path.join(sample_data_dir, 'coco_pred.json')
pred_folder = os.path.join(sample_data_dir, 'coco_pred')
panopticapi_results = panopticapi_eval.pq_compute(
gt_json_file, pred_json_file, gt_folder, pred_folder)
deeplab_results = eval_coco_format.eval_coco_format(
gt_json_file,
pred_json_file,
gt_folder,
pred_folder,
metric='pq',
num_categories=7,
ignored_label=0,
max_instances_per_category=256,
intersection_offset=(256 * 256))
self.assertCountEqual(
list(deeplab_results.keys()), ['All', 'Things', 'Stuff'])
for cat_group in ['All', 'Things', 'Stuff']:
self.assertCountEqual(deeplab_results[cat_group], ['pq', 'sq', 'rq', 'n'])
for metric in ['pq', 'sq', 'rq', 'n']:
self.assertAlmostEqual(deeplab_results[cat_group][metric],
panopticapi_results[cat_group][metric])
def test_compare_pc_with_golden_value(self):
sample_data_dir = os.path.join(_TEST_DIR)
gt_json_file = os.path.join(sample_data_dir, 'coco_gt.json')
gt_folder = os.path.join(sample_data_dir, 'coco_gt')
pred_json_file = os.path.join(sample_data_dir, 'coco_pred.json')
pred_folder = os.path.join(sample_data_dir, 'coco_pred')
deeplab_results = eval_coco_format.eval_coco_format(
gt_json_file,
pred_json_file,
gt_folder,
pred_folder,
metric='pc',
num_categories=7,
ignored_label=0,
max_instances_per_category=256,
intersection_offset=(256 * 256),
normalize_by_image_size=False)
self.assertCountEqual(
list(deeplab_results.keys()), ['All', 'Things', 'Stuff'])
for cat_group in ['All', 'Things', 'Stuff']:
self.assertCountEqual(deeplab_results[cat_group], ['pc', 'n'])
self.assertAlmostEqual(deeplab_results['All']['pc'], 0.68210561)
self.assertEqual(deeplab_results['All']['n'], 6)
self.assertAlmostEqual(deeplab_results['Things']['pc'], 0.5890529)
self.assertEqual(deeplab_results['Things']['n'], 4)
self.assertAlmostEqual(deeplab_results['Stuff']['pc'], 0.86821097)
self.assertEqual(deeplab_results['Stuff']['n'], 2)
def test_compare_pc_with_golden_value_normalize_by_size(self):
sample_data_dir = os.path.join(_TEST_DIR)
gt_json_file = os.path.join(sample_data_dir, 'coco_gt.json')
gt_folder = os.path.join(sample_data_dir, 'coco_gt')
pred_json_file = os.path.join(sample_data_dir, 'coco_pred.json')
pred_folder = os.path.join(sample_data_dir, 'coco_pred')
deeplab_results = eval_coco_format.eval_coco_format(
gt_json_file,
pred_json_file,
gt_folder,
pred_folder,
metric='pc',
num_categories=7,
ignored_label=0,
max_instances_per_category=256,
intersection_offset=(256 * 256),
normalize_by_image_size=True)
self.assertCountEqual(
list(deeplab_results.keys()), ['All', 'Things', 'Stuff'])
self.assertAlmostEqual(deeplab_results['All']['pc'], 0.68214908840)
def test_pc_with_multiple_workers(self):
sample_data_dir = os.path.join(_TEST_DIR)
gt_json_file = os.path.join(sample_data_dir, 'coco_gt.json')
gt_folder = os.path.join(sample_data_dir, 'coco_gt')
pred_json_file = os.path.join(sample_data_dir, 'coco_pred.json')
pred_folder = os.path.join(sample_data_dir, 'coco_pred')
deeplab_results = eval_coco_format.eval_coco_format(
gt_json_file,
pred_json_file,
gt_folder,
pred_folder,
metric='pc',
num_categories=7,
ignored_label=0,
max_instances_per_category=256,
intersection_offset=(256 * 256),
num_workers=3,
normalize_by_image_size=False)
self.assertCountEqual(
list(deeplab_results.keys()), ['All', 'Things', 'Stuff'])
self.assertAlmostEqual(deeplab_results['All']['pc'], 0.68210561668)
if __name__ == '__main__':
absltest.main()
| 5,347 | 36.929078 | 80 | py |
models | models-master/research/deeplab/evaluation/test_utils.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions to set up unit tests on Panoptic Segmentation code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import scipy.misc
import six
from six.moves import map
FLAGS = flags.FLAGS
_TEST_DIR = 'deeplab/evaluation/testdata'
def read_test_image(testdata_path, *args, **kwargs):
"""Loads a test image.
Args:
testdata_path: Image path relative to panoptic_segmentation/testdata as a
string.
*args: Additional positional arguments passed to `imread`.
**kwargs: Additional keyword arguments passed to `imread`.
Returns:
The image, as a numpy array.
"""
image_path = os.path.join(_TEST_DIR, testdata_path)
return scipy.misc.imread(image_path, *args, **kwargs)
def read_segmentation_with_rgb_color_map(image_testdata_path,
rgb_to_semantic_label,
output_dtype=None):
"""Reads a test segmentation as an image and a map from colors to labels.
Args:
image_testdata_path: Image path relative to panoptic_segmentation/testdata
as a string.
rgb_to_semantic_label: Mapping from RGB colors to integer labels as a
dictionary.
output_dtype: Type of the output labels. If None, defaults to the type of
the provided color map.
Returns:
A 2D numpy array of labels.
Raises:
ValueError: On an incomplete `rgb_to_semantic_label`.
"""
rgb_image = read_test_image(image_testdata_path, mode='RGB')
if len(rgb_image.shape) != 3 or rgb_image.shape[2] != 3:
raise AssertionError(
        'Expected RGB image, actual shape is %s' % rgb_image.shape)
num_pixels = rgb_image.shape[0] * rgb_image.shape[1]
unique_colors = np.unique(np.reshape(rgb_image, [num_pixels, 3]), axis=0)
if not set(map(tuple, unique_colors)).issubset(
six.viewkeys(rgb_to_semantic_label)):
raise ValueError('RGB image has colors not in color map.')
output_dtype = output_dtype or type(
next(six.itervalues(rgb_to_semantic_label)))
output_labels = np.empty(rgb_image.shape[:2], dtype=output_dtype)
for rgb_color, int_label in six.iteritems(rgb_to_semantic_label):
color_array = np.array(rgb_color, ndmin=3)
output_labels[np.all(rgb_image == color_array, axis=2)] = int_label
return output_labels
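# --- Illustrative sketch (not part of the original utilities) ---
# The color-to-label lookup used above can be exercised on a tiny in-memory
# RGB array; the colors and labels in this helper are arbitrary example values.
def _example_rgb_color_map_lookup():
  """Hypothetical demo: maps a 1x2 RGB array to integer labels."""
  rgb = np.array([[[0, 0, 0], [255, 0, 0]]], dtype=np.uint8)
  color_map = {(0, 0, 0): 0, (255, 0, 0): 23}
  labels = np.empty(rgb.shape[:2], dtype=np.int32)
  for color, label in six.iteritems(color_map):
    # Broadcast-compare every pixel against the color and assign its label.
    labels[np.all(rgb == np.array(color, ndmin=3), axis=2)] = label
  return labels  # -> [[0, 23]]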
def panoptic_segmentation_with_class_map(instance_testdata_path,
instance_label_to_semantic_label):
"""Reads in a panoptic segmentation with an instance map and a map to classes.
Args:
instance_testdata_path: Path to a grayscale instance map, given as a string
and relative to panoptic_segmentation/testdata.
instance_label_to_semantic_label: A map from instance labels to class
labels.
Returns:
A tuple `(instance_labels, class_labels)` of numpy arrays.
Raises:
    ValueError: On a mismatched set of instances in the
      `instance_label_to_semantic_label`.
"""
instance_labels = read_test_image(instance_testdata_path, mode='L')
if set(np.unique(instance_labels)) != set(
six.iterkeys(instance_label_to_semantic_label)):
raise ValueError('Provided class map does not match present instance ids.')
class_labels = np.empty_like(instance_labels)
for instance_id, class_id in six.iteritems(instance_label_to_semantic_label):
class_labels[instance_labels == instance_id] = class_id
return instance_labels, class_labels
| 4,237 | 34.316667 | 80 | py |
models | models-master/research/deeplab/core/conv2d_ws.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Augment slim.conv2d with optional Weight Standardization (WS).
WS is a normalization method to accelerate micro-batch training. When used with
Group Normalization and trained with 1 image/GPU, WS is able to match or
outperform the performance of BN trained with large batch sizes.
[1] Siyuan Qiao, Huiyu Wang, Chenxi Liu, Wei Shen, Alan Yuille
Weight Standardization. arXiv:1903.10520
[2] Lei Huang, Xianglong Liu, Yang Liu, Bo Lang, Dacheng Tao
Centered Weight Normalization in Accelerating Training of Deep Neural
Networks. ICCV 2017
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import utils
class Conv2D(tf.keras.layers.Conv2D, tf.layers.Layer):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
use_weight_standardization=False,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
"""Constructs the 2D convolution layer.
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the height
and width of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides of
the convolution along the height and width. Can be a single integer to
specify the same value for all spatial dimensions. Specifying any stride
value != 1 is incompatible with specifying any `dilation_rate` value !=
1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `(batch, height, width,
channels)` while `channels_first` corresponds to inputs with shape
`(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying the
dilation rate to use for dilated convolution. Can be a single integer to
specify the same value for all spatial dimensions. Currently, specifying
any `dilation_rate` value != 1 is incompatible with specifying any
stride value != 1.
activation: Activation function. Set it to None to maintain a linear
activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
use_weight_standardization: Boolean, whether the layer uses weight
standardization.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are not
safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the bias
after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
**kwargs: Arbitrary keyword arguments passed to tf.keras.layers.Conv2D
"""
super(Conv2D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
self.use_weight_standardization = use_weight_standardization
def call(self, inputs):
if self.use_weight_standardization:
mean, var = tf.nn.moments(self.kernel, [0, 1, 2], keep_dims=True)
kernel = (self.kernel - mean) / tf.sqrt(var + 1e-5)
outputs = self._convolution_op(inputs, kernel)
else:
outputs = self._convolution_op(inputs, self.kernel)
if self.use_bias:
if self.data_format == 'channels_first':
if self.rank == 1:
# tf.nn.bias_add does not accept a 1D input tensor.
bias = tf.reshape(self.bias, (1, self.filters, 1))
outputs += bias
else:
outputs = tf.nn.bias_add(outputs, self.bias, data_format='NCHW')
else:
outputs = tf.nn.bias_add(outputs, self.bias, data_format='NHWC')
if self.activation is not None:
return self.activation(outputs)
return outputs
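# --- Illustrative usage sketch (not part of the original module) ---
# Weight Standardization re-parameterizes every kernel to zero mean and unit
# variance over its (height, width, in_channels) axes before convolving, i.e.
# kernel_hat = (kernel - mean) / sqrt(var + 1e-5), as implemented in `call`
# above. The filter count and kernel size below are arbitrary placeholders.
def _example_weight_standardized_layer(images):
  """Hypothetical helper: applies a 3x3 weight-standardized convolution."""
  layer = Conv2D(
      filters=64,
      kernel_size=3,
      padding='same',
      use_weight_standardization=True)
  return layer(images)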
@contrib_framework.add_arg_scope
def conv2d(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=tf.nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=contrib_layers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=tf.zeros_initializer(),
biases_regularizer=None,
use_weight_standardization=False,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a 2D convolution followed by an optional batch_norm layer.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
variable would be created and added to the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
Performs atrous convolution with input stride/dilation rate equal to `rate`
if a value > 1 for any dimension of `rate` is specified. In this case
`stride` values != 1 are not supported.
Args:
inputs: A Tensor of rank N+2 of shape `[batch_size] + input_spatial_shape +
[in_channels]` if data_format does not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: One of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, the valid values are "NDHWC" (default) and "NCDHW".
rate: A sequence of N positive integers specifying the dilation rate to use
for atrous convolution. Can be a single integer to specify the same value
for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
    Default is None, meaning no normalizer function is applied.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
use_weight_standardization: Boolean, whether the layer uses weight
standardization.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
ValueError: Both 'rate' and `stride` are not uniformly 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
# pylint: disable=protected-access
layer_variable_getter = layers._build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
# pylint: enable=protected-access
with tf.variable_scope(
scope, 'Conv', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = tf.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank != 4:
raise ValueError('Convolution expects input with rank %d, got %d' %
(4, input_rank))
data_format = ('channels_first' if data_format and
data_format.startswith('NC') else 'channels_last')
layer = Conv2D(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=data_format,
dilation_rate=rate,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
use_weight_standardization=use_weight_standardization,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
# pylint: disable=protected-access
layers._add_variable_to_collections(layer.kernel, variables_collections,
'weights')
if layer.use_bias:
layers._add_variable_to_collections(layer.bias, variables_collections,
'biases')
# pylint: enable=protected-access
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
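# --- Illustrative usage sketch (not part of the original module) ---
# The module docstring pairs Weight Standardization with Group Normalization;
# a hypothetical slim-style call combining the two could look like this (the
# filter count, kernel size, group count, and scope name are arbitrary).
def _example_conv2d_gn_ws(inputs):
  """Hypothetical helper: WS convolution followed by Group Normalization."""
  return conv2d(
      inputs,
      num_outputs=64,
      kernel_size=3,
      normalizer_fn=contrib_layers.group_norm,
      normalizer_params={'groups': 32},
      use_weight_standardization=True,
      scope='example_conv_gn_ws')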
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, then we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')
net = subsample(net, factor=stride)
whereas
net = conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An int with the kernel_size of the filters.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
scope: Scope.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return conv2d(
inputs,
num_outputs,
kernel_size,
stride=1,
rate=rate,
padding='SAME',
scope=scope)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = tf.pad(inputs,
[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return conv2d(
inputs,
num_outputs,
kernel_size,
stride=stride,
rate=rate,
padding='VALID',
scope=scope)
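# --- Illustrative sketch (not part of the original module) ---
# The explicit padding used above, worked out for one hypothetical setting:
# with kernel_size=3 and rate=2 the effective kernel spans 3 + (3 - 1) * (2 - 1)
# = 5 pixels, so pad_total = 4 and the input is padded by (2, 2) before the
# 'VALID' convolution.
def _example_same_padding_amounts(kernel_size=3, rate=2):
  """Hypothetical helper: returns the (pad_beg, pad_end) used by conv2d_same."""
  kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
  pad_total = kernel_size_effective - 1
  pad_beg = pad_total // 2
  pad_end = pad_total - pad_beg
  return pad_beg, pad_end  # -> (2, 2) for the defaults above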
| 15,881 | 41.924324 | 80 | py |
models | models-master/research/deeplab/core/resnet_v1_beta_test.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for resnet_v1_beta module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import six
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from deeplab.core import resnet_v1_beta
from tensorflow.contrib.slim.nets import resnet_utils
slim = contrib_slim
def create_test_input(batch, height, width, channels):
"""Create test input tensor."""
if None in [batch, height, width, channels]:
return tf.placeholder(tf.float32, (batch, height, width, channels))
else:
return tf.to_float(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) +
np.reshape(np.arange(width), [1, width]),
[1, height, width, 1]),
[batch, 1, 1, channels]))
class ResnetCompleteNetworkTest(tf.test.TestCase):
"""Tests with complete small ResNet v1 networks."""
def _resnet_small_lite_bottleneck(self,
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
multi_grid=None,
reuse=None,
scope='resnet_v1_small'):
"""A shallow and thin ResNet v1 with lite_bottleneck."""
if multi_grid is None:
multi_grid = [1, 1]
else:
if len(multi_grid) != 2:
raise ValueError('Expect multi_grid to have length 2.')
block = resnet_v1_beta.resnet_v1_small_beta_block
blocks = [
block('block1', base_depth=1, num_units=1, stride=2),
block('block2', base_depth=2, num_units=1, stride=2),
block('block3', base_depth=4, num_units=1, stride=2),
resnet_utils.Block('block4', resnet_v1_beta.lite_bottleneck, [
{'depth': 8,
'stride': 1,
'unit_rate': rate} for rate in multi_grid])]
return resnet_v1_beta.resnet_v1_beta(
inputs,
blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
root_block_fn=functools.partial(
resnet_v1_beta.root_block_fn_for_beta_variant,
depth_multiplier=0.25),
reuse=reuse,
scope=scope)
def _resnet_small(self,
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
multi_grid=None,
reuse=None,
scope='resnet_v1_small'):
"""A shallow and thin ResNet v1 for faster tests."""
if multi_grid is None:
multi_grid = [1, 1, 1]
else:
if len(multi_grid) != 3:
raise ValueError('Expect multi_grid to have length 3.')
block = resnet_v1_beta.resnet_v1_beta_block
blocks = [
block('block1', base_depth=1, num_units=1, stride=2),
block('block2', base_depth=2, num_units=1, stride=2),
block('block3', base_depth=4, num_units=1, stride=2),
resnet_utils.Block('block4', resnet_v1_beta.bottleneck, [
{'depth': 32, 'depth_bottleneck': 8, 'stride': 1,
'unit_rate': rate} for rate in multi_grid])]
return resnet_v1_beta.resnet_v1_beta(
inputs,
blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
root_block_fn=functools.partial(
resnet_v1_beta.root_block_fn_for_beta_variant),
reuse=reuse,
scope=scope)
def testClassificationEndPointsWithLiteBottleneck(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small_lite_bottleneck(
inputs,
num_classes,
global_pool=global_pool,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertIn('predictions', end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationEndPointsWithMultigridAndLiteBottleneck(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
multi_grid = [1, 2]
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small_lite_bottleneck(
inputs,
num_classes,
global_pool=global_pool,
multi_grid=multi_grid,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertIn('predictions', end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationShapesWithLiteBottleneck(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small_lite_bottleneck(
inputs,
num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/conv1_1': [2, 112, 112, 16],
'resnet/conv1_2': [2, 112, 112, 16],
'resnet/conv1_3': [2, 112, 112, 32],
'resnet/block1': [2, 28, 28, 1],
'resnet/block2': [2, 14, 14, 2],
'resnet/block3': [2, 7, 7, 4],
'resnet/block4': [2, 7, 7, 8]}
for endpoint, shape in six.iteritems(endpoint_to_shape):
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapesWithLiteBottleneck(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small_lite_bottleneck(
inputs,
num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/conv1_1': [2, 161, 161, 16],
'resnet/conv1_2': [2, 161, 161, 16],
'resnet/conv1_3': [2, 161, 161, 32],
'resnet/block1': [2, 41, 41, 1],
'resnet/block2': [2, 21, 21, 2],
'resnet/block3': [2, 11, 11, 4],
'resnet/block4': [2, 11, 11, 8]}
for endpoint, shape in six.iteritems(endpoint_to_shape):
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapesWithLiteBottleneck(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small_lite_bottleneck(
inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
scope='resnet')
endpoint_to_shape = {
'resnet/conv1_1': [2, 161, 161, 16],
'resnet/conv1_2': [2, 161, 161, 16],
'resnet/conv1_3': [2, 161, 161, 32],
'resnet/block1': [2, 41, 41, 1],
'resnet/block2': [2, 41, 41, 2],
'resnet/block3': [2, 41, 41, 4],
'resnet/block4': [2, 41, 41, 8]}
for endpoint, shape in six.iteritems(endpoint_to_shape):
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValuesWithLiteBottleneck(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small_lite_bottleneck(
inputs,
None,
is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small_lite_bottleneck(
inputs,
None,
is_training=False,
global_pool=False)
sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
def testUnknownBatchSizeWithLiteBottleneck(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small_lite_bottleneck(
inputs,
num_classes,
global_pool=global_pool,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidthWithLiteBottleneck(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small_lite_bottleneck(
inputs,
None,
global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 8])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 8))
def testAtrousFullyConvolutionalUnknownHeightWidthWithLiteBottleneck(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small_lite_bottleneck(
inputs,
None,
global_pool=global_pool,
output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 8])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 8))
def testClassificationEndPoints(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertIn('predictions', end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationEndPointsWithWS(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(
resnet_v1_beta.resnet_arg_scope(use_weight_standardization=True)):
logits, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertIn('predictions', end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationEndPointsWithGN(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(
resnet_v1_beta.resnet_arg_scope(normalization_method='group')):
with slim.arg_scope([slim.group_norm], groups=1):
logits, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertIn('predictions', end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testInvalidGroupsWithGN(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with self.assertRaisesRegexp(ValueError, 'Invalid groups'):
with slim.arg_scope(
resnet_v1_beta.resnet_arg_scope(normalization_method='group')):
with slim.arg_scope([slim.group_norm], groups=32):
_, _ = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
def testClassificationEndPointsWithGNWS(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(
resnet_v1_beta.resnet_arg_scope(
normalization_method='group', use_weight_standardization=True)):
with slim.arg_scope([slim.group_norm], groups=1):
logits, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertIn('predictions', end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationEndPointsWithMultigrid(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
multi_grid = [1, 2, 4]
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
multi_grid=multi_grid,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertIn('predictions', end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationShapes(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/conv1_1': [2, 112, 112, 64],
'resnet/conv1_2': [2, 112, 112, 64],
'resnet/conv1_3': [2, 112, 112, 128],
'resnet/block1': [2, 28, 28, 4],
'resnet/block2': [2, 14, 14, 8],
'resnet/block3': [2, 7, 7, 16],
'resnet/block4': [2, 7, 7, 32]}
for endpoint, shape in six.iteritems(endpoint_to_shape):
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/conv1_1': [2, 161, 161, 64],
'resnet/conv1_2': [2, 161, 161, 64],
'resnet/conv1_3': [2, 161, 161, 128],
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 21, 21, 8],
'resnet/block3': [2, 11, 11, 16],
'resnet/block4': [2, 11, 11, 32]}
for endpoint, shape in six.iteritems(endpoint_to_shape):
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
scope='resnet')
endpoint_to_shape = {
'resnet/conv1_1': [2, 161, 161, 64],
'resnet/conv1_2': [2, 161, 161, 64],
'resnet/conv1_3': [2, 161, 161, 128],
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 41, 41, 8],
'resnet/block3': [2, 41, 41, 16],
'resnet/block4': [2, 41, 41, 32]}
for endpoint, shape in six.iteritems(endpoint_to_shape):
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(inputs,
None,
is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(inputs,
None,
is_training=False,
global_pool=False)
sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs,
None,
global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs,
None,
global_pool=global_pool,
output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
if __name__ == '__main__':
tf.test.main()
| 23,558 | 40.697345 | 80 | py |
models | models-master/research/deeplab/core/preprocess_utils_test.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for preprocess_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow as tf
from deeplab.core import preprocess_utils
class PreprocessUtilsTest(tf.test.TestCase):
def testNoFlipWhenProbIsZero(self):
numpy_image = np.dstack([[[5., 6.],
[9., 0.]],
[[4., 3.],
[3., 5.]]])
image = tf.convert_to_tensor(numpy_image)
with self.test_session():
actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=0)
self.assertAllEqual(numpy_image, actual.eval())
self.assertAllEqual(False, is_flipped.eval())
actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=1)
self.assertAllEqual(numpy_image, actual.eval())
self.assertAllEqual(False, is_flipped.eval())
actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=2)
self.assertAllEqual(numpy_image, actual.eval())
self.assertAllEqual(False, is_flipped.eval())
def testFlipWhenProbIsOne(self):
numpy_image = np.dstack([[[5., 6.],
[9., 0.]],
[[4., 3.],
[3., 5.]]])
dim0_flipped = np.dstack([[[9., 0.],
[5., 6.]],
[[3., 5.],
[4., 3.]]])
dim1_flipped = np.dstack([[[6., 5.],
[0., 9.]],
[[3., 4.],
[5., 3.]]])
dim2_flipped = np.dstack([[[4., 3.],
[3., 5.]],
[[5., 6.],
[9., 0.]]])
image = tf.convert_to_tensor(numpy_image)
with self.test_session():
actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=0)
self.assertAllEqual(dim0_flipped, actual.eval())
self.assertAllEqual(True, is_flipped.eval())
actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=1)
self.assertAllEqual(dim1_flipped, actual.eval())
self.assertAllEqual(True, is_flipped.eval())
actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=2)
self.assertAllEqual(dim2_flipped, actual.eval())
self.assertAllEqual(True, is_flipped.eval())
def testFlipMultipleImagesConsistentlyWhenProbIsOne(self):
numpy_image = np.dstack([[[5., 6.],
[9., 0.]],
[[4., 3.],
[3., 5.]]])
numpy_label = np.dstack([[[0., 1.],
[2., 3.]]])
image_dim1_flipped = np.dstack([[[6., 5.],
[0., 9.]],
[[3., 4.],
[5., 3.]]])
label_dim1_flipped = np.dstack([[[1., 0.],
[3., 2.]]])
image = tf.convert_to_tensor(numpy_image)
label = tf.convert_to_tensor(numpy_label)
with self.test_session() as sess:
image, label, is_flipped = preprocess_utils.flip_dim(
[image, label], prob=1, dim=1)
actual_image, actual_label = sess.run([image, label])
self.assertAllEqual(image_dim1_flipped, actual_image)
self.assertAllEqual(label_dim1_flipped, actual_label)
self.assertEqual(True, is_flipped.eval())
def testReturnRandomFlipsOnMultipleEvals(self):
numpy_image = np.dstack([[[5., 6.],
[9., 0.]],
[[4., 3.],
[3., 5.]]])
dim1_flipped = np.dstack([[[6., 5.],
[0., 9.]],
[[3., 4.],
[5., 3.]]])
image = tf.convert_to_tensor(numpy_image)
tf.compat.v1.set_random_seed(53)
with self.test_session() as sess:
actual, is_flipped = preprocess_utils.flip_dim(
[image], prob=0.5, dim=1)
actual_image, actual_is_flipped = sess.run([actual, is_flipped])
self.assertAllEqual(numpy_image, actual_image)
self.assertEqual(False, actual_is_flipped)
actual_image, actual_is_flipped = sess.run([actual, is_flipped])
self.assertAllEqual(dim1_flipped, actual_image)
self.assertEqual(True, actual_is_flipped)
def testReturnCorrectCropOfSingleImage(self):
np.random.seed(0)
height, width = 10, 20
image = np.random.randint(0, 256, size=(height, width, 3))
crop_height, crop_width = 2, 4
image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3))
[cropped] = preprocess_utils.random_crop([image_placeholder],
crop_height,
crop_width)
with self.test_session():
cropped_image = cropped.eval(feed_dict={image_placeholder: image})
# Ensure we can find the cropped image in the original:
is_found = False
for x in range(0, width - crop_width + 1):
for y in range(0, height - crop_height + 1):
if np.isclose(image[y:y+crop_height, x:x+crop_width, :],
cropped_image).all():
is_found = True
break
self.assertTrue(is_found)
def testRandomCropMaintainsNumberOfChannels(self):
np.random.seed(0)
crop_height, crop_width = 10, 20
image = np.random.randint(0, 256, size=(100, 200, 3))
tf.compat.v1.set_random_seed(37)
image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3))
[cropped] = preprocess_utils.random_crop(
[image_placeholder], crop_height, crop_width)
with self.test_session():
cropped_image = cropped.eval(feed_dict={image_placeholder: image})
self.assertTupleEqual(cropped_image.shape, (crop_height, crop_width, 3))
def testReturnDifferentCropAreasOnTwoEvals(self):
tf.compat.v1.set_random_seed(0)
crop_height, crop_width = 2, 3
image = np.random.randint(0, 256, size=(100, 200, 3))
image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3))
[cropped] = preprocess_utils.random_crop(
[image_placeholder], crop_height, crop_width)
with self.test_session():
crop0 = cropped.eval(feed_dict={image_placeholder: image})
crop1 = cropped.eval(feed_dict={image_placeholder: image})
self.assertFalse(np.isclose(crop0, crop1).all())
  def testReturnConsistentCropsOfImagesInTheList(self):
tf.compat.v1.set_random_seed(0)
height, width = 10, 20
crop_height, crop_width = 2, 3
labels = np.linspace(0, height * width-1, height * width)
labels = labels.reshape((height, width, 1))
image = np.tile(labels, (1, 1, 3))
image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3))
label_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1))
[cropped_image, cropped_label] = preprocess_utils.random_crop(
[image_placeholder, label_placeholder], crop_height, crop_width)
with self.test_session() as sess:
cropped_image, cropped_labels = sess.run([cropped_image, cropped_label],
feed_dict={
image_placeholder: image,
label_placeholder: labels})
for i in range(3):
self.assertAllEqual(cropped_image[:, :, i], cropped_labels.squeeze())
def testDieOnRandomCropWhenImagesWithDifferentWidth(self):
crop_height, crop_width = 2, 3
image1 = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3))
image2 = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1))
cropped = preprocess_utils.random_crop(
[image1, image2], crop_height, crop_width)
with self.test_session() as sess:
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(cropped, feed_dict={image1: np.random.rand(4, 5, 3),
image2: np.random.rand(4, 6, 1)})
def testDieOnRandomCropWhenImagesWithDifferentHeight(self):
crop_height, crop_width = 2, 3
image1 = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3))
image2 = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1))
cropped = preprocess_utils.random_crop(
[image1, image2], crop_height, crop_width)
with self.test_session() as sess:
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
'Wrong height for tensor'):
sess.run(cropped, feed_dict={image1: np.random.rand(4, 5, 3),
image2: np.random.rand(3, 5, 1)})
def testDieOnRandomCropWhenCropSizeIsGreaterThanImage(self):
crop_height, crop_width = 5, 9
image1 = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3))
image2 = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1))
cropped = preprocess_utils.random_crop(
[image1, image2], crop_height, crop_width)
with self.test_session() as sess:
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
'Crop size greater than the image size.'):
sess.run(cropped, feed_dict={image1: np.random.rand(4, 5, 3),
image2: np.random.rand(4, 5, 1)})
def testReturnPaddedImageWithNonZeroPadValue(self):
for dtype in [np.int32, np.int64, np.float32, np.float64]:
image = np.dstack([[[5, 6],
[9, 0]],
[[4, 3],
[3, 5]]]).astype(dtype)
expected_image = np.dstack([[[255, 255, 255, 255, 255],
[255, 255, 255, 255, 255],
[255, 5, 6, 255, 255],
[255, 9, 0, 255, 255],
[255, 255, 255, 255, 255]],
[[255, 255, 255, 255, 255],
[255, 255, 255, 255, 255],
[255, 4, 3, 255, 255],
[255, 3, 5, 255, 255],
[255, 255, 255, 255, 255]]]).astype(dtype)
with self.session() as sess:
padded_image = preprocess_utils.pad_to_bounding_box(
image, 2, 1, 5, 5, 255)
padded_image = sess.run(padded_image)
self.assertAllClose(padded_image, expected_image)
# Add batch size = 1 to image.
padded_image = preprocess_utils.pad_to_bounding_box(
np.expand_dims(image, 0), 2, 1, 5, 5, 255)
padded_image = sess.run(padded_image)
self.assertAllClose(padded_image, np.expand_dims(expected_image, 0))
def testReturnOriginalImageWhenTargetSizeIsEqualToImageSize(self):
image = np.dstack([[[5, 6],
[9, 0]],
[[4, 3],
[3, 5]]])
with self.session() as sess:
padded_image = preprocess_utils.pad_to_bounding_box(
image, 0, 0, 2, 2, 255)
padded_image = sess.run(padded_image)
self.assertAllClose(padded_image, image)
def testDieOnTargetSizeGreaterThanImageSize(self):
image = np.dstack([[[5, 6],
[9, 0]],
[[4, 3],
[3, 5]]])
with self.test_session():
image_placeholder = tf.placeholder(tf.float32)
padded_image = preprocess_utils.pad_to_bounding_box(
image_placeholder, 0, 0, 2, 1, 255)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
'target_width must be >= width'):
padded_image.eval(feed_dict={image_placeholder: image})
padded_image = preprocess_utils.pad_to_bounding_box(
image_placeholder, 0, 0, 1, 2, 255)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
'target_height must be >= height'):
padded_image.eval(feed_dict={image_placeholder: image})
def testDieIfTargetSizeNotPossibleWithGivenOffset(self):
image = np.dstack([[[5, 6],
[9, 0]],
[[4, 3],
[3, 5]]])
with self.test_session():
image_placeholder = tf.placeholder(tf.float32)
padded_image = preprocess_utils.pad_to_bounding_box(
image_placeholder, 3, 0, 4, 4, 255)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
'target size not possible with the given target offsets'):
padded_image.eval(feed_dict={image_placeholder: image})
def testDieIfImageTensorRankIsTwo(self):
image = np.vstack([[5, 6],
[9, 0]])
with self.test_session():
image_placeholder = tf.placeholder(tf.float32)
padded_image = preprocess_utils.pad_to_bounding_box(
image_placeholder, 0, 0, 2, 2, 255)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
'Wrong image tensor rank'):
padded_image.eval(feed_dict={image_placeholder: image})
def testResizeTensorsToRange(self):
test_shapes = [[60, 40],
[15, 30],
[15, 50]]
min_size = 50
max_size = 100
factor = None
expected_shape_list = [(75, 50, 3),
(50, 100, 3),
(30, 100, 3)]
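    # Illustrative note (not in the original test): with the aspect ratio kept,
    # the smaller side is scaled to min_size unless the larger side would then
    # exceed max_size, in which case the larger side is scaled to max_size.
    # E.g. 60x40 -> scale 50/40 -> 75x50, while 15x50 -> 50/15 would give
    # 50x167 > 100, so the scale becomes 100/50 and the result is 30x100.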
for i, test_shape in enumerate(test_shapes):
image = tf.random.normal([test_shape[0], test_shape[1], 3])
new_tensor_list = preprocess_utils.resize_to_range(
image=image,
label=None,
min_size=min_size,
max_size=max_size,
factor=factor,
align_corners=True)
with self.test_session() as session:
resized_image = session.run(new_tensor_list[0])
self.assertEqual(resized_image.shape, expected_shape_list[i])
def testResizeTensorsToRangeWithFactor(self):
test_shapes = [[60, 40],
[15, 30],
[15, 50]]
min_size = 50
max_size = 98
factor = 8
expected_image_shape_list = [(81, 57, 3),
(49, 97, 3),
(33, 97, 3)]
expected_label_shape_list = [(81, 57, 1),
(49, 97, 1),
(33, 97, 1)]
for i, test_shape in enumerate(test_shapes):
image = tf.random.normal([test_shape[0], test_shape[1], 3])
label = tf.random.normal([test_shape[0], test_shape[1], 1])
new_tensor_list = preprocess_utils.resize_to_range(
image=image,
label=label,
min_size=min_size,
max_size=max_size,
factor=factor,
align_corners=True)
with self.test_session() as session:
new_tensor_list = session.run(new_tensor_list)
self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i])
self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i])
def testResizeTensorsToRangeWithFactorAndLabelShapeCHW(self):
test_shapes = [[60, 40],
[15, 30],
[15, 50]]
min_size = 50
max_size = 98
factor = 8
expected_image_shape_list = [(81, 57, 3),
(49, 97, 3),
(33, 97, 3)]
expected_label_shape_list = [(5, 81, 57),
(5, 49, 97),
(5, 33, 97)]
for i, test_shape in enumerate(test_shapes):
image = tf.random.normal([test_shape[0], test_shape[1], 3])
label = tf.random.normal([5, test_shape[0], test_shape[1]])
new_tensor_list = preprocess_utils.resize_to_range(
image=image,
label=label,
min_size=min_size,
max_size=max_size,
factor=factor,
align_corners=True,
label_layout_is_chw=True)
with self.test_session() as session:
new_tensor_list = session.run(new_tensor_list)
self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i])
self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i])
def testResizeTensorsToRangeWithSimilarMinMaxSizes(self):
test_shapes = [[60, 40],
[15, 30],
[15, 50]]
    # Values are set so that one of the resized sides equals 97.
min_size = 96
max_size = 98
factor = 8
expected_image_shape_list = [(97, 65, 3),
(49, 97, 3),
(33, 97, 3)]
expected_label_shape_list = [(97, 65, 1),
(49, 97, 1),
(33, 97, 1)]
for i, test_shape in enumerate(test_shapes):
image = tf.random.normal([test_shape[0], test_shape[1], 3])
label = tf.random.normal([test_shape[0], test_shape[1], 1])
new_tensor_list = preprocess_utils.resize_to_range(
image=image,
label=label,
min_size=min_size,
max_size=max_size,
factor=factor,
align_corners=True)
with self.test_session() as session:
new_tensor_list = session.run(new_tensor_list)
self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i])
self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i])
def testResizeTensorsToRangeWithEqualMaxSize(self):
test_shapes = [[97, 38],
[96, 97]]
# Make max_size equal to the larger value of test_shapes.
min_size = 97
max_size = 97
factor = 8
expected_image_shape_list = [(97, 41, 3),
(97, 97, 3)]
expected_label_shape_list = [(97, 41, 1),
(97, 97, 1)]
for i, test_shape in enumerate(test_shapes):
image = tf.random.normal([test_shape[0], test_shape[1], 3])
label = tf.random.normal([test_shape[0], test_shape[1], 1])
new_tensor_list = preprocess_utils.resize_to_range(
image=image,
label=label,
min_size=min_size,
max_size=max_size,
factor=factor,
align_corners=True)
with self.test_session() as session:
new_tensor_list = session.run(new_tensor_list)
self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i])
self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i])
def testResizeTensorsToRangeWithPotentialErrorInTFCeil(self):
test_shape = [3936, 5248]
    # Set min_size = max_size so that the longer side is resized to 1441.
min_size = 1441
max_size = 1441
factor = 16
expected_image_shape = (1089, 1441, 3)
expected_label_shape = (1089, 1441, 1)
image = tf.random.normal([test_shape[0], test_shape[1], 3])
label = tf.random.normal([test_shape[0], test_shape[1], 1])
new_tensor_list = preprocess_utils.resize_to_range(
image=image,
label=label,
min_size=min_size,
max_size=max_size,
factor=factor,
align_corners=True)
with self.test_session() as session:
new_tensor_list = session.run(new_tensor_list)
self.assertEqual(new_tensor_list[0].shape, expected_image_shape)
self.assertEqual(new_tensor_list[1].shape, expected_label_shape)
def testResizeTensorsToRangeWithEqualMaxSizeWithoutAspectRatio(self):
test_shapes = [[97, 38],
[96, 97]]
# Make max_size equal to the larger value of test_shapes.
min_size = 97
max_size = 97
factor = 8
keep_aspect_ratio = False
expected_image_shape_list = [(97, 97, 3),
(97, 97, 3)]
expected_label_shape_list = [(97, 97, 1),
(97, 97, 1)]
for i, test_shape in enumerate(test_shapes):
image = tf.random.normal([test_shape[0], test_shape[1], 3])
label = tf.random.normal([test_shape[0], test_shape[1], 1])
new_tensor_list = preprocess_utils.resize_to_range(
image=image,
label=label,
min_size=min_size,
max_size=max_size,
factor=factor,
keep_aspect_ratio=keep_aspect_ratio,
align_corners=True)
with self.test_session() as session:
new_tensor_list = session.run(new_tensor_list)
self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i])
self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i])
if __name__ == '__main__':
tf.test.main()
| 21,378 | 40.432171 | 80 | py |
models | models-master/research/deeplab/core/nas_cell.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cell structure used by NAS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from six.moves import range
from six.moves import zip
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim as contrib_slim
from deeplab.core import xception as xception_utils
from deeplab.core.utils import resize_bilinear
from deeplab.core.utils import scale_dimension
from tensorflow.contrib.slim.nets import resnet_utils
arg_scope = contrib_framework.arg_scope
slim = contrib_slim
separable_conv2d_same = functools.partial(xception_utils.separable_conv2d_same,
regularize_depthwise=True)
class NASBaseCell(object):
"""NASNet Cell class that is used as a 'layer' in image architectures."""
def __init__(self, num_conv_filters, operations, used_hiddenstates,
hiddenstate_indices, drop_path_keep_prob, total_num_cells,
total_training_steps, batch_norm_fn=slim.batch_norm):
"""Init function.
For more details about NAS cell, see
https://arxiv.org/abs/1707.07012 and https://arxiv.org/abs/1712.00559.
Args:
num_conv_filters: The number of filters for each convolution operation.
operations: List of operations that are performed in the NASNet Cell in
order.
used_hiddenstates: Binary array that signals if the hiddenstate was used
within the cell. This is used to determine what outputs of the cell
should be concatenated together.
hiddenstate_indices: Determines what hiddenstates should be combined
together with the specified operations to create the NASNet cell.
drop_path_keep_prob: Float, drop path keep probability.
total_num_cells: Integer, total number of cells.
total_training_steps: Integer, total training steps.
batch_norm_fn: Function, batch norm function. Defaults to
slim.batch_norm.
"""
if len(hiddenstate_indices) != len(operations):
raise ValueError(
'Number of hiddenstate_indices and operations should be the same.')
if len(operations) % 2:
raise ValueError('Number of operations should be even.')
self._num_conv_filters = num_conv_filters
self._operations = operations
self._used_hiddenstates = used_hiddenstates
self._hiddenstate_indices = hiddenstate_indices
self._drop_path_keep_prob = drop_path_keep_prob
self._total_num_cells = total_num_cells
self._total_training_steps = total_training_steps
self._batch_norm_fn = batch_norm_fn
def __call__(self, net, scope, filter_scaling, stride, prev_layer, cell_num):
"""Runs the conv cell."""
self._cell_num = cell_num
self._filter_scaling = filter_scaling
self._filter_size = int(self._num_conv_filters * filter_scaling)
with tf.variable_scope(scope):
net = self._cell_base(net, prev_layer)
for i in range(len(self._operations) // 2):
with tf.variable_scope('comb_iter_{}'.format(i)):
h1 = net[self._hiddenstate_indices[i * 2]]
h2 = net[self._hiddenstate_indices[i * 2 + 1]]
with tf.variable_scope('left'):
h1 = self._apply_conv_operation(
h1, self._operations[i * 2], stride,
self._hiddenstate_indices[i * 2] < 2)
with tf.variable_scope('right'):
h2 = self._apply_conv_operation(
h2, self._operations[i * 2 + 1], stride,
self._hiddenstate_indices[i * 2 + 1] < 2)
with tf.variable_scope('combine'):
h = h1 + h2
net.append(h)
with tf.variable_scope('cell_output'):
net = self._combine_unused_states(net)
return net
def _cell_base(self, net, prev_layer):
"""Runs the beginning of the conv cell before the chosen ops are run."""
filter_size = self._filter_size
if prev_layer is None:
prev_layer = net
else:
if net.shape[2] != prev_layer.shape[2]:
prev_layer = resize_bilinear(
prev_layer, tf.shape(net)[1:3], prev_layer.dtype)
if filter_size != prev_layer.shape[3]:
prev_layer = tf.nn.relu(prev_layer)
prev_layer = slim.conv2d(prev_layer, filter_size, 1, scope='prev_1x1')
prev_layer = self._batch_norm_fn(prev_layer, scope='prev_bn')
net = tf.nn.relu(net)
net = slim.conv2d(net, filter_size, 1, scope='1x1')
net = self._batch_norm_fn(net, scope='beginning_bn')
net = tf.split(axis=3, num_or_size_splits=1, value=net)
net.append(prev_layer)
return net
def _apply_conv_operation(self, net, operation, stride,
is_from_original_input):
"""Applies the predicted conv operation to net."""
if stride > 1 and not is_from_original_input:
stride = 1
input_filters = net.shape[3]
filter_size = self._filter_size
if 'separable' in operation:
num_layers = int(operation.split('_')[-1])
kernel_size = int(operation.split('x')[0][-1])
for layer_num in range(num_layers):
net = tf.nn.relu(net)
net = separable_conv2d_same(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
stride=stride)
net = self._batch_norm_fn(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
stride = 1
elif 'atrous' in operation:
kernel_size = int(operation.split('x')[0][-1])
net = tf.nn.relu(net)
if stride == 2:
scaled_height = scale_dimension(tf.shape(net)[1], 0.5)
scaled_width = scale_dimension(tf.shape(net)[2], 0.5)
net = resize_bilinear(net, [scaled_height, scaled_width], net.dtype)
net = resnet_utils.conv2d_same(
net, filter_size, kernel_size, rate=1, stride=1,
scope='atrous_{0}x{0}'.format(kernel_size))
else:
net = resnet_utils.conv2d_same(
net, filter_size, kernel_size, rate=2, stride=1,
scope='atrous_{0}x{0}'.format(kernel_size))
net = self._batch_norm_fn(net, scope='bn_atr_{0}x{0}'.format(kernel_size))
elif operation in ['none']:
if stride > 1 or (input_filters != filter_size):
net = tf.nn.relu(net)
net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1')
net = self._batch_norm_fn(net, scope='bn_1')
elif 'pool' in operation:
pooling_type = operation.split('_')[0]
pooling_shape = int(operation.split('_')[-1].split('x')[0])
if pooling_type == 'avg':
net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding='SAME')
elif pooling_type == 'max':
net = slim.max_pool2d(net, pooling_shape, stride=stride, padding='SAME')
else:
raise ValueError('Unimplemented pooling type: ', pooling_type)
if input_filters != filter_size:
net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1')
net = self._batch_norm_fn(net, scope='bn_1')
else:
raise ValueError('Unimplemented operation', operation)
if operation != 'none':
net = self._apply_drop_path(net)
return net
def _combine_unused_states(self, net):
"""Concatenates the unused hidden states of the cell."""
used_hiddenstates = self._used_hiddenstates
states_to_combine = ([
h for h, is_used in zip(net, used_hiddenstates) if not is_used])
net = tf.concat(values=states_to_combine, axis=3)
return net
@contrib_framework.add_arg_scope
def _apply_drop_path(self, net):
"""Apply drop_path regularization."""
drop_path_keep_prob = self._drop_path_keep_prob
if drop_path_keep_prob < 1.0:
# Scale keep prob by layer number.
assert self._cell_num != -1
layer_ratio = (self._cell_num + 1) / float(self._total_num_cells)
drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
# Decrease keep prob over time.
current_step = tf.cast(tf.train.get_or_create_global_step(), tf.float32)
current_ratio = tf.minimum(1.0, current_step / self._total_training_steps)
drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))
# Drop path.
noise_shape = [tf.shape(net)[0], 1, 1, 1]
random_tensor = drop_path_keep_prob
random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)
keep_prob_inv = tf.cast(1.0 / drop_path_keep_prob, net.dtype)
net = net * keep_prob_inv * binary_tensor
return net
| 9,381 | 41.261261 | 80 | py |
models | models-master/research/deeplab/core/feature_extractor.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts features for different models."""
import copy
import functools
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim as contrib_slim
from deeplab.core import nas_network
from deeplab.core import resnet_v1_beta
from deeplab.core import xception
from nets.mobilenet import conv_blocks
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
from nets.mobilenet import mobilenet_v3
slim = contrib_slim
# Default end point for MobileNetv2 (one-based indexing).
_MOBILENET_V2_FINAL_ENDPOINT = 'layer_18'
# Default end point for MobileNetv3.
_MOBILENET_V3_LARGE_FINAL_ENDPOINT = 'layer_17'
_MOBILENET_V3_SMALL_FINAL_ENDPOINT = 'layer_13'
# Default end point for EdgeTPU Mobilenet.
_MOBILENET_EDGETPU = 'layer_24'
def _mobilenet_v2(net,
depth_multiplier,
output_stride,
conv_defs=None,
divisible_by=None,
reuse=None,
scope=None,
final_endpoint=None):
"""Auxiliary function to add support for 'reuse' to mobilenet_v2.
Args:
net: Input tensor of shape [batch_size, height, width, channels].
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 8 (accurate fully convolutional
mode), 16 (fast fully convolutional mode), 32 (classification mode).
    conv_defs: MobileNet conv defs.
    divisible_by: None (use default setting) or an integer that ensures all
      layers' number of channels will be divisible by this number. Used in
      MobileNet.
reuse: Reuse model variables.
scope: Optional variable scope.
final_endpoint: The endpoint to construct the network up to.
Returns:
Features extracted by MobileNetv2.
"""
if divisible_by is None:
divisible_by = 8 if depth_multiplier == 1.0 else 1
if conv_defs is None:
conv_defs = mobilenet_v2.V2_DEF
with tf.variable_scope(
scope, 'MobilenetV2', [net], reuse=reuse) as scope:
return mobilenet_v2.mobilenet_base(
net,
conv_defs=conv_defs,
depth_multiplier=depth_multiplier,
min_depth=8 if depth_multiplier == 1.0 else 1,
divisible_by=divisible_by,
final_endpoint=final_endpoint or _MOBILENET_V2_FINAL_ENDPOINT,
output_stride=output_stride,
scope=scope)
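# Example call (illustrative sketch, not part of the original module):
#   features, end_points = _mobilenet_v2(
#       images, depth_multiplier=1.0, output_stride=16)
# where `images` is a [batch, height, width, 3] tensor.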
def _mobilenet_v3(net,
depth_multiplier,
output_stride,
conv_defs=None,
divisible_by=None,
reuse=None,
scope=None,
final_endpoint=None):
"""Auxiliary function to build mobilenet v3.
Args:
net: Input tensor of shape [batch_size, height, width, channels].
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 8 (accurate fully convolutional
mode), 16 (fast fully convolutional mode), 32 (classification mode).
conv_defs: A list of ConvDef namedtuples specifying the net architecture.
    divisible_by: None (use default setting) or an integer that ensures all
      layers' number of channels will be divisible by this number. Used in
      MobileNet.
reuse: Reuse model variables.
scope: Optional variable scope.
final_endpoint: The endpoint to construct the network up to.
Returns:
net: The output tensor.
end_points: A set of activations for external use.
Raises:
ValueError: If conv_defs or final_endpoint is not specified.
"""
del divisible_by
with tf.variable_scope(
scope, 'MobilenetV3', [net], reuse=reuse) as scope:
if conv_defs is None:
raise ValueError('conv_defs must be specified for mobilenet v3.')
if final_endpoint is None:
raise ValueError('Final endpoint must be specified for mobilenet v3.')
net, end_points = mobilenet_v3.mobilenet_base(
net,
depth_multiplier=depth_multiplier,
conv_defs=conv_defs,
output_stride=output_stride,
final_endpoint=final_endpoint,
scope=scope)
return net, end_points
def mobilenet_v3_large_seg(net,
depth_multiplier,
output_stride,
divisible_by=None,
reuse=None,
scope=None,
final_endpoint=None):
"""Final mobilenet v3 large model for segmentation task."""
del divisible_by
del final_endpoint
conv_defs = copy.deepcopy(mobilenet_v3.V3_LARGE)
# Reduce the filters by a factor of 2 in the last block.
for layer, expansion in [(13, 336), (14, 480), (15, 480), (16, None)]:
conv_defs['spec'][layer].params['num_outputs'] /= 2
# Update expansion size
if expansion is not None:
factor = expansion / conv_defs['spec'][layer - 1].params['num_outputs']
conv_defs['spec'][layer].params[
'expansion_size'] = mobilenet_v3.expand_input(factor)
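  # Hypothetical numbers for illustration (not the actual conv defs): a block
  # that originally produced 160 channels is halved to 80; if the following
  # expansion constant is likewise halved from 960 to 480, the new expansion
  # factor becomes 480 / 80 = 6.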
return _mobilenet_v3(
net,
depth_multiplier=depth_multiplier,
output_stride=output_stride,
divisible_by=8,
conv_defs=conv_defs,
reuse=reuse,
scope=scope,
final_endpoint=_MOBILENET_V3_LARGE_FINAL_ENDPOINT)
def mobilenet_edgetpu(net,
depth_multiplier,
output_stride,
divisible_by=None,
reuse=None,
scope=None,
final_endpoint=None):
"""EdgeTPU version of mobilenet model for segmentation task."""
del divisible_by
del final_endpoint
conv_defs = copy.deepcopy(mobilenet_v3.V3_EDGETPU)
return _mobilenet_v3(
net,
depth_multiplier=depth_multiplier,
output_stride=output_stride,
divisible_by=8,
conv_defs=conv_defs,
reuse=reuse,
scope=scope, # the scope is 'MobilenetEdgeTPU'
final_endpoint=_MOBILENET_EDGETPU)
def mobilenet_v3_small_seg(net,
depth_multiplier,
output_stride,
divisible_by=None,
reuse=None,
scope=None,
final_endpoint=None):
"""Final mobilenet v3 small model for segmentation task."""
del divisible_by
del final_endpoint
conv_defs = copy.deepcopy(mobilenet_v3.V3_SMALL)
# Reduce the filters by a factor of 2 in the last block.
for layer, expansion in [(9, 144), (10, 288), (11, 288), (12, None)]:
conv_defs['spec'][layer].params['num_outputs'] /= 2
# Update expansion size
if expansion is not None:
factor = expansion / conv_defs['spec'][layer - 1].params['num_outputs']
conv_defs['spec'][layer].params[
'expansion_size'] = mobilenet_v3.expand_input(factor)
return _mobilenet_v3(
net,
depth_multiplier=depth_multiplier,
output_stride=output_stride,
divisible_by=8,
conv_defs=conv_defs,
reuse=reuse,
scope=scope,
final_endpoint=_MOBILENET_V3_SMALL_FINAL_ENDPOINT)
# A map from network name to network function.
networks_map = {
'mobilenet_v2': _mobilenet_v2,
'mobilenet_edgetpu': mobilenet_edgetpu,
'mobilenet_v3_large_seg': mobilenet_v3_large_seg,
'mobilenet_v3_small_seg': mobilenet_v3_small_seg,
'resnet_v1_18': resnet_v1_beta.resnet_v1_18,
'resnet_v1_18_beta': resnet_v1_beta.resnet_v1_18_beta,
'resnet_v1_50': resnet_v1_beta.resnet_v1_50,
'resnet_v1_50_beta': resnet_v1_beta.resnet_v1_50_beta,
'resnet_v1_101': resnet_v1_beta.resnet_v1_101,
'resnet_v1_101_beta': resnet_v1_beta.resnet_v1_101_beta,
'xception_41': xception.xception_41,
'xception_65': xception.xception_65,
'xception_71': xception.xception_71,
'nas_pnasnet': nas_network.pnasnet,
'nas_hnasnet': nas_network.hnasnet,
}
def mobilenet_v2_arg_scope(is_training=True,
weight_decay=0.00004,
stddev=0.09,
activation=tf.nn.relu6,
bn_decay=0.997,
bn_epsilon=None,
bn_renorm=None):
"""Defines the default MobilenetV2 arg scope.
Args:
    is_training: Whether or not we're training the model. If this is set to
      None, the is_training parameter in batch_norm is not set. Please note
      that this also sets the is_training parameter in dropout to None.
weight_decay: The weight decay to use for regularizing the model.
stddev: Standard deviation for initialization, if negative uses xavier.
    activation: Activation function to use. Defaults to tf.nn.relu6.
bn_decay: decay for the batch norm moving averages.
bn_epsilon: batch normalization epsilon.
bn_renorm: whether to use batchnorm renormalization
Returns:
    An `arg_scope` to use for the MobileNet v2 model.
"""
batch_norm_params = {
'center': True,
'scale': True,
'decay': bn_decay,
}
if bn_epsilon is not None:
batch_norm_params['epsilon'] = bn_epsilon
if is_training is not None:
batch_norm_params['is_training'] = is_training
if bn_renorm is not None:
batch_norm_params['renorm'] = bn_renorm
dropout_params = {}
if is_training is not None:
dropout_params['is_training'] = is_training
instance_norm_params = {
'center': True,
'scale': True,
'epsilon': 0.001,
}
if stddev < 0:
weight_intitializer = slim.initializers.xavier_initializer()
else:
weight_intitializer = tf.truncated_normal_initializer(stddev=stddev)
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope(
[slim.conv2d, slim.fully_connected, slim.separable_conv2d],
weights_initializer=weight_intitializer,
activation_fn=activation,
normalizer_fn=slim.batch_norm), \
slim.arg_scope(
[conv_blocks.expanded_conv], normalizer_fn=slim.batch_norm), \
slim.arg_scope([mobilenet.apply_activation], activation_fn=activation),\
slim.arg_scope([slim.batch_norm], **batch_norm_params), \
slim.arg_scope([mobilenet.mobilenet_base, mobilenet.mobilenet],
is_training=is_training),\
slim.arg_scope([slim.dropout], **dropout_params), \
slim.arg_scope([slim.instance_norm], **instance_norm_params), \
slim.arg_scope([slim.conv2d], \
weights_regularizer=slim.l2_regularizer(weight_decay)), \
slim.arg_scope([slim.separable_conv2d], weights_regularizer=None), \
slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding='SAME') as s:
return s
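# Example usage (illustrative sketch, not part of the original module):
#   with slim.arg_scope(mobilenet_v2_arg_scope(is_training=True)):
#     features, end_points = _mobilenet_v2(
#         images, depth_multiplier=1.0, output_stride=16)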
# A map from network name to network arg scope.
arg_scopes_map = {
'mobilenet_v2': mobilenet_v2.training_scope,
'mobilenet_edgetpu': mobilenet_v2_arg_scope,
'mobilenet_v3_large_seg': mobilenet_v2_arg_scope,
'mobilenet_v3_small_seg': mobilenet_v2_arg_scope,
'resnet_v1_18': resnet_v1_beta.resnet_arg_scope,
'resnet_v1_18_beta': resnet_v1_beta.resnet_arg_scope,
'resnet_v1_50': resnet_v1_beta.resnet_arg_scope,
'resnet_v1_50_beta': resnet_v1_beta.resnet_arg_scope,
'resnet_v1_101': resnet_v1_beta.resnet_arg_scope,
'resnet_v1_101_beta': resnet_v1_beta.resnet_arg_scope,
'xception_41': xception.xception_arg_scope,
'xception_65': xception.xception_arg_scope,
'xception_71': xception.xception_arg_scope,
'nas_pnasnet': nas_network.nas_arg_scope,
'nas_hnasnet': nas_network.nas_arg_scope,
}
# Names for end point features.
DECODER_END_POINTS = 'decoder_end_points'
# A dictionary from network name to a map of end point features.
networks_to_feature_maps = {
'mobilenet_v2': {
DECODER_END_POINTS: {
4: ['layer_4/depthwise_output'],
8: ['layer_7/depthwise_output'],
16: ['layer_14/depthwise_output'],
},
},
'mobilenet_v3_large_seg': {
DECODER_END_POINTS: {
4: ['layer_4/depthwise_output'],
8: ['layer_7/depthwise_output'],
16: ['layer_13/depthwise_output'],
},
},
'mobilenet_v3_small_seg': {
DECODER_END_POINTS: {
4: ['layer_2/depthwise_output'],
8: ['layer_4/depthwise_output'],
16: ['layer_9/depthwise_output'],
},
},
'resnet_v1_18': {
DECODER_END_POINTS: {
4: ['block1/unit_1/lite_bottleneck_v1/conv2'],
8: ['block2/unit_1/lite_bottleneck_v1/conv2'],
16: ['block3/unit_1/lite_bottleneck_v1/conv2'],
},
},
'resnet_v1_18_beta': {
DECODER_END_POINTS: {
4: ['block1/unit_1/lite_bottleneck_v1/conv2'],
8: ['block2/unit_1/lite_bottleneck_v1/conv2'],
16: ['block3/unit_1/lite_bottleneck_v1/conv2'],
},
},
'resnet_v1_50': {
DECODER_END_POINTS: {
4: ['block1/unit_2/bottleneck_v1/conv3'],
8: ['block2/unit_3/bottleneck_v1/conv3'],
16: ['block3/unit_5/bottleneck_v1/conv3'],
},
},
'resnet_v1_50_beta': {
DECODER_END_POINTS: {
4: ['block1/unit_2/bottleneck_v1/conv3'],
8: ['block2/unit_3/bottleneck_v1/conv3'],
16: ['block3/unit_5/bottleneck_v1/conv3'],
},
},
'resnet_v1_101': {
DECODER_END_POINTS: {
4: ['block1/unit_2/bottleneck_v1/conv3'],
8: ['block2/unit_3/bottleneck_v1/conv3'],
16: ['block3/unit_22/bottleneck_v1/conv3'],
},
},
'resnet_v1_101_beta': {
DECODER_END_POINTS: {
4: ['block1/unit_2/bottleneck_v1/conv3'],
8: ['block2/unit_3/bottleneck_v1/conv3'],
16: ['block3/unit_22/bottleneck_v1/conv3'],
},
},
'xception_41': {
DECODER_END_POINTS: {
4: ['entry_flow/block2/unit_1/xception_module/'
'separable_conv2_pointwise'],
8: ['entry_flow/block3/unit_1/xception_module/'
'separable_conv2_pointwise'],
16: ['exit_flow/block1/unit_1/xception_module/'
'separable_conv2_pointwise'],
},
},
'xception_65': {
DECODER_END_POINTS: {
4: ['entry_flow/block2/unit_1/xception_module/'
'separable_conv2_pointwise'],
8: ['entry_flow/block3/unit_1/xception_module/'
'separable_conv2_pointwise'],
16: ['exit_flow/block1/unit_1/xception_module/'
'separable_conv2_pointwise'],
},
},
'xception_71': {
DECODER_END_POINTS: {
4: ['entry_flow/block3/unit_1/xception_module/'
'separable_conv2_pointwise'],
8: ['entry_flow/block5/unit_1/xception_module/'
'separable_conv2_pointwise'],
16: ['exit_flow/block1/unit_1/xception_module/'
'separable_conv2_pointwise'],
},
},
'nas_pnasnet': {
DECODER_END_POINTS: {
4: ['Stem'],
8: ['Cell_3'],
16: ['Cell_7'],
},
},
'nas_hnasnet': {
DECODER_END_POINTS: {
4: ['Cell_2'],
8: ['Cell_5'],
16: ['Cell_7'],
},
},
}
# A map from feature extractor name to the network name scope used in the
# ImageNet pretrained versions of these models.
name_scope = {
'mobilenet_v2': 'MobilenetV2',
'mobilenet_edgetpu': 'MobilenetEdgeTPU',
'mobilenet_v3_large_seg': 'MobilenetV3',
'mobilenet_v3_small_seg': 'MobilenetV3',
'resnet_v1_18': 'resnet_v1_18',
'resnet_v1_18_beta': 'resnet_v1_18',
'resnet_v1_50': 'resnet_v1_50',
'resnet_v1_50_beta': 'resnet_v1_50',
'resnet_v1_101': 'resnet_v1_101',
'resnet_v1_101_beta': 'resnet_v1_101',
'xception_41': 'xception_41',
'xception_65': 'xception_65',
'xception_71': 'xception_71',
'nas_pnasnet': 'pnasnet',
'nas_hnasnet': 'hnasnet',
}
# Mean pixel value.
_MEAN_RGB = [123.15, 115.90, 103.06]
def _preprocess_subtract_imagenet_mean(inputs, dtype=tf.float32):
"""Subtract Imagenet mean RGB value."""
mean_rgb = tf.reshape(_MEAN_RGB, [1, 1, 1, 3])
num_channels = tf.shape(inputs)[-1]
# We set mean pixel as 0 for the non-RGB channels.
mean_rgb_extended = tf.concat(
[mean_rgb, tf.zeros([1, 1, 1, num_channels - 3])], axis=3)
return tf.cast(inputs - mean_rgb_extended, dtype=dtype)
def _preprocess_zero_mean_unit_range(inputs, dtype=tf.float32):
"""Map image values from [0, 255] to [-1, 1]."""
preprocessed_inputs = (2.0 / 255.0) * tf.to_float(inputs) - 1.0
return tf.cast(preprocessed_inputs, dtype=dtype)
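# Worked example (illustrative): (2.0 / 255.0) * 0 - 1.0 = -1.0,
# (2.0 / 255.0) * 127.5 - 1.0 = 0.0, and (2.0 / 255.0) * 255 - 1.0 = 1.0.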
_PREPROCESS_FN = {
'mobilenet_v2': _preprocess_zero_mean_unit_range,
'mobilenet_edgetpu': _preprocess_zero_mean_unit_range,
'mobilenet_v3_large_seg': _preprocess_zero_mean_unit_range,
'mobilenet_v3_small_seg': _preprocess_zero_mean_unit_range,
'resnet_v1_18': _preprocess_subtract_imagenet_mean,
'resnet_v1_18_beta': _preprocess_zero_mean_unit_range,
'resnet_v1_50': _preprocess_subtract_imagenet_mean,
'resnet_v1_50_beta': _preprocess_zero_mean_unit_range,
'resnet_v1_101': _preprocess_subtract_imagenet_mean,
'resnet_v1_101_beta': _preprocess_zero_mean_unit_range,
'xception_41': _preprocess_zero_mean_unit_range,
'xception_65': _preprocess_zero_mean_unit_range,
'xception_71': _preprocess_zero_mean_unit_range,
'nas_pnasnet': _preprocess_zero_mean_unit_range,
'nas_hnasnet': _preprocess_zero_mean_unit_range,
}
def mean_pixel(model_variant=None):
"""Gets mean pixel value.
  This function returns a different mean pixel value, depending on the input
model_variant which adopts different preprocessing functions. We currently
handle the following preprocessing functions:
(1) _preprocess_subtract_imagenet_mean. We simply return mean pixel value.
(2) _preprocess_zero_mean_unit_range. We return [127.5, 127.5, 127.5].
The return values are used in a way that the padded regions after
pre-processing will contain value 0.
Args:
model_variant: Model variant (string) for feature extraction. For
backwards compatibility, model_variant=None returns _MEAN_RGB.
Returns:
Mean pixel value.
"""
if model_variant in ['resnet_v1_50',
'resnet_v1_101'] or model_variant is None:
return _MEAN_RGB
else:
return [127.5, 127.5, 127.5]
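# Example (illustrative):
#   mean_pixel('resnet_v1_50')  # -> [123.15, 115.90, 103.06]
#   mean_pixel('xception_65')   # -> [127.5, 127.5, 127.5]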
def extract_features(images,
output_stride=8,
multi_grid=None,
depth_multiplier=1.0,
divisible_by=None,
final_endpoint=None,
model_variant=None,
weight_decay=0.0001,
reuse=None,
is_training=False,
fine_tune_batch_norm=False,
regularize_depthwise=False,
preprocess_images=True,
preprocessed_images_dtype=tf.float32,
num_classes=None,
global_pool=False,
nas_architecture_options=None,
nas_training_hyper_parameters=None,
use_bounded_activation=False):
"""Extracts features by the particular model_variant.
Args:
images: A tensor of size [batch, height, width, channels].
output_stride: The ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops used in MobileNet.
    divisible_by: None (use default setting) or an integer that ensures all
      layers' number of channels will be divisible by this number. Used in
      MobileNet.
final_endpoint: The MobileNet endpoint to construct the network up to.
model_variant: Model variant for feature extraction.
weight_decay: The weight decay for model variables.
reuse: Reuse the model variables or not.
is_training: Is training or not.
fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
regularize_depthwise: Whether or not apply L2-norm regularization on the
depthwise convolution weights.
preprocess_images: Performs preprocessing on images or not. Defaults to
True. Set to False if preprocessing will be done by other functions. We
      support two types of preprocessing: (1) mean pixel subtraction and (2)
      pixel value normalization to [-1, 1].
preprocessed_images_dtype: The type after the preprocessing function.
num_classes: Number of classes for image classification task. Defaults
to None for dense prediction tasks.
global_pool: Global pooling for image classification task. Defaults to
False, since dense prediction tasks do not use this.
nas_architecture_options: A dictionary storing NAS architecture options.
      It is either None or its keys are:
- `nas_stem_output_num_conv_filters`: Number of filters of the NAS stem
output tensor.
- `nas_use_classification_head`: Boolean, use image classification head.
nas_training_hyper_parameters: A dictionary storing hyper-parameters for
training nas models. It is either None or its keys are:
- `drop_path_keep_prob`: Probability to keep each path in the cell when
training.
- `total_training_steps`: Total training steps to help drop path
probability calculation.
use_bounded_activation: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference. Currently,
bounded activation is only used in xception model.
Returns:
features: A tensor of size [batch, feature_height, feature_width,
feature_channels], where feature_height/feature_width are determined
by the images height/width and output_stride.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: Unrecognized model variant.
"""
if 'resnet' in model_variant:
arg_scope = arg_scopes_map[model_variant](
weight_decay=weight_decay,
batch_norm_decay=0.95,
batch_norm_epsilon=1e-5,
batch_norm_scale=True)
features, end_points = get_network(
model_variant, preprocess_images, preprocessed_images_dtype, arg_scope)(
inputs=images,
num_classes=num_classes,
is_training=(is_training and fine_tune_batch_norm),
global_pool=global_pool,
output_stride=output_stride,
multi_grid=multi_grid,
reuse=reuse,
scope=name_scope[model_variant])
elif 'xception' in model_variant:
arg_scope = arg_scopes_map[model_variant](
weight_decay=weight_decay,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3,
batch_norm_scale=True,
regularize_depthwise=regularize_depthwise,
use_bounded_activation=use_bounded_activation)
features, end_points = get_network(
model_variant, preprocess_images, preprocessed_images_dtype, arg_scope)(
inputs=images,
num_classes=num_classes,
is_training=(is_training and fine_tune_batch_norm),
global_pool=global_pool,
output_stride=output_stride,
regularize_depthwise=regularize_depthwise,
multi_grid=multi_grid,
reuse=reuse,
scope=name_scope[model_variant])
elif 'mobilenet' in model_variant or model_variant.startswith('mnas'):
arg_scope = arg_scopes_map[model_variant](
is_training=(is_training and fine_tune_batch_norm),
weight_decay=weight_decay)
features, end_points = get_network(
model_variant, preprocess_images, preprocessed_images_dtype, arg_scope)(
inputs=images,
depth_multiplier=depth_multiplier,
divisible_by=divisible_by,
output_stride=output_stride,
reuse=reuse,
scope=name_scope[model_variant],
final_endpoint=final_endpoint)
elif model_variant.startswith('nas'):
arg_scope = arg_scopes_map[model_variant](
weight_decay=weight_decay,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3)
features, end_points = get_network(
model_variant, preprocess_images, preprocessed_images_dtype, arg_scope)(
inputs=images,
num_classes=num_classes,
is_training=(is_training and fine_tune_batch_norm),
global_pool=global_pool,
output_stride=output_stride,
nas_architecture_options=nas_architecture_options,
nas_training_hyper_parameters=nas_training_hyper_parameters,
reuse=reuse,
scope=name_scope[model_variant])
else:
raise ValueError('Unknown model variant %s.' % model_variant)
return features, end_points
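# Example call (illustrative sketch, not part of the original module):
#   features, end_points = extract_features(
#       images, output_stride=16, model_variant='xception_65',
#       is_training=False, fine_tune_batch_norm=False)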
def get_network(network_name, preprocess_images,
preprocessed_images_dtype=tf.float32, arg_scope=None):
"""Gets the network.
Args:
network_name: Network name.
preprocess_images: Preprocesses the images or not.
preprocessed_images_dtype: The type after the preprocessing function.
arg_scope: Optional, arg_scope to build the network. If not provided the
default arg_scope of the network would be used.
Returns:
A network function that is used to extract features.
Raises:
ValueError: network is not supported.
"""
if network_name not in networks_map:
raise ValueError('Unsupported network %s.' % network_name)
arg_scope = arg_scope or arg_scopes_map[network_name]()
def _identity_function(inputs, dtype=preprocessed_images_dtype):
return tf.cast(inputs, dtype=dtype)
if preprocess_images:
preprocess_function = _PREPROCESS_FN[network_name]
else:
preprocess_function = _identity_function
func = networks_map[network_name]
@functools.wraps(func)
def network_fn(inputs, *args, **kwargs):
with slim.arg_scope(arg_scope):
return func(preprocess_function(inputs, preprocessed_images_dtype),
*args, **kwargs)
return network_fn
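# Example (illustrative sketch, not part of the original module):
#   network_fn = get_network('mobilenet_v2', preprocess_images=True)
#   features, end_points = network_fn(
#       images, depth_multiplier=1.0, output_stride=16)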
| 27,688 | 37.889045 | 80 | py |
models | models-master/research/deeplab/core/xception_test.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for xception.py."""
import numpy as np
import six
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from deeplab.core import xception
from tensorflow.contrib.slim.nets import resnet_utils
slim = contrib_slim
def create_test_input(batch, height, width, channels):
"""Create test input tensor."""
if None in [batch, height, width, channels]:
return tf.placeholder(tf.float32, (batch, height, width, channels))
else:
return tf.cast(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) +
np.reshape(np.arange(width), [1, width]),
[1, height, width, 1]),
[batch, 1, 1, channels]),
tf.float32)
class UtilityFunctionTest(tf.test.TestCase):
def testSeparableConv2DSameWithInputEvenSize(self):
n, n2 = 4, 2
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
dw = create_test_input(1, 3, 3, 1)
dw = tf.reshape(dw, [3, 3, 1, 1])
tf.get_variable('Conv/depthwise_weights', initializer=dw)
tf.get_variable('Conv/pointwise_weights',
initializer=tf.ones([1, 1, 1, 1]))
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1,
stride=1, scope='Conv')
y1_expected = tf.cast([[14, 28, 43, 26],
[28, 48, 66, 37],
[43, 66, 84, 46],
[26, 37, 46, 22]], tf.float32)
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.cast([[14, 43],
[43, 84]], tf.float32)
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = xception.separable_conv2d_same(x, 1, 3, depth_multiplier=1,
regularize_depthwise=True,
stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1,
stride=2, scope='Conv')
y4_expected = tf.cast([[48, 37],
[37, 22]], tf.float32)
y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def testSeparableConv2DSameWithInputOddSize(self):
n, n2 = 5, 3
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
dw = create_test_input(1, 3, 3, 1)
dw = tf.reshape(dw, [3, 3, 1, 1])
tf.get_variable('Conv/depthwise_weights', initializer=dw)
tf.get_variable('Conv/pointwise_weights',
initializer=tf.ones([1, 1, 1, 1]))
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1,
stride=1, scope='Conv')
y1_expected = tf.cast([[14, 28, 43, 58, 34],
[28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]], tf.float32)
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.cast([[14, 43, 34],
[43, 84, 55],
[34, 55, 30]], tf.float32)
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = xception.separable_conv2d_same(x, 1, 3, depth_multiplier=1,
regularize_depthwise=True,
stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1,
stride=2, scope='Conv')
y4_expected = y2_expected
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
class XceptionNetworkTest(tf.test.TestCase):
"""Tests with small Xception network."""
def _xception_small(self,
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
regularize_depthwise=True,
reuse=None,
scope='xception_small'):
"""A shallow and thin Xception for faster tests."""
block = xception.xception_block
blocks = [
block('entry_flow/block1',
depth_list=[1, 1, 1],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2),
block('entry_flow/block2',
depth_list=[2, 2, 2],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2),
block('entry_flow/block3',
depth_list=[4, 4, 4],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=1),
block('entry_flow/block4',
depth_list=[4, 4, 4],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2),
block('middle_flow/block1',
depth_list=[4, 4, 4],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=2,
stride=1),
block('exit_flow/block1',
depth_list=[8, 8, 8],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2),
block('exit_flow/block2',
depth_list=[16, 16, 16],
skip_connection_type='none',
activation_fn_in_separable_conv=True,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=1),
]
return xception.xception(inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
reuse=reuse,
scope=scope)
def testClassificationEndPoints(self):
global_pool = True
num_classes = 3
inputs = create_test_input(2, 32, 32, 3)
with slim.arg_scope(xception.xception_arg_scope()):
logits, end_points = self._xception_small(
inputs,
num_classes=num_classes,
global_pool=global_pool,
scope='xception')
self.assertTrue(
logits.op.name.startswith('xception/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
self.assertTrue('global_pool' in end_points)
self.assertListEqual(end_points['global_pool'].get_shape().as_list(),
[2, 1, 1, 16])
def testEndpointNames(self):
global_pool = True
num_classes = 3
inputs = create_test_input(2, 32, 32, 3)
with slim.arg_scope(xception.xception_arg_scope()):
_, end_points = self._xception_small(
inputs,
num_classes=num_classes,
global_pool=global_pool,
scope='xception')
expected = [
'xception/entry_flow/conv1_1',
'xception/entry_flow/conv1_2',
'xception/entry_flow/block1/unit_1/xception_module/separable_conv1',
'xception/entry_flow/block1/unit_1/xception_module/separable_conv2',
'xception/entry_flow/block1/unit_1/xception_module/separable_conv3',
'xception/entry_flow/block1/unit_1/xception_module/shortcut',
'xception/entry_flow/block1/unit_1/xception_module',
'xception/entry_flow/block1',
'xception/entry_flow/block2/unit_1/xception_module/separable_conv1',
'xception/entry_flow/block2/unit_1/xception_module/separable_conv2',
'xception/entry_flow/block2/unit_1/xception_module/separable_conv3',
'xception/entry_flow/block2/unit_1/xception_module/shortcut',
'xception/entry_flow/block2/unit_1/xception_module',
'xception/entry_flow/block2',
'xception/entry_flow/block3/unit_1/xception_module/separable_conv1',
'xception/entry_flow/block3/unit_1/xception_module/separable_conv2',
'xception/entry_flow/block3/unit_1/xception_module/separable_conv3',
'xception/entry_flow/block3/unit_1/xception_module/shortcut',
'xception/entry_flow/block3/unit_1/xception_module',
'xception/entry_flow/block3',
'xception/entry_flow/block4/unit_1/xception_module/separable_conv1',
'xception/entry_flow/block4/unit_1/xception_module/separable_conv2',
'xception/entry_flow/block4/unit_1/xception_module/separable_conv3',
'xception/entry_flow/block4/unit_1/xception_module/shortcut',
'xception/entry_flow/block4/unit_1/xception_module',
'xception/entry_flow/block4',
'xception/middle_flow/block1/unit_1/xception_module/separable_conv1',
'xception/middle_flow/block1/unit_1/xception_module/separable_conv2',
'xception/middle_flow/block1/unit_1/xception_module/separable_conv3',
'xception/middle_flow/block1/unit_1/xception_module',
'xception/middle_flow/block1/unit_2/xception_module/separable_conv1',
'xception/middle_flow/block1/unit_2/xception_module/separable_conv2',
'xception/middle_flow/block1/unit_2/xception_module/separable_conv3',
'xception/middle_flow/block1/unit_2/xception_module',
'xception/middle_flow/block1',
'xception/exit_flow/block1/unit_1/xception_module/separable_conv1',
'xception/exit_flow/block1/unit_1/xception_module/separable_conv2',
'xception/exit_flow/block1/unit_1/xception_module/separable_conv3',
'xception/exit_flow/block1/unit_1/xception_module/shortcut',
'xception/exit_flow/block1/unit_1/xception_module',
'xception/exit_flow/block1',
'xception/exit_flow/block2/unit_1/xception_module/separable_conv1',
'xception/exit_flow/block2/unit_1/xception_module/separable_conv2',
'xception/exit_flow/block2/unit_1/xception_module/separable_conv3',
'xception/exit_flow/block2/unit_1/xception_module',
'xception/exit_flow/block2',
'global_pool',
'xception/logits',
'predictions',
]
self.assertItemsEqual(list(end_points.keys()), expected)
def testClassificationShapes(self):
global_pool = True
num_classes = 3
inputs = create_test_input(2, 64, 64, 3)
with slim.arg_scope(xception.xception_arg_scope()):
_, end_points = self._xception_small(
inputs,
num_classes,
global_pool=global_pool,
scope='xception')
endpoint_to_shape = {
'xception/entry_flow/conv1_1': [2, 32, 32, 32],
'xception/entry_flow/block1': [2, 16, 16, 1],
'xception/entry_flow/block2': [2, 8, 8, 2],
'xception/entry_flow/block4': [2, 4, 4, 4],
'xception/middle_flow/block1': [2, 4, 4, 4],
'xception/exit_flow/block1': [2, 2, 2, 8],
'xception/exit_flow/block2': [2, 2, 2, 16]}
for endpoint, shape in six.iteritems(endpoint_to_shape):
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 3
inputs = create_test_input(2, 65, 65, 3)
with slim.arg_scope(xception.xception_arg_scope()):
_, end_points = self._xception_small(
inputs,
num_classes,
global_pool=global_pool,
scope='xception')
endpoint_to_shape = {
'xception/entry_flow/conv1_1': [2, 33, 33, 32],
'xception/entry_flow/block1': [2, 17, 17, 1],
'xception/entry_flow/block2': [2, 9, 9, 2],
'xception/entry_flow/block4': [2, 5, 5, 4],
'xception/middle_flow/block1': [2, 5, 5, 4],
'xception/exit_flow/block1': [2, 3, 3, 8],
'xception/exit_flow/block2': [2, 3, 3, 16]}
for endpoint, shape in six.iteritems(endpoint_to_shape):
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 3
output_stride = 8
inputs = create_test_input(2, 65, 65, 3)
with slim.arg_scope(xception.xception_arg_scope()):
_, end_points = self._xception_small(
inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
scope='xception')
endpoint_to_shape = {
'xception/entry_flow/block1': [2, 17, 17, 1],
'xception/entry_flow/block2': [2, 9, 9, 2],
'xception/entry_flow/block4': [2, 9, 9, 4],
'xception/middle_flow/block1': [2, 9, 9, 4],
'xception/exit_flow/block1': [2, 9, 9, 8],
'xception/exit_flow/block2': [2, 9, 9, 16]}
for endpoint, shape in six.iteritems(endpoint_to_shape):
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with slim.arg_scope(xception.xception_arg_scope()):
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(2, 96, 97, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._xception_small(
inputs,
None,
is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._xception_small(
inputs,
None,
is_training=False,
global_pool=False)
sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-5, rtol=1e-5)
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with slim.arg_scope(xception.xception_arg_scope()):
logits, _ = self._xception_small(
inputs,
num_classes,
global_pool=global_pool,
scope='xception')
self.assertTrue(logits.op.name.startswith('xception/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(xception.xception_arg_scope()):
output, _ = self._xception_small(
inputs,
None,
global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 16])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 3, 3, 16))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(xception.xception_arg_scope()):
output, _ = self._xception_small(
inputs,
None,
global_pool=global_pool,
output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 16])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 9, 9, 16))
def testEndpointsReuse(self):
inputs = create_test_input(2, 32, 32, 3)
with slim.arg_scope(xception.xception_arg_scope()):
_, end_points0 = xception.xception_65(
inputs,
num_classes=10,
reuse=False)
with slim.arg_scope(xception.xception_arg_scope()):
_, end_points1 = xception.xception_65(
inputs,
num_classes=10,
reuse=True)
self.assertItemsEqual(list(end_points0.keys()), list(end_points1.keys()))
  def testUseBoundedActivation(self):
global_pool = False
num_classes = 3
output_stride = 16
for use_bounded_activation in (True, False):
tf.reset_default_graph()
inputs = create_test_input(2, 65, 65, 3)
with slim.arg_scope(xception.xception_arg_scope(
use_bounded_activation=use_bounded_activation)):
_, _ = self._xception_small(
inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
scope='xception')
for node in tf.get_default_graph().as_graph_def().node:
if node.op.startswith('Relu'):
self.assertEqual(node.op == 'Relu6', use_bounded_activation)
if __name__ == '__main__':
tf.test.main()
| 20,109 | 40.124744 | 80 | py |
models | models-master/research/deeplab/core/dense_prediction_cell_test.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dense_prediction_cell."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from deeplab.core import dense_prediction_cell
class DensePredictionCellTest(tf.test.TestCase):
def setUp(self):
self.segmentation_layer = dense_prediction_cell.DensePredictionCell(
config=[
{
dense_prediction_cell._INPUT: -1,
dense_prediction_cell._OP: dense_prediction_cell._CONV,
dense_prediction_cell._KERNEL: 1,
},
{
dense_prediction_cell._INPUT: 0,
dense_prediction_cell._OP: dense_prediction_cell._CONV,
dense_prediction_cell._KERNEL: 3,
dense_prediction_cell._RATE: [1, 3],
},
{
dense_prediction_cell._INPUT: 1,
dense_prediction_cell._OP: (
dense_prediction_cell._PYRAMID_POOLING),
dense_prediction_cell._GRID_SIZE: [1, 2],
},
],
hparams={'conv_rate_multiplier': 2})
def testPyramidPoolingArguments(self):
features_size, pooled_kernel = (
self.segmentation_layer._get_pyramid_pooling_arguments(
crop_size=[513, 513],
output_stride=16,
image_grid=[4, 4]))
self.assertListEqual(features_size, [33, 33])
self.assertListEqual(pooled_kernel, [9, 9])
def testPyramidPoolingArgumentsWithImageGrid1x1(self):
features_size, pooled_kernel = (
self.segmentation_layer._get_pyramid_pooling_arguments(
crop_size=[257, 257],
output_stride=16,
image_grid=[1, 1]))
self.assertListEqual(features_size, [17, 17])
self.assertListEqual(pooled_kernel, [17, 17])
def testParseOperationStringWithConv1x1(self):
operation = self.segmentation_layer._parse_operation(
config={
dense_prediction_cell._OP: dense_prediction_cell._CONV,
dense_prediction_cell._KERNEL: [1, 1],
},
crop_size=[513, 513], output_stride=16)
self.assertEqual(operation[dense_prediction_cell._OP],
dense_prediction_cell._CONV)
self.assertListEqual(operation[dense_prediction_cell._KERNEL], [1, 1])
def testParseOperationStringWithConv3x3(self):
operation = self.segmentation_layer._parse_operation(
config={
dense_prediction_cell._OP: dense_prediction_cell._CONV,
dense_prediction_cell._KERNEL: [3, 3],
dense_prediction_cell._RATE: [9, 6],
},
crop_size=[513, 513], output_stride=16)
self.assertEqual(operation[dense_prediction_cell._OP],
dense_prediction_cell._CONV)
self.assertListEqual(operation[dense_prediction_cell._KERNEL], [3, 3])
self.assertEqual(operation[dense_prediction_cell._RATE], [9, 6])
def testParseOperationStringWithPyramidPooling2x2(self):
operation = self.segmentation_layer._parse_operation(
config={
dense_prediction_cell._OP: dense_prediction_cell._PYRAMID_POOLING,
dense_prediction_cell._GRID_SIZE: [2, 2],
},
crop_size=[513, 513],
output_stride=16)
self.assertEqual(operation[dense_prediction_cell._OP],
dense_prediction_cell._PYRAMID_POOLING)
# The feature maps of size [33, 33] should be covered by 2x2 kernels with
# size [17, 17].
self.assertListEqual(
operation[dense_prediction_cell._TARGET_SIZE], [33, 33])
self.assertListEqual(operation[dense_prediction_cell._KERNEL], [17, 17])
def testBuildCell(self):
with self.test_session(graph=tf.Graph()) as sess:
features = tf.random_normal([2, 33, 33, 5])
concat_logits = self.segmentation_layer.build_cell(
features,
output_stride=8,
crop_size=[257, 257])
sess.run(tf.global_variables_initializer())
concat_logits = sess.run(concat_logits)
self.assertTrue(concat_logits.any())
def testBuildCellWithImagePoolingCropSize(self):
with self.test_session(graph=tf.Graph()) as sess:
features = tf.random_normal([2, 33, 33, 5])
concat_logits = self.segmentation_layer.build_cell(
features,
output_stride=8,
crop_size=[257, 257],
image_pooling_crop_size=[129, 129])
sess.run(tf.global_variables_initializer())
concat_logits = sess.run(concat_logits)
self.assertTrue(concat_logits.any())
if __name__ == '__main__':
tf.test.main()
| 5,290 | 37.620438 | 80 | py |
models | models-master/research/deeplab/core/nas_network_test.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for resnet_v1_beta module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim as contrib_slim
from tensorflow.contrib import training as contrib_training
from deeplab.core import nas_genotypes
from deeplab.core import nas_network
arg_scope = contrib_framework.arg_scope
slim = contrib_slim
def create_test_input(batch, height, width, channels):
"""Creates test input tensor."""
if None in [batch, height, width, channels]:
return tf.placeholder(tf.float32, (batch, height, width, channels))
else:
return tf.to_float(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) +
np.reshape(np.arange(width), [1, width]),
[1, height, width, 1]),
[batch, 1, 1, channels]))
class NASNetworkTest(tf.test.TestCase):
"""Tests with complete small NAS networks."""
def _pnasnet(self,
images,
backbone,
num_classes,
is_training=True,
output_stride=16,
final_endpoint=None):
"""Build PNASNet model backbone."""
hparams = contrib_training.HParams(
filter_scaling_rate=2.0,
num_conv_filters=10,
drop_path_keep_prob=1.0,
total_training_steps=200000,
)
if not is_training:
hparams.set_hparam('drop_path_keep_prob', 1.0)
cell = nas_genotypes.PNASCell(hparams.num_conv_filters,
hparams.drop_path_keep_prob,
len(backbone),
hparams.total_training_steps)
with arg_scope([slim.dropout, slim.batch_norm], is_training=is_training):
return nas_network._build_nas_base(
images,
cell=cell,
backbone=backbone,
num_classes=num_classes,
hparams=hparams,
reuse=tf.AUTO_REUSE,
scope='pnasnet_small',
final_endpoint=final_endpoint)
def testFullyConvolutionalEndpointShapes(self):
num_classes = 10
backbone = [0, 0, 0, 1, 2, 1, 2, 2, 3, 3, 2, 1]
inputs = create_test_input(None, 321, 321, 3)
with slim.arg_scope(nas_network.nas_arg_scope()):
_, end_points = self._pnasnet(inputs, backbone, num_classes)
endpoint_to_shape = {
'Stem': [None, 81, 81, 128],
'Cell_0': [None, 81, 81, 50],
'Cell_1': [None, 81, 81, 50],
'Cell_2': [None, 81, 81, 50],
'Cell_3': [None, 41, 41, 100],
'Cell_4': [None, 21, 21, 200],
'Cell_5': [None, 41, 41, 100],
'Cell_6': [None, 21, 21, 200],
'Cell_7': [None, 21, 21, 200],
'Cell_8': [None, 11, 11, 400],
'Cell_9': [None, 11, 11, 400],
'Cell_10': [None, 21, 21, 200],
'Cell_11': [None, 41, 41, 100]
}
for endpoint, shape in endpoint_to_shape.items():
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
if __name__ == '__main__':
tf.test.main()
| 3,923 | 34.035714 | 80 | py |
models | models-master/research/deeplab/core/utils.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This script contains utility functions."""
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
# Quantized version of sigmoid function.
q_sigmoid = lambda x: tf.nn.relu6(x + 3) * 0.16667
def resize_bilinear(images, size, output_dtype=tf.float32):
"""Returns resized images as output_type.
Args:
images: A tensor of size [batch, height_in, width_in, channels].
size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new size
for the images.
output_dtype: The destination type.
Returns:
A tensor of size [batch, height_out, width_out, channels] as a dtype of
output_dtype.
"""
images = tf.image.resize_bilinear(images, size, align_corners=True)
return tf.cast(images, dtype=output_dtype)
def scale_dimension(dim, scale):
"""Scales the input dimension.
Args:
dim: Input dimension (a scalar or a scalar Tensor).
scale: The amount of scaling applied to the input.
Returns:
Scaled dimension.
"""
if isinstance(dim, tf.Tensor):
return tf.cast((tf.to_float(dim) - 1.0) * scale + 1.0, dtype=tf.int32)
else:
return int((float(dim) - 1.0) * scale + 1.0)
def split_separable_conv2d(inputs,
filters,
kernel_size=3,
rate=1,
weight_decay=0.00004,
depthwise_weights_initializer_stddev=0.33,
pointwise_weights_initializer_stddev=0.06,
scope=None):
"""Splits a separable conv2d into depthwise and pointwise conv2d.
This operation differs from `tf.layers.separable_conv2d` as this operation
applies activation function between depthwise and pointwise conv2d.
Args:
inputs: Input tensor with shape [batch, height, width, channels].
filters: Number of filters in the 1x1 pointwise convolution.
kernel_size: A list of length 2: [kernel_height, kernel_width] of
of the filters. Can be an int if both values are the same.
rate: Atrous convolution rate for the depthwise convolution.
weight_decay: The weight decay to use for regularizing the model.
depthwise_weights_initializer_stddev: The standard deviation of the
truncated normal weight initializer for depthwise convolution.
pointwise_weights_initializer_stddev: The standard deviation of the
truncated normal weight initializer for pointwise convolution.
scope: Optional scope for the operation.
Returns:
Computed features after split separable conv2d.
"""
outputs = slim.separable_conv2d(
inputs,
None,
kernel_size=kernel_size,
depth_multiplier=1,
rate=rate,
weights_initializer=tf.truncated_normal_initializer(
stddev=depthwise_weights_initializer_stddev),
weights_regularizer=None,
scope=scope + '_depthwise')
return slim.conv2d(
outputs,
filters,
1,
weights_initializer=tf.truncated_normal_initializer(
stddev=pointwise_weights_initializer_stddev),
weights_regularizer=slim.l2_regularizer(weight_decay),
scope=scope + '_pointwise')
def get_label_weight_mask(labels, ignore_label, num_classes, label_weights=1.0):
"""Gets the label weight mask.
Args:
labels: A Tensor of labels with the shape of [-1].
ignore_label: Integer, label to ignore.
num_classes: Integer, the number of semantic classes.
label_weights: A float or a list of weights. If it is a float, it means all
the labels have the same weight. If it is a list of weights, then each
element in the list represents the weight for the label of its index, for
example, label_weights = [0.1, 0.5] means the weight for label 0 is 0.1
and the weight for label 1 is 0.5.
Returns:
A Tensor of label weights with the same shape of labels, each element is the
weight for the label with the same index in labels and the element is 0.0
if the label is to ignore.
Raises:
ValueError: If label_weights is neither a float nor a list, or if
label_weights is a list and its length is not equal to num_classes.
"""
if not isinstance(label_weights, (float, list)):
raise ValueError(
'The type of label_weights is invalid, it must be a float or a list.')
if isinstance(label_weights, list) and len(label_weights) != num_classes:
raise ValueError(
'Length of label_weights must be equal to num_classes if it is a list, '
'label_weights: %s, num_classes: %d.' % (label_weights, num_classes))
not_ignore_mask = tf.not_equal(labels, ignore_label)
not_ignore_mask = tf.cast(not_ignore_mask, tf.float32)
if isinstance(label_weights, float):
return not_ignore_mask * label_weights
label_weights = tf.constant(label_weights, tf.float32)
weight_mask = tf.einsum('...y,y->...',
tf.one_hot(labels, num_classes, dtype=tf.float32),
label_weights)
return tf.multiply(not_ignore_mask, weight_mask)
def get_batch_norm_fn(sync_batch_norm_method):
"""Gets batch norm function.
Currently we only support the following methods:
- `None` (no sync batch norm). We use slim.batch_norm in this case.
Args:
sync_batch_norm_method: String, method used to sync batch norm.
Returns:
Batchnorm function.
Raises:
ValueError: If sync_batch_norm_method is not supported.
"""
if sync_batch_norm_method == 'None':
return slim.batch_norm
else:
raise ValueError('Unsupported sync_batch_norm_method.')
def get_batch_norm_params(decay=0.9997,
epsilon=1e-5,
center=True,
scale=True,
is_training=True,
sync_batch_norm_method='None',
initialize_gamma_as_zeros=False):
"""Gets batch norm parameters.
Args:
decay: Float, decay for the moving average.
epsilon: Float, value added to variance to avoid dividing by zero.
center: Boolean. If True, add offset of `beta` to normalized tensor. If
False,`beta` is ignored.
scale: Boolean. If True, multiply by `gamma`. If False, `gamma` is not used.
is_training: Boolean, whether or not the layer is in training mode.
sync_batch_norm_method: String, method used to sync batch norm.
initialize_gamma_as_zeros: Boolean, initializing `gamma` as zeros or not.
Returns:
A dictionary for batchnorm parameters.
Raises:
ValueError: If sync_batch_norm_method is not supported.
"""
batch_norm_params = {
'is_training': is_training,
'decay': decay,
'epsilon': epsilon,
'scale': scale,
'center': center,
}
if initialize_gamma_as_zeros:
if sync_batch_norm_method == 'None':
# Slim-type gamma_initialier.
batch_norm_params['param_initializers'] = {
'gamma': tf.zeros_initializer(),
}
else:
raise ValueError('Unsupported sync_batch_norm_method.')
return batch_norm_params
| 7,846 | 35.497674 | 80 | py |
models | models-master/research/deeplab/core/nas_network.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Network structure used by NAS.
Here we provide a few NAS backbones for semantic segmentation.
Currently, we have
1. pnasnet
"Progressive Neural Architecture Search", Chenxi Liu, Barret Zoph,
Maxim Neumann, Jonathon Shlens, Wei Hua, Li-Jia Li, Li Fei-Fei,
Alan Yuille, Jonathan Huang, Kevin Murphy. In ECCV, 2018.
2. hnasnet (also called Auto-DeepLab)
"Auto-DeepLab: Hierarchical Neural Architecture Search for Semantic
Image Segmentation", Chenxi Liu, Liang-Chieh Chen, Florian Schroff,
Hartwig Adam, Wei Hua, Alan Yuille, Li Fei-Fei. In CVPR, 2019.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import slim as contrib_slim
from tensorflow.contrib import training as contrib_training
from deeplab.core import nas_genotypes
from deeplab.core import utils
from deeplab.core.nas_cell import NASBaseCell
from tensorflow.contrib.slim.nets import resnet_utils
arg_scope = contrib_framework.arg_scope
slim = contrib_slim
resize_bilinear = utils.resize_bilinear
scale_dimension = utils.scale_dimension
def config(num_conv_filters=20,
total_training_steps=500000,
drop_path_keep_prob=1.0):
return contrib_training.HParams(
# Multiplier when spatial size is reduced by 2.
filter_scaling_rate=2.0,
# Number of filters of the stem output tensor.
num_conv_filters=num_conv_filters,
# Probability to keep each path in the cell when training.
drop_path_keep_prob=drop_path_keep_prob,
# Total training steps to help drop path probability calculation.
total_training_steps=total_training_steps,
)
def nas_arg_scope(weight_decay=4e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
sync_batch_norm_method='None'):
"""Default arg scope for the NAS models."""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
}
batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method)
weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
weights_initializer = contrib_layers.variance_scaling_initializer(
factor=1 / 3.0, mode='FAN_IN', uniform=True)
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([batch_norm], **batch_norm_params) as sc:
return sc
def _nas_stem(inputs,
batch_norm_fn=slim.batch_norm):
"""Stem used for NAS models."""
net = resnet_utils.conv2d_same(inputs, 64, 3, stride=2, scope='conv0')
net = batch_norm_fn(net, scope='conv0_bn')
net = tf.nn.relu(net)
net = resnet_utils.conv2d_same(net, 64, 3, stride=1, scope='conv1')
net = batch_norm_fn(net, scope='conv1_bn')
cell_outputs = [net]
net = tf.nn.relu(net)
net = resnet_utils.conv2d_same(net, 128, 3, stride=2, scope='conv2')
net = batch_norm_fn(net, scope='conv2_bn')
cell_outputs.append(net)
return net, cell_outputs
def _build_nas_base(images,
cell,
backbone,
num_classes,
hparams,
global_pool=False,
output_stride=16,
nas_use_classification_head=False,
reuse=None,
scope=None,
final_endpoint=None,
batch_norm_fn=slim.batch_norm,
nas_remove_os32_stride=False):
"""Constructs a NAS model.
Args:
images: A tensor of size [batch, height, width, channels].
cell: Cell structure used in the network.
backbone: Backbone structure used in the network. A list of integers in
which value 0 means "output_stride=4", value 1 means "output_stride=8",
value 2 means "output_stride=16", and value 3 means "output_stride=32".
num_classes: Number of classes to predict.
hparams: Hyperparameters needed to construct the network.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: Interger, the stride of output feature maps.
nas_use_classification_head: Boolean, use image classification head.
reuse: Whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
final_endpoint: The endpoint to construct the network up to.
batch_norm_fn: Batch norm function.
nas_remove_os32_stride: Boolean, remove stride in output_stride 32 branch.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If output_stride is not a multiple of backbone output stride.
"""
with tf.variable_scope(scope, 'nas', [images], reuse=reuse):
end_points = {}
def add_and_check_endpoint(endpoint_name, net):
end_points[endpoint_name] = net
return final_endpoint and (endpoint_name == final_endpoint)
net, cell_outputs = _nas_stem(images,
batch_norm_fn=batch_norm_fn)
if add_and_check_endpoint('Stem', net):
return net, end_points
# Run the cells
filter_scaling = 1.0
for cell_num in range(len(backbone)):
stride = 1
if cell_num == 0:
if backbone[0] == 1:
stride = 2
filter_scaling *= hparams.filter_scaling_rate
else:
if backbone[cell_num] == backbone[cell_num - 1] + 1:
stride = 2
if backbone[cell_num] == 3 and nas_remove_os32_stride:
stride = 1
filter_scaling *= hparams.filter_scaling_rate
elif backbone[cell_num] == backbone[cell_num - 1] - 1:
if backbone[cell_num - 1] == 3 and nas_remove_os32_stride:
# No need to rescale features.
pass
else:
# Scale features by a factor of 2.
scaled_height = scale_dimension(net.shape[1].value, 2)
scaled_width = scale_dimension(net.shape[2].value, 2)
net = resize_bilinear(net, [scaled_height, scaled_width], net.dtype)
filter_scaling /= hparams.filter_scaling_rate
net = cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=cell_outputs[-2],
cell_num=cell_num)
if add_and_check_endpoint('Cell_{}'.format(cell_num), net):
return net, end_points
cell_outputs.append(net)
net = tf.nn.relu(net)
if nas_use_classification_head:
# Add image classification head.
# We will expand the filters for different output_strides.
output_stride_to_expanded_filters = {8: 256, 16: 512, 32: 1024}
current_output_scale = 2 + backbone[-1]
current_output_stride = 2 ** current_output_scale
if output_stride % current_output_stride != 0:
raise ValueError(
'output_stride must be a multiple of backbone output stride.')
output_stride //= current_output_stride
rate = 1
if current_output_stride != 32:
num_downsampling = 5 - current_output_scale
for i in range(num_downsampling):
# Gradually donwsample feature maps to output stride = 32.
target_output_stride = 2 ** (current_output_scale + 1 + i)
target_filters = output_stride_to_expanded_filters[
target_output_stride]
scope = 'downsample_os{}'.format(target_output_stride)
if output_stride != 1:
stride = 2
output_stride //= 2
else:
stride = 1
rate *= 2
net = resnet_utils.conv2d_same(
net, target_filters, 3, stride=stride, rate=rate,
scope=scope + '_conv')
net = batch_norm_fn(net, scope=scope + '_bn')
add_and_check_endpoint(scope, net)
net = tf.nn.relu(net)
# Apply 1x1 convolution to expand dimension to 2048.
scope = 'classification_head'
net = slim.conv2d(net, 2048, 1, scope=scope + '_conv')
net = batch_norm_fn(net, scope=scope + '_bn')
add_and_check_endpoint(scope, net)
net = tf.nn.relu(net)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='global_pool', keepdims=True)
if num_classes is not None:
net = slim.conv2d(net, num_classes, 1, activation_fn=None,
normalizer_fn=None, scope='logits')
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
def pnasnet(images,
num_classes,
is_training=True,
global_pool=False,
output_stride=16,
nas_architecture_options=None,
nas_training_hyper_parameters=None,
reuse=None,
scope='pnasnet',
final_endpoint=None,
sync_batch_norm_method='None'):
"""Builds PNASNet model."""
if nas_architecture_options is None:
raise ValueError(
'Using NAS model variants. nas_architecture_options cannot be None.')
hparams = config(num_conv_filters=nas_architecture_options[
'nas_stem_output_num_conv_filters'])
if nas_training_hyper_parameters:
hparams.set_hparam('drop_path_keep_prob',
nas_training_hyper_parameters['drop_path_keep_prob'])
hparams.set_hparam('total_training_steps',
nas_training_hyper_parameters['total_training_steps'])
if not is_training:
tf.logging.info('During inference, setting drop_path_keep_prob = 1.0.')
hparams.set_hparam('drop_path_keep_prob', 1.0)
tf.logging.info(hparams)
if output_stride == 8:
backbone = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
elif output_stride == 16:
backbone = [1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2]
elif output_stride == 32:
backbone = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]
else:
raise ValueError('Unsupported output_stride ', output_stride)
batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method)
cell = nas_genotypes.PNASCell(hparams.num_conv_filters,
hparams.drop_path_keep_prob,
len(backbone),
hparams.total_training_steps,
batch_norm_fn=batch_norm)
with arg_scope([slim.dropout, batch_norm], is_training=is_training):
return _build_nas_base(
images,
cell=cell,
backbone=backbone,
num_classes=num_classes,
hparams=hparams,
global_pool=global_pool,
output_stride=output_stride,
nas_use_classification_head=nas_architecture_options[
'nas_use_classification_head'],
reuse=reuse,
scope=scope,
final_endpoint=final_endpoint,
batch_norm_fn=batch_norm,
nas_remove_os32_stride=nas_architecture_options[
'nas_remove_os32_stride'])
# pylint: disable=unused-argument
def hnasnet(images,
num_classes,
is_training=True,
global_pool=False,
output_stride=8,
nas_architecture_options=None,
nas_training_hyper_parameters=None,
reuse=None,
scope='hnasnet',
final_endpoint=None,
sync_batch_norm_method='None'):
"""Builds hierarchical model."""
if nas_architecture_options is None:
raise ValueError(
'Using NAS model variants. nas_architecture_options cannot be None.')
hparams = config(num_conv_filters=nas_architecture_options[
'nas_stem_output_num_conv_filters'])
if nas_training_hyper_parameters:
hparams.set_hparam('drop_path_keep_prob',
nas_training_hyper_parameters['drop_path_keep_prob'])
hparams.set_hparam('total_training_steps',
nas_training_hyper_parameters['total_training_steps'])
if not is_training:
tf.logging.info('During inference, setting drop_path_keep_prob = 1.0.')
hparams.set_hparam('drop_path_keep_prob', 1.0)
tf.logging.info(hparams)
operations = [
'atrous_5x5', 'separable_3x3_2', 'separable_3x3_2', 'atrous_3x3',
'separable_3x3_2', 'separable_3x3_2', 'separable_5x5_2',
'separable_5x5_2', 'separable_5x5_2', 'atrous_5x5'
]
used_hiddenstates = [1, 1, 0, 0, 0, 0, 0]
hiddenstate_indices = [1, 0, 1, 0, 3, 1, 4, 2, 3, 5]
backbone = [0, 0, 0, 1, 2, 1, 2, 2, 3, 3, 2, 1]
batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method)
cell = NASBaseCell(hparams.num_conv_filters,
operations,
used_hiddenstates,
hiddenstate_indices,
hparams.drop_path_keep_prob,
len(backbone),
hparams.total_training_steps,
batch_norm_fn=batch_norm)
with arg_scope([slim.dropout, batch_norm], is_training=is_training):
return _build_nas_base(
images,
cell=cell,
backbone=backbone,
num_classes=num_classes,
hparams=hparams,
global_pool=global_pool,
output_stride=output_stride,
nas_use_classification_head=nas_architecture_options[
'nas_use_classification_head'],
reuse=reuse,
scope=scope,
final_endpoint=final_endpoint,
batch_norm_fn=batch_norm,
nas_remove_os32_stride=nas_architecture_options[
'nas_remove_os32_stride'])
| 14,892 | 39.360434 | 80 | py |
models | models-master/research/deeplab/core/preprocess_utils.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions related to preprocessing inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from six.moves import zip
import tensorflow as tf
def flip_dim(tensor_list, prob=0.5, dim=1):
"""Randomly flips a dimension of the given tensor.
The decision to randomly flip the `Tensors` is made together. In other words,
all or none of the images pass in are flipped.
Note that tf.random_flip_left_right and tf.random_flip_up_down isn't used so
that we can control for the probability as well as ensure the same decision
is applied across the images.
Args:
tensor_list: A list of `Tensors` with the same number of dimensions.
prob: The probability of a left-right flip.
dim: The dimension to flip, 0, 1, ..
Returns:
outputs: A list of the possibly flipped `Tensors` as well as an indicator
`Tensor` at the end whose value is `True` if the inputs were flipped and
`False` otherwise.
Raises:
ValueError: If dim is negative or greater than the dimension of a `Tensor`.
"""
random_value = tf.random_uniform([])
def flip():
flipped = []
for tensor in tensor_list:
if dim < 0 or dim >= len(tensor.get_shape().as_list()):
raise ValueError('dim must represent a valid dimension.')
flipped.append(tf.reverse_v2(tensor, [dim]))
return flipped
is_flipped = tf.less_equal(random_value, prob)
outputs = tf.cond(is_flipped, flip, lambda: tensor_list)
if not isinstance(outputs, (list, tuple)):
outputs = [outputs]
outputs.append(is_flipped)
return outputs
def _image_dimensions(image, rank):
"""Returns the dimensions of an image tensor.
Args:
image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
rank: The expected rank of the image
Returns:
A list of corresponding to the dimensions of the input image. Dimensions
that are statically known are python integers, otherwise they are integer
scalar tensors.
"""
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(rank).as_list()
dynamic_shape = tf.unstack(tf.shape(image), rank)
return [
s if s is not None else d for s, d in zip(static_shape, dynamic_shape)
]
def get_label_resize_method(label):
"""Returns the resize method of labels depending on label dtype.
Args:
label: Groundtruth label tensor.
Returns:
tf.image.ResizeMethod.BILINEAR, if label dtype is floating.
tf.image.ResizeMethod.NEAREST_NEIGHBOR, if label dtype is integer.
Raises:
ValueError: If label is neither floating nor integer.
"""
if label.dtype.is_floating:
return tf.image.ResizeMethod.BILINEAR
elif label.dtype.is_integer:
return tf.image.ResizeMethod.NEAREST_NEIGHBOR
else:
raise ValueError('Label type must be either floating or integer.')
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
target_width, pad_value):
"""Pads the given image with the given pad_value.
Works like tf.image.pad_to_bounding_box, except it can pad the image
with any given arbitrary pad value and also handle images whose sizes are not
known during graph construction.
Args:
image: 3-D tensor with shape [height, width, channels]
offset_height: Number of rows of zeros to add on top.
offset_width: Number of columns of zeros to add on the left.
target_height: Height of output image.
target_width: Width of output image.
pad_value: Value to pad the image tensor with.
Returns:
3-D tensor of shape [target_height, target_width, channels].
Raises:
ValueError: If the shape of image is incompatible with the offset_* or
target_* arguments.
"""
with tf.name_scope(None, 'pad_to_bounding_box', [image]):
image = tf.convert_to_tensor(image, name='image')
original_dtype = image.dtype
if original_dtype != tf.float32 and original_dtype != tf.float64:
# If image dtype is not float, we convert it to int32 to avoid overflow.
image = tf.cast(image, tf.int32)
image_rank_assert = tf.Assert(
tf.logical_or(
tf.equal(tf.rank(image), 3),
tf.equal(tf.rank(image), 4)),
['Wrong image tensor rank.'])
with tf.control_dependencies([image_rank_assert]):
image -= pad_value
image_shape = image.get_shape()
is_batch = True
if image_shape.ndims == 3:
is_batch = False
image = tf.expand_dims(image, 0)
elif image_shape.ndims is None:
is_batch = False
image = tf.expand_dims(image, 0)
image.set_shape([None] * 4)
elif image.get_shape().ndims != 4:
raise ValueError('Input image must have either 3 or 4 dimensions.')
_, height, width, _ = _image_dimensions(image, rank=4)
target_width_assert = tf.Assert(
tf.greater_equal(
target_width, width),
['target_width must be >= width'])
target_height_assert = tf.Assert(
tf.greater_equal(target_height, height),
['target_height must be >= height'])
with tf.control_dependencies([target_width_assert]):
after_padding_width = target_width - offset_width - width
with tf.control_dependencies([target_height_assert]):
after_padding_height = target_height - offset_height - height
offset_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(after_padding_width, 0),
tf.greater_equal(after_padding_height, 0)),
['target size not possible with the given target offsets'])
batch_params = tf.stack([0, 0])
height_params = tf.stack([offset_height, after_padding_height])
width_params = tf.stack([offset_width, after_padding_width])
channel_params = tf.stack([0, 0])
with tf.control_dependencies([offset_assert]):
paddings = tf.stack([batch_params, height_params, width_params,
channel_params])
padded = tf.pad(image, paddings)
if not is_batch:
padded = tf.squeeze(padded, axis=[0])
outputs = padded + pad_value
if outputs.dtype != original_dtype:
outputs = tf.cast(outputs, original_dtype)
return outputs
def _crop(image, offset_height, offset_width, crop_height, crop_width):
"""Crops the given image using the provided offsets and sizes.
Note that the method doesn't assume we know the input image size but it does
assume we know the input image rank.
Args:
image: an image of shape [height, width, channels].
offset_height: a scalar tensor indicating the height offset.
offset_width: a scalar tensor indicating the width offset.
crop_height: the height of the cropped image.
crop_width: the width of the cropped image.
Returns:
The cropped (and resized) image.
Raises:
ValueError: if `image` doesn't have rank of 3.
InvalidArgumentError: if the rank is not 3 or if the image dimensions are
less than the crop size.
"""
original_shape = tf.shape(image)
if len(image.get_shape().as_list()) != 3:
raise ValueError('input must have rank of 3')
original_channels = image.get_shape().as_list()[2]
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3),
['Rank of image must be equal to 3.'])
with tf.control_dependencies([rank_assertion]):
cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), tf.int32)
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
with tf.control_dependencies([size_assertion]):
image = tf.slice(image, offsets, cropped_shape)
image = tf.reshape(image, cropped_shape)
image.set_shape([crop_height, crop_width, original_channels])
return image
def random_crop(image_list, crop_height, crop_width):
"""Crops the given list of images.
The function applies the same crop to each image in the list. This can be
effectively applied when there are multiple image inputs of the same
dimension such as:
image, depths, normals = random_crop([image, depths, normals], 120, 150)
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the new height.
crop_width: the new width.
Returns:
the image_list with cropped images.
Raises:
ValueError: if there are multiple image inputs provided with different size
or the images are smaller than the crop dimensions.
"""
if not image_list:
raise ValueError('Empty image_list.')
# Compute the rank assertions.
rank_assertions = []
for i in range(len(image_list)):
image_rank = tf.rank(image_list[i])
rank_assert = tf.Assert(
tf.equal(image_rank, 3),
['Wrong rank for tensor %s [expected] [actual]',
image_list[i].name, 3, image_rank])
rank_assertions.append(rank_assert)
with tf.control_dependencies([rank_assertions[0]]):
image_shape = tf.shape(image_list[0])
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.'])
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
with tf.control_dependencies([rank_assertions[i]]):
shape = tf.shape(image)
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height),
['Wrong height for tensor %s [expected][actual]',
image.name, height, image_height])
width_assert = tf.Assert(
tf.equal(width, image_width),
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
with tf.control_dependencies(asserts):
max_offset_height = tf.reshape(image_height - crop_height + 1, [])
max_offset_width = tf.reshape(image_width - crop_width + 1, [])
offset_height = tf.random_uniform(
[], maxval=max_offset_height, dtype=tf.int32)
offset_width = tf.random_uniform(
[], maxval=max_offset_width, dtype=tf.int32)
return [_crop(image, offset_height, offset_width,
crop_height, crop_width) for image in image_list]
def get_random_scale(min_scale_factor, max_scale_factor, step_size):
"""Gets a random scale value.
Args:
min_scale_factor: Minimum scale value.
max_scale_factor: Maximum scale value.
step_size: The step size from minimum to maximum value.
Returns:
A random scale value selected between minimum and maximum value.
Raises:
ValueError: min_scale_factor has unexpected value.
"""
if min_scale_factor < 0 or min_scale_factor > max_scale_factor:
raise ValueError('Unexpected value of min_scale_factor.')
if min_scale_factor == max_scale_factor:
return tf.cast(min_scale_factor, tf.float32)
# When step_size = 0, we sample the value uniformly from [min, max).
if step_size == 0:
return tf.random_uniform([1],
minval=min_scale_factor,
maxval=max_scale_factor)
# When step_size != 0, we randomly select one discrete value from [min, max].
num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1)
scale_factors = tf.lin_space(min_scale_factor, max_scale_factor, num_steps)
shuffled_scale_factors = tf.random_shuffle(scale_factors)
return shuffled_scale_factors[0]
def randomly_scale_image_and_label(image, label=None, scale=1.0):
"""Randomly scales image and label.
Args:
image: Image with shape [height, width, 3].
label: Label with shape [height, width, 1].
scale: The value to scale image and label.
Returns:
Scaled image and label.
"""
# No random scaling if scale == 1.
if scale == 1.0:
return image, label
image_shape = tf.shape(image)
new_dim = tf.cast(
tf.cast([image_shape[0], image_shape[1]], tf.float32) * scale,
tf.int32)
# Need squeeze and expand_dims because image interpolation takes
# 4D tensors as input.
image = tf.squeeze(tf.image.resize_bilinear(
tf.expand_dims(image, 0),
new_dim,
align_corners=True), [0])
if label is not None:
label = tf.image.resize(
label,
new_dim,
method=get_label_resize_method(label),
align_corners=True)
return image, label
def resolve_shape(tensor, rank=None, scope=None):
"""Fully resolves the shape of a Tensor.
Use as much as possible the shape components already known during graph
creation and resolve the remaining ones during runtime.
Args:
tensor: Input tensor whose shape we query.
rank: The rank of the tensor, provided that we know it.
scope: Optional name scope.
Returns:
shape: The full shape of the tensor.
"""
with tf.name_scope(scope, 'resolve_shape', [tensor]):
if rank is not None:
shape = tensor.get_shape().with_rank(rank).as_list()
else:
shape = tensor.get_shape().as_list()
if None in shape:
shape_dynamic = tf.shape(tensor)
for i in range(len(shape)):
if shape[i] is None:
shape[i] = shape_dynamic[i]
return shape
def resize_to_range(image,
label=None,
min_size=None,
max_size=None,
factor=None,
keep_aspect_ratio=True,
align_corners=True,
label_layout_is_chw=False,
scope=None,
method=tf.image.ResizeMethod.BILINEAR):
"""Resizes image or label so their sides are within the provided range.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum size is equal to min_size
without the other side exceeding max_size, then do so.
2. Otherwise, resize so the largest side is equal to max_size.
An integer in `range(factor)` is added to the computed sides so that the
final dimensions are multiples of `factor` plus one.
Args:
image: A 3D tensor of shape [height, width, channels].
label: (optional) A 3D tensor of shape [height, width, channels] (default)
or [channels, height, width] when label_layout_is_chw = True.
min_size: (scalar) desired size of the smaller image side.
max_size: (scalar) maximum allowed size of the larger image side. Note
that the output dimension is no larger than max_size and may be slightly
smaller than max_size when factor is not None.
factor: Make output size multiple of factor plus one.
keep_aspect_ratio: Boolean, keep aspect ratio or not. If True, the input
will be resized while keeping the original aspect ratio. If False, the
input will be resized to [max_resize_value, max_resize_value] without
keeping the original aspect ratio.
align_corners: If True, exactly align all 4 corners of input and output.
label_layout_is_chw: If true, the label has shape [channel, height, width].
We support this case because for some instance segmentation dataset, the
instance segmentation is saved as [num_instances, height, width].
scope: Optional name scope.
method: Image resize method. Defaults to tf.image.ResizeMethod.BILINEAR.
Returns:
A 3-D tensor of shape [new_height, new_width, channels], where the image
has been resized (with the specified method) so that
min(new_height, new_width) == ceil(min_size) or
max(new_height, new_width) == ceil(max_size).
Raises:
ValueError: If the image is not a 3D tensor.
"""
with tf.name_scope(scope, 'resize_to_range', [image]):
new_tensor_list = []
min_size = tf.cast(min_size, tf.float32)
if max_size is not None:
max_size = tf.cast(max_size, tf.float32)
# Modify the max_size to be a multiple of factor plus 1 and make sure the
# max dimension after resizing is no larger than max_size.
if factor is not None:
max_size = (max_size - (max_size - 1) % factor)
[orig_height, orig_width, _] = resolve_shape(image, rank=3)
orig_height = tf.cast(orig_height, tf.float32)
orig_width = tf.cast(orig_width, tf.float32)
orig_min_size = tf.minimum(orig_height, orig_width)
# Calculate the larger of the possible sizes
large_scale_factor = min_size / orig_min_size
large_height = tf.cast(tf.floor(orig_height * large_scale_factor), tf.int32)
large_width = tf.cast(tf.floor(orig_width * large_scale_factor), tf.int32)
large_size = tf.stack([large_height, large_width])
new_size = large_size
if max_size is not None:
# Calculate the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_size = tf.maximum(orig_height, orig_width)
small_scale_factor = max_size / orig_max_size
small_height = tf.cast(
tf.floor(orig_height * small_scale_factor), tf.int32)
small_width = tf.cast(tf.floor(orig_width * small_scale_factor), tf.int32)
small_size = tf.stack([small_height, small_width])
new_size = tf.cond(
tf.cast(tf.reduce_max(large_size), tf.float32) > max_size,
lambda: small_size,
lambda: large_size)
# Ensure that both output sides are multiples of factor plus one.
if factor is not None:
new_size += (factor - (new_size - 1) % factor) % factor
if not keep_aspect_ratio:
# If not keep the aspect ratio, we resize everything to max_size, allowing
# us to do pre-processing without extra padding.
new_size = [tf.reduce_max(new_size), tf.reduce_max(new_size)]
new_tensor_list.append(tf.image.resize(
image, new_size, method=method, align_corners=align_corners))
if label is not None:
if label_layout_is_chw:
# Input label has shape [channel, height, width].
resized_label = tf.expand_dims(label, 3)
resized_label = tf.image.resize(
resized_label,
new_size,
method=get_label_resize_method(label),
align_corners=align_corners)
resized_label = tf.squeeze(resized_label, 3)
else:
# Input label has shape [height, width, channel].
resized_label = tf.image.resize(
label,
new_size,
method=get_label_resize_method(label),
align_corners=align_corners)
new_tensor_list.append(resized_label)
else:
new_tensor_list.append(None)
return new_tensor_list
| 19,880 | 36.230337 | 80 | py |
models | models-master/research/deeplab/core/nas_genotypes.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Genotypes used by NAS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import slim as contrib_slim
from deeplab.core import nas_cell
slim = contrib_slim
class PNASCell(nas_cell.NASBaseCell):
"""Configuration and construction of the PNASNet-5 Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, batch_norm_fn=slim.batch_norm):
# Name of operations: op_kernel-size_num-layers.
operations = [
'separable_5x5_2', 'max_pool_3x3', 'separable_7x7_2', 'max_pool_3x3',
'separable_5x5_2', 'separable_3x3_2', 'separable_3x3_2', 'max_pool_3x3',
'separable_3x3_2', 'none'
]
used_hiddenstates = [1, 1, 0, 0, 0, 0, 0]
hiddenstate_indices = [1, 1, 0, 0, 0, 0, 4, 0, 1, 0]
super(PNASCell, self).__init__(
num_conv_filters, operations, used_hiddenstates, hiddenstate_indices,
drop_path_keep_prob, total_num_cells, total_training_steps,
batch_norm_fn)
| 1,792 | 37.978261 | 80 | py |
models | models-master/research/deeplab/core/resnet_v1_beta.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet v1 model variants.
Code branched out from slim/nets/resnet_v1.py, and please refer to it for
more details.
The original version ResNets-v1 were proposed by:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from six.moves import range
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from deeplab.core import conv2d_ws
from deeplab.core import utils
from tensorflow.contrib.slim.nets import resnet_utils
slim = contrib_slim
_DEFAULT_MULTI_GRID = [1, 1, 1]
_DEFAULT_MULTI_GRID_RESNET_18 = [1, 1]
@slim.add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
unit_rate=1,
rate=1,
outputs_collections=None,
scope=None):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
its definition. Note that we use here the bottleneck variant which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
unit_rate: An integer, unit rate for atrous convolution.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = conv2d_ws.conv2d(
inputs,
depth,
[1, 1],
stride=stride,
activation_fn=None,
scope='shortcut')
residual = conv2d_ws.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
scope='conv1')
residual = conv2d_ws.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate*unit_rate, scope='conv2')
residual = conv2d_ws.conv2d(residual, depth, [1, 1], stride=1,
activation_fn=None, scope='conv3')
output = tf.nn.relu(shortcut + residual)
return slim.utils.collect_named_outputs(outputs_collections, sc.name,
output)
@slim.add_arg_scope
def lite_bottleneck(inputs,
depth,
stride,
unit_rate=1,
rate=1,
outputs_collections=None,
scope=None):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
its definition. Note that we use here the bottleneck variant which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
unit_rate: An integer, unit rate for atrous convolution.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'lite_bottleneck_v1', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = conv2d_ws.conv2d(
inputs,
depth, [1, 1],
stride=stride,
activation_fn=None,
scope='shortcut')
residual = conv2d_ws.conv2d_same(
inputs, depth, 3, 1, rate=rate * unit_rate, scope='conv1')
with slim.arg_scope([conv2d_ws.conv2d], activation_fn=None):
residual = conv2d_ws.conv2d_same(
residual, depth, 3, stride, rate=rate * unit_rate, scope='conv2')
output = tf.nn.relu(shortcut + residual)
return slim.utils.collect_named_outputs(outputs_collections, sc.name,
output)
def root_block_fn_for_beta_variant(net, depth_multiplier=1.0):
"""Gets root_block_fn for beta variant.
ResNet-v1 beta variant modifies the first original 7x7 convolution to three
3x3 convolutions.
Args:
net: A tensor of size [batch, height, width, channels], input to the model.
depth_multiplier: Controls the number of convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_out * depth_multiplier`.
Returns:
A tensor after three 3x3 convolutions.
"""
net = conv2d_ws.conv2d_same(
net, int(64 * depth_multiplier), 3, stride=2, scope='conv1_1')
net = conv2d_ws.conv2d_same(
net, int(64 * depth_multiplier), 3, stride=1, scope='conv1_2')
net = conv2d_ws.conv2d_same(
net, int(128 * depth_multiplier), 3, stride=1, scope='conv1_3')
return net
def resnet_v1_beta(inputs,
blocks,
num_classes=None,
is_training=None,
global_pool=True,
output_stride=None,
root_block_fn=None,
reuse=None,
scope=None,
sync_batch_norm_method='None'):
"""Generator for v1 ResNet models (beta variant).
This function generates a family of modified ResNet v1 models. In particular,
the first original 7x7 convolution is replaced with three 3x3 convolutions.
See the resnet_v1_*() methods for specific model instantiations, obtained by
selecting different block instantiations that produce ResNets of various
depths.
The code is modified from slim/nets/resnet_v1.py, and please refer to it for
more details.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
root_block_fn: The function consisting of convolution operations applied to
the root input. If root_block_fn is None, use the original setting of
      ResNet-v1, which is simply one convolution with 7x7 kernel and stride=2.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
if root_block_fn is None:
root_block_fn = functools.partial(conv2d_ws.conv2d_same,
num_outputs=64,
kernel_size=7,
stride=2,
scope='conv1')
batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method)
with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope([
conv2d_ws.conv2d, bottleneck, lite_bottleneck,
resnet_utils.stack_blocks_dense
],
outputs_collections=end_points_collection):
if is_training is not None:
arg_scope = slim.arg_scope([batch_norm], is_training=is_training)
else:
arg_scope = slim.arg_scope([])
with arg_scope:
net = inputs
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride //= 4
net = root_block_fn(net)
net = slim.max_pool2d(net, 3, stride=2, padding='SAME', scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='pool5', keepdims=True)
if num_classes is not None:
net = conv2d_ws.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits',
use_weight_standardization=False)
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if num_classes is not None:
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
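# Illustrative sketch (not part of the original file): assembling a small
# custom beta-variant ResNet directly from resnet_v1_beta. The two-block
# configuration below is a toy example; see resnet_v1_50()/resnet_v1_101()
# for the standard instantiations.
def _example_tiny_resnet_v1_beta():
  """Builds a toy two-block beta-variant ResNet (illustrative only)."""
  images = tf.zeros([1, 129, 129, 3])
  blocks = [
      resnet_v1_beta_block('block1', base_depth=16, num_units=2, stride=2),
      resnet_v1_beta_block('block2', base_depth=32, num_units=2, stride=1),
  ]
  return resnet_v1_beta(
      images,
      blocks=blocks,
      num_classes=None,
      is_training=False,
      global_pool=False,
      output_stride=8)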
def resnet_v1_beta_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v1 beta variant bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v1 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1,
'unit_rate': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride,
'unit_rate': 1
}])
def resnet_v1_small_beta_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_18 beta variant bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_18 bottleneck block.
"""
block_args = []
for _ in range(num_units - 1):
block_args.append({'depth': base_depth, 'stride': 1, 'unit_rate': 1})
block_args.append({'depth': base_depth, 'stride': stride, 'unit_rate': 1})
return resnet_utils.Block(scope, lite_bottleneck, block_args)
def resnet_v1_18(inputs,
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
multi_grid=None,
reuse=None,
scope='resnet_v1_18',
sync_batch_norm_method='None'):
"""Resnet v1 18.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
    ValueError: if multi_grid is not None and does not have length = 2.
"""
if multi_grid is None:
multi_grid = _DEFAULT_MULTI_GRID_RESNET_18
else:
if len(multi_grid) != 2:
raise ValueError('Expect multi_grid to have length 2.')
block4_args = []
for rate in multi_grid:
block4_args.append({'depth': 512, 'stride': 1, 'unit_rate': rate})
blocks = [
resnet_v1_small_beta_block(
'block1', base_depth=64, num_units=2, stride=2),
resnet_v1_small_beta_block(
'block2', base_depth=128, num_units=2, stride=2),
resnet_v1_small_beta_block(
'block3', base_depth=256, num_units=2, stride=2),
resnet_utils.Block('block4', lite_bottleneck, block4_args),
]
return resnet_v1_beta(
inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
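# Illustrative usage sketch (the 321x321 crop size is an assumption following
# common DeepLab settings): dense feature extraction with ResNet-18 at
# output_stride=8, which yields a 41x41 feature map.
def _example_resnet_v1_18_features():
  """Extracts dense ResNet-18 features (illustrative only)."""
  images = tf.zeros([2, 321, 321, 3])
  with slim.arg_scope(resnet_arg_scope(normalization_method='batch')):
    net, end_points = resnet_v1_18(
        images, is_training=False, global_pool=False, output_stride=8)
  return net, end_points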
def resnet_v1_18_beta(inputs,
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
multi_grid=None,
root_depth_multiplier=0.25,
reuse=None,
scope='resnet_v1_18',
sync_batch_norm_method='None'):
"""Resnet v1 18 beta variant.
This variant modifies the first convolution layer of ResNet-v1-18. In
particular, it changes the original one 7x7 convolution to three 3x3
convolutions.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
root_depth_multiplier: Float, depth multiplier used for the first three
convolution layers that replace the 7x7 convolution.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
    ValueError: if multi_grid is not None and does not have length = 2.
"""
if multi_grid is None:
multi_grid = _DEFAULT_MULTI_GRID_RESNET_18
else:
if len(multi_grid) != 2:
raise ValueError('Expect multi_grid to have length 2.')
block4_args = []
for rate in multi_grid:
block4_args.append({'depth': 512, 'stride': 1, 'unit_rate': rate})
blocks = [
resnet_v1_small_beta_block(
'block1', base_depth=64, num_units=2, stride=2),
resnet_v1_small_beta_block(
'block2', base_depth=128, num_units=2, stride=2),
resnet_v1_small_beta_block(
'block3', base_depth=256, num_units=2, stride=2),
resnet_utils.Block('block4', lite_bottleneck, block4_args),
]
return resnet_v1_beta(
inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
root_block_fn=functools.partial(root_block_fn_for_beta_variant,
depth_multiplier=root_depth_multiplier),
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
def resnet_v1_50(inputs,
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
multi_grid=None,
reuse=None,
scope='resnet_v1_50',
sync_batch_norm_method='None'):
"""Resnet v1 50.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if multi_grid is not None and does not have length = 3.
"""
if multi_grid is None:
multi_grid = _DEFAULT_MULTI_GRID
else:
if len(multi_grid) != 3:
raise ValueError('Expect multi_grid to have length 3.')
blocks = [
resnet_v1_beta_block(
'block1', base_depth=64, num_units=3, stride=2),
resnet_v1_beta_block(
'block2', base_depth=128, num_units=4, stride=2),
resnet_v1_beta_block(
'block3', base_depth=256, num_units=6, stride=2),
resnet_utils.Block('block4', bottleneck, [
{'depth': 2048, 'depth_bottleneck': 512, 'stride': 1,
'unit_rate': rate} for rate in multi_grid]),
]
return resnet_v1_beta(
inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
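# Illustrative sketch (crop size and rates are assumptions following common
# DeepLab settings): ResNet-50 with atrous convolution, where multi_grid
# applies the unit rates (1, 2, 4) to the three units of block4.
def _example_resnet_v1_50_multi_grid():
  """Builds ResNet-50 features with a multi-grid block4 (illustrative only)."""
  images = tf.zeros([1, 513, 513, 3])
  net, _ = resnet_v1_50(
      images,
      is_training=False,
      global_pool=False,
      output_stride=16,
      multi_grid=[1, 2, 4])
  return net  # A [1, 33, 33, 2048] feature map.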
def resnet_v1_50_beta(inputs,
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
multi_grid=None,
reuse=None,
scope='resnet_v1_50',
sync_batch_norm_method='None'):
"""Resnet v1 50 beta variant.
This variant modifies the first convolution layer of ResNet-v1-50. In
particular, it changes the original one 7x7 convolution to three 3x3
convolutions.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if multi_grid is not None and does not have length = 3.
"""
if multi_grid is None:
multi_grid = _DEFAULT_MULTI_GRID
else:
if len(multi_grid) != 3:
raise ValueError('Expect multi_grid to have length 3.')
blocks = [
resnet_v1_beta_block(
'block1', base_depth=64, num_units=3, stride=2),
resnet_v1_beta_block(
'block2', base_depth=128, num_units=4, stride=2),
resnet_v1_beta_block(
'block3', base_depth=256, num_units=6, stride=2),
resnet_utils.Block('block4', bottleneck, [
{'depth': 2048, 'depth_bottleneck': 512, 'stride': 1,
'unit_rate': rate} for rate in multi_grid]),
]
return resnet_v1_beta(
inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
root_block_fn=functools.partial(root_block_fn_for_beta_variant),
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
def resnet_v1_101(inputs,
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
multi_grid=None,
reuse=None,
scope='resnet_v1_101',
sync_batch_norm_method='None'):
"""Resnet v1 101.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if multi_grid is not None and does not have length = 3.
"""
if multi_grid is None:
multi_grid = _DEFAULT_MULTI_GRID
else:
if len(multi_grid) != 3:
raise ValueError('Expect multi_grid to have length 3.')
blocks = [
resnet_v1_beta_block(
'block1', base_depth=64, num_units=3, stride=2),
resnet_v1_beta_block(
'block2', base_depth=128, num_units=4, stride=2),
resnet_v1_beta_block(
'block3', base_depth=256, num_units=23, stride=2),
resnet_utils.Block('block4', bottleneck, [
{'depth': 2048, 'depth_bottleneck': 512, 'stride': 1,
'unit_rate': rate} for rate in multi_grid]),
]
return resnet_v1_beta(
inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
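# Illustrative sketch (input size and number of classes are assumptions
# following common slim conventions): ResNet-101 used as an image classifier
# with global pooling and a logits layer.
def _example_resnet_v1_101_classifier():
  """Builds a ResNet-101 classifier head (illustrative only)."""
  images = tf.zeros([8, 224, 224, 3])
  logits, end_points = resnet_v1_101(
      images, num_classes=1001, is_training=True, global_pool=True)
  # `logits` has shape [8, 1, 1, 1001]; softmax scores are also exposed in
  # end_points['predictions'].
  return logits, end_points['predictions']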
def resnet_v1_101_beta(inputs,
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
multi_grid=None,
reuse=None,
scope='resnet_v1_101',
sync_batch_norm_method='None'):
"""Resnet v1 101 beta variant.
This variant modifies the first convolution layer of ResNet-v1-101. In
particular, it changes the original one 7x7 convolution to three 3x3
convolutions.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if multi_grid is not None and does not have length = 3.
"""
if multi_grid is None:
multi_grid = _DEFAULT_MULTI_GRID
else:
if len(multi_grid) != 3:
raise ValueError('Expect multi_grid to have length 3.')
blocks = [
resnet_v1_beta_block(
'block1', base_depth=64, num_units=3, stride=2),
resnet_v1_beta_block(
'block2', base_depth=128, num_units=4, stride=2),
resnet_v1_beta_block(
'block3', base_depth=256, num_units=23, stride=2),
resnet_utils.Block('block4', bottleneck, [
{'depth': 2048, 'depth_bottleneck': 512, 'stride': 1,
'unit_rate': rate} for rate in multi_grid]),
]
return resnet_v1_beta(
inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
root_block_fn=functools.partial(root_block_fn_for_beta_variant),
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
def resnet_arg_scope(weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
activation_fn=tf.nn.relu,
use_batch_norm=True,
sync_batch_norm_method='None',
normalization_method='unspecified',
use_weight_standardization=False):
"""Defines the default ResNet arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
activation_fn: The activation function which is used in ResNet.
use_batch_norm: Deprecated in favor of normalization_method.
sync_batch_norm_method: String, sync batchnorm method.
normalization_method: String, one of `batch`, `none`, or `group`, to use
batch normalization, no normalization, or group normalization.
use_weight_standardization: Boolean, whether to use weight standardization.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
}
batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method)
if normalization_method == 'batch':
normalizer_fn = batch_norm
elif normalization_method == 'none':
normalizer_fn = None
elif normalization_method == 'group':
normalizer_fn = slim.group_norm
elif normalization_method == 'unspecified':
normalizer_fn = batch_norm if use_batch_norm else None
else:
raise ValueError('Unrecognized normalization_method %s' %
normalization_method)
with slim.arg_scope([conv2d_ws.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
use_weight_standardization=use_weight_standardization):
with slim.arg_scope([batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# slim.arg_scope([slim.max_pool2d], padding='VALID').
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
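# Illustrative sketch (not part of the original file): combining group
# normalization with weight standardization through resnet_arg_scope. The
# chosen model and input size are assumptions for demonstration.
def _example_group_norm_with_weight_standardization():
  """Builds ResNet-50 beta with GN + WS (illustrative only)."""
  images = tf.zeros([1, 225, 225, 3])
  with slim.arg_scope(
      resnet_arg_scope(
          normalization_method='group', use_weight_standardization=True)):
    net, _ = resnet_v1_50_beta(
        images, is_training=False, global_pool=False, output_stride=16)
  return net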
| 34,236 | 40.349034 | 80 | py |
models | models-master/research/deeplab/core/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/deeplab/core/dense_prediction_cell.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dense Prediction Cell class that can be evolved in semantic segmentation.
DensePredictionCell is used as a `layer` in semantic segmentation whose
architecture is determined by the `config`, a dictionary specifying
the architecture.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from deeplab.core import utils
slim = contrib_slim
# Local constants.
_META_ARCHITECTURE_SCOPE = 'meta_architecture'
_CONCAT_PROJECTION_SCOPE = 'concat_projection'
_OP = 'op'
_CONV = 'conv'
_PYRAMID_POOLING = 'pyramid_pooling'
_KERNEL = 'kernel'
_RATE = 'rate'
_GRID_SIZE = 'grid_size'
_TARGET_SIZE = 'target_size'
_INPUT = 'input'
def dense_prediction_cell_hparams():
"""DensePredictionCell HParams.
Returns:
A dictionary of hyper-parameters used for dense prediction cell with keys:
- reduction_size: Integer, the number of output filters for each operation
inside the cell.
- dropout_on_concat_features: Boolean, apply dropout on the concatenated
features or not.
- dropout_on_projection_features: Boolean, apply dropout on the projection
features or not.
- dropout_keep_prob: Float, when `dropout_on_concat_features' or
`dropout_on_projection_features' is True, the `keep_prob` value used
in the dropout operation.
- concat_channels: Integer, the concatenated features will be
channel-reduced to `concat_channels` channels.
- conv_rate_multiplier: Integer, used to multiply the convolution rates.
This is useful in the case when the output_stride is changed from 16
to 8, we need to double the convolution rates correspondingly.
"""
return {
'reduction_size': 256,
'dropout_on_concat_features': True,
'dropout_on_projection_features': False,
'dropout_keep_prob': 0.9,
'concat_channels': 256,
'conv_rate_multiplier': 1,
}
class DensePredictionCell(object):
"""DensePredictionCell class used as a 'layer' in semantic segmentation."""
def __init__(self, config, hparams=None):
"""Initializes the dense prediction cell.
Args:
config: A dictionary storing the architecture of a dense prediction cell.
hparams: A dictionary of hyper-parameters, provided by users. This
dictionary will be used to update the default dictionary returned by
dense_prediction_cell_hparams().
Raises:
ValueError: If `conv_rate_multiplier` has value < 1.
"""
self.hparams = dense_prediction_cell_hparams()
if hparams is not None:
self.hparams.update(hparams)
self.config = config
# Check values in hparams are valid or not.
if self.hparams['conv_rate_multiplier'] < 1:
raise ValueError('conv_rate_multiplier cannot have value < 1.')
def _get_pyramid_pooling_arguments(
self, crop_size, output_stride, image_grid, image_pooling_crop_size=None):
"""Gets arguments for pyramid pooling.
Args:
crop_size: A list of two integers, [crop_height, crop_width] specifying
whole patch crop size.
output_stride: Integer, output stride value for extracted features.
image_grid: A list of two integers, [image_grid_height, image_grid_width],
specifying the grid size of how the pyramid pooling will be performed.
image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
specifying the crop size for image pooling operations. Note that we
decouple whole patch crop_size and image_pooling_crop_size as one could
perform the image_pooling with different crop sizes.
Returns:
      A tuple of (resize_value, pooled_kernel).
"""
resize_height = utils.scale_dimension(crop_size[0], 1. / output_stride)
resize_width = utils.scale_dimension(crop_size[1], 1. / output_stride)
# If image_pooling_crop_size is not specified, use crop_size.
if image_pooling_crop_size is None:
image_pooling_crop_size = crop_size
pooled_height = utils.scale_dimension(
image_pooling_crop_size[0], 1. / (output_stride * image_grid[0]))
pooled_width = utils.scale_dimension(
image_pooling_crop_size[1], 1. / (output_stride * image_grid[1]))
return ([resize_height, resize_width], [pooled_height, pooled_width])
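  # Worked example (illustrative): with crop_size=[513, 513], output_stride=16
  # and image_grid=[2, 2], the branch features are resized to 33x33 while each
  # pyramid pooling kernel covers 17x17 cells, i.e. this method returns
  # ([33, 33], [17, 17]).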
def _parse_operation(self, config, crop_size, output_stride,
image_pooling_crop_size=None):
"""Parses one operation.
When 'operation' is 'pyramid_pooling', we compute the required
hyper-parameters and save in config.
Args:
config: A dictionary storing required hyper-parameters for one
operation.
crop_size: A list of two integers, [crop_height, crop_width] specifying
whole patch crop size.
output_stride: Integer, output stride value for extracted features.
image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
specifying the crop size for image pooling operations. Note that we
decouple whole patch crop_size and image_pooling_crop_size as one could
perform the image_pooling with different crop sizes.
Returns:
A dictionary stores the related information for the operation.
"""
if config[_OP] == _PYRAMID_POOLING:
(config[_TARGET_SIZE],
config[_KERNEL]) = self._get_pyramid_pooling_arguments(
crop_size=crop_size,
output_stride=output_stride,
image_grid=config[_GRID_SIZE],
image_pooling_crop_size=image_pooling_crop_size)
return config
def build_cell(self,
features,
output_stride=16,
crop_size=None,
image_pooling_crop_size=None,
weight_decay=0.00004,
reuse=None,
is_training=False,
fine_tune_batch_norm=False,
scope=None):
"""Builds the dense prediction cell based on the config.
Args:
features: Input feature map of size [batch, height, width, channels].
output_stride: Int, output stride at which the features were extracted.
crop_size: A list [crop_height, crop_width], determining the input
features resolution.
image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
specifying the crop size for image pooling operations. Note that we
decouple whole patch crop_size and image_pooling_crop_size as one could
perform the image_pooling with different crop sizes.
weight_decay: Float, the weight decay for model variables.
reuse: Reuse the model variables or not.
is_training: Boolean, is training or not.
fine_tune_batch_norm: Boolean, fine-tuning batch norm parameters or not.
scope: Optional string, specifying the variable scope.
Returns:
Features after passing through the constructed dense prediction cell with
shape = [batch, height, width, channels] where channels are determined
by `reduction_size` returned by dense_prediction_cell_hparams().
Raises:
ValueError: Use Convolution with kernel size not equal to 1x1 or 3x3 or
the operation is not recognized.
"""
batch_norm_params = {
'is_training': is_training and fine_tune_batch_norm,
'decay': 0.9997,
'epsilon': 1e-5,
'scale': True,
}
hparams = self.hparams
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
padding='SAME',
stride=1,
reuse=reuse):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with tf.variable_scope(scope, _META_ARCHITECTURE_SCOPE, [features]):
depth = hparams['reduction_size']
branch_logits = []
for i, current_config in enumerate(self.config):
scope = 'branch%d' % i
current_config = self._parse_operation(
config=current_config,
crop_size=crop_size,
output_stride=output_stride,
image_pooling_crop_size=image_pooling_crop_size)
tf.logging.info(current_config)
if current_config[_INPUT] < 0:
operation_input = features
else:
operation_input = branch_logits[current_config[_INPUT]]
if current_config[_OP] == _CONV:
if current_config[_KERNEL] == [1, 1] or current_config[
_KERNEL] == 1:
branch_logits.append(
slim.conv2d(operation_input, depth, 1, scope=scope))
else:
conv_rate = [r * hparams['conv_rate_multiplier']
for r in current_config[_RATE]]
branch_logits.append(
utils.split_separable_conv2d(
operation_input,
filters=depth,
kernel_size=current_config[_KERNEL],
rate=conv_rate,
weight_decay=weight_decay,
scope=scope))
elif current_config[_OP] == _PYRAMID_POOLING:
pooled_features = slim.avg_pool2d(
operation_input,
kernel_size=current_config[_KERNEL],
stride=[1, 1],
padding='VALID')
pooled_features = slim.conv2d(
pooled_features,
depth,
1,
scope=scope)
pooled_features = tf.image.resize_bilinear(
pooled_features,
current_config[_TARGET_SIZE],
align_corners=True)
# Set shape for resize_height/resize_width if they are not Tensor.
resize_height = current_config[_TARGET_SIZE][0]
resize_width = current_config[_TARGET_SIZE][1]
if isinstance(resize_height, tf.Tensor):
resize_height = None
if isinstance(resize_width, tf.Tensor):
resize_width = None
pooled_features.set_shape(
[None, resize_height, resize_width, depth])
branch_logits.append(pooled_features)
else:
raise ValueError('Unrecognized operation.')
# Merge branch logits.
concat_logits = tf.concat(branch_logits, 3)
if self.hparams['dropout_on_concat_features']:
concat_logits = slim.dropout(
concat_logits,
keep_prob=self.hparams['dropout_keep_prob'],
is_training=is_training,
scope=_CONCAT_PROJECTION_SCOPE + '_dropout')
concat_logits = slim.conv2d(concat_logits,
self.hparams['concat_channels'],
1,
scope=_CONCAT_PROJECTION_SCOPE)
if self.hparams['dropout_on_projection_features']:
concat_logits = slim.dropout(
concat_logits,
keep_prob=self.hparams['dropout_keep_prob'],
is_training=is_training,
scope=_CONCAT_PROJECTION_SCOPE + '_dropout')
return concat_logits
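# Illustrative sketch (not part of the original module): constructing a small
# two-branch dense prediction cell from a hand-written config and applying it
# to a feature map. Real configs are typically produced by architecture search;
# the branch specification and shapes below are assumptions for demonstration.
def _example_dense_prediction_cell(features):
  """Applies a toy two-branch cell to `features` (illustrative only)."""
  config = [
      {_INPUT: -1, _OP: _CONV, _KERNEL: 1},
      {_INPUT: 0, _OP: _CONV, _KERNEL: 3, _RATE: [6, 6]},
  ]
  cell = DensePredictionCell(config, hparams={'reduction_size': 128})
  return cell.build_cell(
      features,
      output_stride=16,
      crop_size=[513, 513],
      is_training=False)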
| 12,168 | 40.817869 | 80 | py |
models | models-master/research/deeplab/core/xception.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Xception model.
"Xception: Deep Learning with Depthwise Separable Convolutions"
Fran{\c{c}}ois Chollet
https://arxiv.org/abs/1610.02357
We implement the modified version by Jifeng Dai et al. for their COCO 2017
detection challenge submission, where the model is made deeper and has aligned
features for dense prediction tasks. See their slides for details:
"Deformable Convolutional Networks -- COCO Detection and Segmentation Challenge
2017 Entry"
Haozhi Qi, Zheng Zhang, Bin Xiao, Han Hu, Bowen Cheng, Yichen Wei and Jifeng Dai
ICCV 2017 COCO Challenge workshop
http://presentations.cocodataset.org/COCO17-Detect-MSRA.pdf
We made a few more changes on top of MSRA's modifications:
1. Fully convolutional: All the max-pooling layers are replaced with separable
conv2d with stride = 2. This allows us to use atrous convolution to extract
feature maps at any resolution.
2. We support adding ReLU and BatchNorm after depthwise convolution, motivated
by the design of MobileNetv1.
"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
Applications"
Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang,
Tobias Weyand, Marco Andreetto, Hartwig Adam
https://arxiv.org/abs/1704.04861
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import range
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from deeplab.core import utils
from tensorflow.contrib.slim.nets import resnet_utils
from nets.mobilenet import conv_blocks as mobilenet_v3_ops
slim = contrib_slim
_DEFAULT_MULTI_GRID = [1, 1, 1]
# The cap for tf.clip_by_value.
_CLIP_CAP = 6
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
"""A named tuple describing an Xception block.
Its parts are:
scope: The scope of the block.
unit_fn: The Xception unit function which takes as input a tensor and
returns another tensor with the output of the Xception unit.
args: A list of length equal to the number of units in the block. The list
contains one dictionary for each unit in the block to serve as argument to
unit_fn.
"""
def fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs
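# Illustrative sketch (not part of the original file): fixed_padding adds the
# zero padding needed so that a following 'VALID' convolution behaves like
# 'SAME' padding independently of the input size. The input shape below is an
# arbitrary assumption.
def _example_fixed_padding():
  """Pads a 4x4 input for a 3x3 kernel with atrous rate 2 (illustrative)."""
  inputs = tf.zeros([1, 4, 4, 3])
  # Effective kernel size is 3 + (3 - 1) * (2 - 1) = 5, so two rows/columns of
  # zeros are added on each side, giving a [1, 8, 8, 3] tensor.
  return fixed_padding(inputs, kernel_size=3, rate=2)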
@slim.add_arg_scope
def separable_conv2d_same(inputs,
num_outputs,
kernel_size,
depth_multiplier,
stride,
rate=1,
use_explicit_padding=True,
regularize_depthwise=False,
scope=None,
**kwargs):
"""Strided 2-D separable convolution with 'SAME' padding.
If stride > 1 and use_explicit_padding is True, then we do explicit zero-
padding, followed by conv2d with 'VALID' padding.
Note that
net = separable_conv2d_same(inputs, num_outputs, 3,
depth_multiplier=1, stride=stride)
is equivalent to
net = slim.separable_conv2d(inputs, num_outputs, 3,
depth_multiplier=1, stride=1, padding='SAME')
net = resnet_utils.subsample(net, factor=stride)
whereas
net = slim.separable_conv2d(inputs, num_outputs, 3, stride=stride,
depth_multiplier=1, padding='SAME')
is different when the input's height or width is even, which is why we add the
current function.
Consequently, if the input feature map has even height or width, setting
`use_explicit_padding=False` will result in feature misalignment by one pixel
along the corresponding dimension.
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An int with the kernel_size of the filters.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
use_explicit_padding: If True, use explicit padding to make the model fully
compatible with the open source version, otherwise use the native
Tensorflow 'SAME' padding.
regularize_depthwise: Whether or not apply L2-norm regularization on the
depthwise convolution weights.
scope: Scope.
**kwargs: additional keyword arguments to pass to slim.conv2d
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
def _separable_conv2d(padding):
"""Wrapper for separable conv2d."""
return slim.separable_conv2d(inputs,
num_outputs,
kernel_size,
depth_multiplier=depth_multiplier,
stride=stride,
rate=rate,
padding=padding,
scope=scope,
**kwargs)
def _split_separable_conv2d(padding):
"""Splits separable conv2d into depthwise and pointwise conv2d."""
outputs = slim.separable_conv2d(inputs,
None,
kernel_size,
depth_multiplier=depth_multiplier,
stride=stride,
rate=rate,
padding=padding,
scope=scope + '_depthwise',
**kwargs)
return slim.conv2d(outputs,
num_outputs,
1,
scope=scope + '_pointwise',
**kwargs)
if stride == 1 or not use_explicit_padding:
if regularize_depthwise:
outputs = _separable_conv2d(padding='SAME')
else:
outputs = _split_separable_conv2d(padding='SAME')
else:
inputs = fixed_padding(inputs, kernel_size, rate)
if regularize_depthwise:
outputs = _separable_conv2d(padding='VALID')
else:
outputs = _split_separable_conv2d(padding='VALID')
return outputs
@slim.add_arg_scope
def xception_module(inputs,
depth_list,
skip_connection_type,
stride,
kernel_size=3,
unit_rate_list=None,
rate=1,
activation_fn_in_separable_conv=False,
regularize_depthwise=False,
outputs_collections=None,
scope=None,
use_bounded_activation=False,
use_explicit_padding=True,
use_squeeze_excite=False,
se_pool_size=None):
"""An Xception module.
The output of one Xception module is equal to the sum of `residual` and
`shortcut`, where `residual` is the feature computed by three separable
convolution. The `shortcut` is the feature computed by 1x1 convolution with
or without striding. In some cases, the `shortcut` path could be a simple
identity function or none (i.e, no shortcut).
Note that we replace the max pooling operations in the Xception module with
another separable convolution with striding, since atrous rate is not properly
supported in current TensorFlow max pooling implementation.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth_list: A list of three integers specifying the depth values of one
Xception module.
skip_connection_type: Skip connection type for the residual path. Only
supports 'conv', 'sum', or 'none'.
    stride: The block unit's stride. Determines the amount of downsampling of
      the unit's output compared to its input.
kernel_size: Integer, convolution kernel size.
unit_rate_list: A list of three integers, determining the unit rate for
each separable convolution in the xception module.
rate: An integer, rate for atrous convolution.
activation_fn_in_separable_conv: Includes activation function in the
separable convolution or not.
regularize_depthwise: Whether or not apply L2-norm regularization on the
depthwise convolution weights.
outputs_collections: Collection to add the Xception unit output.
scope: Optional variable_scope.
use_bounded_activation: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference.
use_explicit_padding: If True, use explicit padding to make the model fully
compatible with the open source version, otherwise use the native
Tensorflow 'SAME' padding.
use_squeeze_excite: Boolean, use squeeze-and-excitation or not.
se_pool_size: None or integer specifying the pooling size used in SE module.
Returns:
The Xception module's output.
Raises:
ValueError: If depth_list and unit_rate_list do not contain three elements,
or if stride != 1 for the third separable convolution operation in the
residual path, or unsupported skip connection type.
"""
if len(depth_list) != 3:
raise ValueError('Expect three elements in depth_list.')
if unit_rate_list:
if len(unit_rate_list) != 3:
raise ValueError('Expect three elements in unit_rate_list.')
with tf.variable_scope(scope, 'xception_module', [inputs]) as sc:
residual = inputs
def _separable_conv(features, depth, kernel_size, depth_multiplier,
regularize_depthwise, rate, stride, scope):
"""Separable conv block."""
if activation_fn_in_separable_conv:
activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu
else:
if use_bounded_activation:
# When use_bounded_activation is True, we clip the feature values and
# apply relu6 for activation.
activation_fn = lambda x: tf.clip_by_value(x, -_CLIP_CAP, _CLIP_CAP)
features = tf.nn.relu6(features)
else:
# Original network design.
activation_fn = None
features = tf.nn.relu(features)
return separable_conv2d_same(features,
depth,
kernel_size,
depth_multiplier=depth_multiplier,
stride=stride,
rate=rate,
activation_fn=activation_fn,
use_explicit_padding=use_explicit_padding,
regularize_depthwise=regularize_depthwise,
scope=scope)
for i in range(3):
residual = _separable_conv(residual,
depth_list[i],
kernel_size=kernel_size,
depth_multiplier=1,
regularize_depthwise=regularize_depthwise,
rate=rate*unit_rate_list[i],
stride=stride if i == 2 else 1,
scope='separable_conv' + str(i+1))
if use_squeeze_excite:
residual = mobilenet_v3_ops.squeeze_excite(
input_tensor=residual,
squeeze_factor=16,
inner_activation_fn=tf.nn.relu,
gating_fn=lambda x: tf.nn.relu6(x+3)*0.16667,
pool=se_pool_size)
if skip_connection_type == 'conv':
shortcut = slim.conv2d(inputs,
depth_list[-1],
[1, 1],
stride=stride,
activation_fn=None,
scope='shortcut')
if use_bounded_activation:
residual = tf.clip_by_value(residual, -_CLIP_CAP, _CLIP_CAP)
shortcut = tf.clip_by_value(shortcut, -_CLIP_CAP, _CLIP_CAP)
outputs = residual + shortcut
if use_bounded_activation:
outputs = tf.nn.relu6(outputs)
elif skip_connection_type == 'sum':
if use_bounded_activation:
residual = tf.clip_by_value(residual, -_CLIP_CAP, _CLIP_CAP)
inputs = tf.clip_by_value(inputs, -_CLIP_CAP, _CLIP_CAP)
outputs = residual + inputs
if use_bounded_activation:
outputs = tf.nn.relu6(outputs)
elif skip_connection_type == 'none':
outputs = residual
else:
raise ValueError('Unsupported skip connection type.')
return slim.utils.collect_named_outputs(outputs_collections,
sc.name,
outputs)
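# Illustrative sketch (the input shape is an arbitrary assumption): a single
# Xception module with a 'conv' shortcut that downsamples by 2 and raises the
# depth from 64 to 128 channels.
def _example_xception_module():
  """Applies one Xception module to a dummy feature map (illustrative only)."""
  inputs = tf.zeros([1, 65, 65, 64])
  return xception_module(
      inputs,
      depth_list=[128, 128, 128],
      skip_connection_type='conv',
      stride=2,
      unit_rate_list=[1, 1, 1],
      scope='example_module')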
@slim.add_arg_scope
def stack_blocks_dense(net,
blocks,
output_stride=None,
outputs_collections=None):
"""Stacks Xception blocks and controls output feature density.
First, this function creates scopes for the Xception in the form of
'block_name/unit_1', 'block_name/unit_2', etc.
Second, this function allows the user to explicitly control the output
stride, which is the ratio of the input to output spatial resolution. This
is useful for dense prediction tasks such as semantic segmentation or
object detection.
Control of the output feature density is implemented by atrous convolution.
Args:
net: A tensor of size [batch, height, width, channels].
blocks: A list of length equal to the number of Xception blocks. Each
element is an Xception Block object describing the units in the block.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution, which needs to be equal to
the product of unit strides from the start up to some level of Xception.
For example, if the Xception employs units with strides 1, 2, 1, 3, 4, 1,
then valid values for the output_stride are 1, 2, 6, 24 or None (which
is equivalent to output_stride=24).
outputs_collections: Collection to add the Xception block outputs.
Returns:
net: Output tensor with stride equal to the specified output_stride.
Raises:
ValueError: If the target output_stride is not valid.
"""
# The current_stride variable keeps track of the effective stride of the
# activations. This allows us to invoke atrous convolution whenever applying
# the next residual unit would result in the activations having stride larger
# than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]) as sc:
for i, unit in enumerate(block.args):
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
if output_stride is not None and current_stride == output_stride:
net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
rate *= unit.get('stride', 1)
else:
net = block.unit_fn(net, rate=1, **unit)
current_stride *= unit.get('stride', 1)
# Collect activations at the block's end before performing subsampling.
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
if output_stride is not None and current_stride != output_stride:
raise ValueError('The target output_stride cannot be reached.')
return net
def xception(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
keep_prob=0.5,
output_stride=None,
reuse=None,
scope=None,
sync_batch_norm_method='None'):
"""Generator for Xception models.
This function generates a family of Xception models. See the xception_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce Xception of various depths.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels]. Must be
floating point. If a pretrained checkpoint is used, pixel values should be
the same as during training (see go/slim-classification-models for
specifics).
blocks: A list of length equal to the number of Xception blocks. Each
element is an Xception Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks.
If 0 or None, we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
keep_prob: Keep probability used in the pre-logits dropout layer.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method. Currently only
support `None`.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is 0 or None,
then net is the output of the last Xception block, potentially after
global average pooling. If num_classes is a non-zero integer, net contains
the pre-softmax activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(
scope, 'xception', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + 'end_points'
batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method)
with slim.arg_scope([slim.conv2d,
slim.separable_conv2d,
xception_module,
stack_blocks_dense],
outputs_collections=end_points_collection):
with slim.arg_scope([batch_norm], is_training=is_training):
net = inputs
if output_stride is not None:
if output_stride % 2 != 0:
raise ValueError('The output_stride needs to be a multiple of 2.')
output_stride //= 2
# Root block function operated on inputs.
net = resnet_utils.conv2d_same(net, 32, 3, stride=2,
scope='entry_flow/conv1_1')
net = resnet_utils.conv2d_same(net, 64, 3, stride=1,
scope='entry_flow/conv1_2')
# Extract features for entry_flow, middle_flow, and exit_flow.
net = stack_blocks_dense(net, blocks, output_stride)
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection, clear_collection=True)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='global_pool', keepdims=True)
end_points['global_pool'] = net
if num_classes:
net = slim.dropout(net, keep_prob=keep_prob, is_training=is_training,
scope='prelogits_dropout')
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits')
end_points[sc.name + '/logits'] = net
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
def xception_block(scope,
depth_list,
skip_connection_type,
activation_fn_in_separable_conv,
regularize_depthwise,
num_units,
stride,
kernel_size=3,
unit_rate_list=None,
use_squeeze_excite=False,
se_pool_size=None):
"""Helper function for creating a Xception block.
Args:
scope: The scope of the block.
depth_list: The depth of the bottleneck layer for each unit.
skip_connection_type: Skip connection type for the residual path. Only
supports 'conv', 'sum', or 'none'.
activation_fn_in_separable_conv: Includes activation function in the
separable convolution or not.
regularize_depthwise: Whether or not apply L2-norm regularization on the
depthwise convolution weights.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
kernel_size: Integer, convolution kernel size.
unit_rate_list: A list of three integers, determining the unit rate in the
corresponding xception block.
use_squeeze_excite: Boolean, use squeeze-and-excitation or not.
se_pool_size: None or integer specifying the pooling size used in SE module.
Returns:
An Xception block.
"""
if unit_rate_list is None:
unit_rate_list = _DEFAULT_MULTI_GRID
return Block(scope, xception_module, [{
'depth_list': depth_list,
'skip_connection_type': skip_connection_type,
'activation_fn_in_separable_conv': activation_fn_in_separable_conv,
'regularize_depthwise': regularize_depthwise,
'stride': stride,
'kernel_size': kernel_size,
'unit_rate_list': unit_rate_list,
'use_squeeze_excite': use_squeeze_excite,
'se_pool_size': se_pool_size,
}] * num_units)
def xception_41(inputs,
num_classes=None,
is_training=True,
global_pool=True,
keep_prob=0.5,
output_stride=None,
regularize_depthwise=False,
multi_grid=None,
reuse=None,
scope='xception_41',
sync_batch_norm_method='None'):
"""Xception-41 model."""
blocks = [
xception_block('entry_flow/block1',
depth_list=[128, 128, 128],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2),
xception_block('entry_flow/block2',
depth_list=[256, 256, 256],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2),
xception_block('entry_flow/block3',
depth_list=[728, 728, 728],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2),
xception_block('middle_flow/block1',
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=8,
stride=1),
xception_block('exit_flow/block1',
depth_list=[728, 1024, 1024],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2),
xception_block('exit_flow/block2',
depth_list=[1536, 1536, 2048],
skip_connection_type='none',
activation_fn_in_separable_conv=True,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=1,
unit_rate_list=multi_grid),
]
return xception(inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
keep_prob=keep_prob,
output_stride=output_stride,
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
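# Illustrative usage sketch (the 513x513 crop is an assumption following common
# DeepLab settings): dense feature extraction with Xception-41 at
# output_stride=16, yielding a 33x33 feature map of depth 2048.
def _example_xception_41_features():
  """Extracts dense Xception-41 features (illustrative only)."""
  images = tf.zeros([1, 513, 513, 3])
  net, end_points = xception_41(
      images, is_training=False, global_pool=False, output_stride=16)
  return net, end_points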
def xception_65_factory(inputs,
num_classes=None,
is_training=True,
global_pool=True,
keep_prob=0.5,
output_stride=None,
regularize_depthwise=False,
kernel_size=3,
multi_grid=None,
reuse=None,
use_squeeze_excite=False,
se_pool_size=None,
scope='xception_65',
sync_batch_norm_method='None'):
"""Xception-65 model factory."""
blocks = [
xception_block('entry_flow/block1',
depth_list=[128, 128, 128],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2,
kernel_size=kernel_size,
use_squeeze_excite=False,
se_pool_size=se_pool_size),
xception_block('entry_flow/block2',
depth_list=[256, 256, 256],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2,
kernel_size=kernel_size,
use_squeeze_excite=False,
se_pool_size=se_pool_size),
xception_block('entry_flow/block3',
depth_list=[728, 728, 728],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2,
kernel_size=kernel_size,
use_squeeze_excite=use_squeeze_excite,
se_pool_size=se_pool_size),
xception_block('middle_flow/block1',
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=16,
stride=1,
kernel_size=kernel_size,
use_squeeze_excite=use_squeeze_excite,
se_pool_size=se_pool_size),
xception_block('exit_flow/block1',
depth_list=[728, 1024, 1024],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2,
kernel_size=kernel_size,
use_squeeze_excite=use_squeeze_excite,
se_pool_size=se_pool_size),
xception_block('exit_flow/block2',
depth_list=[1536, 1536, 2048],
skip_connection_type='none',
activation_fn_in_separable_conv=True,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=1,
kernel_size=kernel_size,
unit_rate_list=multi_grid,
use_squeeze_excite=False,
se_pool_size=se_pool_size),
]
return xception(inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
keep_prob=keep_prob,
output_stride=output_stride,
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
def xception_65(inputs,
num_classes=None,
is_training=True,
global_pool=True,
keep_prob=0.5,
output_stride=None,
regularize_depthwise=False,
multi_grid=None,
reuse=None,
scope='xception_65',
sync_batch_norm_method='None'):
"""Xception-65 model."""
return xception_65_factory(
inputs=inputs,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
keep_prob=keep_prob,
output_stride=output_stride,
regularize_depthwise=regularize_depthwise,
multi_grid=multi_grid,
reuse=reuse,
scope=scope,
use_squeeze_excite=False,
se_pool_size=None,
sync_batch_norm_method=sync_batch_norm_method)
def xception_71_factory(inputs,
num_classes=None,
is_training=True,
global_pool=True,
keep_prob=0.5,
output_stride=None,
regularize_depthwise=False,
kernel_size=3,
multi_grid=None,
reuse=None,
scope='xception_71',
use_squeeze_excite=False,
se_pool_size=None,
sync_batch_norm_method='None'):
"""Xception-71 model factory."""
blocks = [
xception_block('entry_flow/block1',
depth_list=[128, 128, 128],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2,
kernel_size=kernel_size,
use_squeeze_excite=False,
se_pool_size=se_pool_size),
xception_block('entry_flow/block2',
depth_list=[256, 256, 256],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=1,
kernel_size=kernel_size,
use_squeeze_excite=False,
se_pool_size=se_pool_size),
xception_block('entry_flow/block3',
depth_list=[256, 256, 256],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2,
kernel_size=kernel_size,
use_squeeze_excite=False,
se_pool_size=se_pool_size),
xception_block('entry_flow/block4',
depth_list=[728, 728, 728],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=1,
kernel_size=kernel_size,
use_squeeze_excite=use_squeeze_excite,
se_pool_size=se_pool_size),
xception_block('entry_flow/block5',
depth_list=[728, 728, 728],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2,
kernel_size=kernel_size,
use_squeeze_excite=use_squeeze_excite,
se_pool_size=se_pool_size),
xception_block('middle_flow/block1',
depth_list=[728, 728, 728],
skip_connection_type='sum',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=16,
stride=1,
kernel_size=kernel_size,
use_squeeze_excite=use_squeeze_excite,
se_pool_size=se_pool_size),
xception_block('exit_flow/block1',
depth_list=[728, 1024, 1024],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2,
kernel_size=kernel_size,
use_squeeze_excite=use_squeeze_excite,
se_pool_size=se_pool_size),
xception_block('exit_flow/block2',
depth_list=[1536, 1536, 2048],
skip_connection_type='none',
activation_fn_in_separable_conv=True,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=1,
kernel_size=kernel_size,
unit_rate_list=multi_grid,
use_squeeze_excite=False,
se_pool_size=se_pool_size),
]
return xception(inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
keep_prob=keep_prob,
output_stride=output_stride,
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
def xception_71(inputs,
num_classes=None,
is_training=True,
global_pool=True,
keep_prob=0.5,
output_stride=None,
regularize_depthwise=False,
multi_grid=None,
reuse=None,
scope='xception_71',
sync_batch_norm_method='None'):
"""Xception-71 model."""
return xception_71_factory(
inputs=inputs,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
keep_prob=keep_prob,
output_stride=output_stride,
regularize_depthwise=regularize_depthwise,
multi_grid=multi_grid,
reuse=reuse,
scope=scope,
use_squeeze_excite=False,
se_pool_size=None,
sync_batch_norm_method=sync_batch_norm_method)
def xception_arg_scope(weight_decay=0.00004,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
batch_norm_scale=True,
weights_initializer_stddev=0.09,
regularize_depthwise=False,
use_batch_norm=True,
use_bounded_activation=False,
sync_batch_norm_method='None'):
"""Defines the default Xception arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
    weights_initializer_stddev: The standard deviation of the truncated normal
      weight initializer.
    regularize_depthwise: Whether or not to apply L2-norm regularization on the
      depthwise convolution weights.
    use_batch_norm: Whether or not to use batch normalization.
    use_bounded_activation: Whether or not to use bounded activations. Bounded
      activations better lend themselves to quantized inference.
    sync_batch_norm_method: String, sync batchnorm method. Currently only
      supports `None`. Also, it is only effective for Xception.
Returns:
An `arg_scope` to use for the Xception models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
}
if regularize_depthwise:
depthwise_regularizer = slim.l2_regularizer(weight_decay)
else:
depthwise_regularizer = None
activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu
batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method)
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d],
weights_initializer=tf.truncated_normal_initializer(
stddev=weights_initializer_stddev),
activation_fn=activation_fn,
normalizer_fn=batch_norm if use_batch_norm else None):
with slim.arg_scope([batch_norm], **batch_norm_params):
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope(
[slim.separable_conv2d],
weights_regularizer=depthwise_regularizer):
with slim.arg_scope(
[xception_module],
use_bounded_activation=use_bounded_activation,
use_explicit_padding=not use_bounded_activation) as arg_sc:
return arg_sc
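# A minimal usage sketch (not part of the original file). It assumes the
# standard slim convention that the network constructors above return a
# (net, end_points) tuple; the input shape below is arbitrary.
#
#   images = tf.placeholder(tf.float32, [1, 513, 513, 3])
#   with slim.arg_scope(xception_arg_scope()):
#     net, end_points = xception_65(images,
#                                   num_classes=None,
#                                   is_training=False,
#                                   global_pool=False,
#                                   output_stride=16)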
| 40,279 | 41.579281 | 80 | py |
models | models-master/research/deeplab/core/utils_test.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utils.py."""
import numpy as np
import tensorflow as tf
from deeplab.core import utils
class UtilsTest(tf.test.TestCase):
def testScaleDimensionOutput(self):
self.assertEqual(161, utils.scale_dimension(321, 0.5))
self.assertEqual(193, utils.scale_dimension(321, 0.6))
self.assertEqual(241, utils.scale_dimension(321, 0.75))
def testGetLabelWeightMask_withFloatLabelWeights(self):
labels = tf.constant([0, 4, 1, 3, 2])
ignore_label = 4
num_classes = 5
label_weights = 0.5
expected_label_weight_mask = np.array([0.5, 0.0, 0.5, 0.5, 0.5],
dtype=np.float32)
with self.test_session() as sess:
label_weight_mask = utils.get_label_weight_mask(
labels, ignore_label, num_classes, label_weights=label_weights)
label_weight_mask = sess.run(label_weight_mask)
self.assertAllEqual(label_weight_mask, expected_label_weight_mask)
def testGetLabelWeightMask_withListLabelWeights(self):
labels = tf.constant([0, 4, 1, 3, 2])
ignore_label = 4
num_classes = 5
label_weights = [0.0, 0.1, 0.2, 0.3, 0.4]
expected_label_weight_mask = np.array([0.0, 0.0, 0.1, 0.3, 0.2],
dtype=np.float32)
with self.test_session() as sess:
label_weight_mask = utils.get_label_weight_mask(
labels, ignore_label, num_classes, label_weights=label_weights)
label_weight_mask = sess.run(label_weight_mask)
self.assertAllEqual(label_weight_mask, expected_label_weight_mask)
def testGetLabelWeightMask_withInvalidLabelWeightsType(self):
labels = tf.constant([0, 4, 1, 3, 2])
ignore_label = 4
num_classes = 5
self.assertRaisesWithRegexpMatch(
ValueError,
'^The type of label_weights is invalid, it must be a float or a list',
utils.get_label_weight_mask,
labels=labels,
ignore_label=ignore_label,
num_classes=num_classes,
label_weights=None)
def testGetLabelWeightMask_withInvalidLabelWeightsLength(self):
labels = tf.constant([0, 4, 1, 3, 2])
ignore_label = 4
num_classes = 5
label_weights = [0.0, 0.1, 0.2]
self.assertRaisesWithRegexpMatch(
ValueError,
'^Length of label_weights must be equal to num_classes if it is a list',
utils.get_label_weight_mask,
labels=labels,
ignore_label=ignore_label,
num_classes=num_classes,
label_weights=label_weights)
if __name__ == '__main__':
tf.test.main()
| 3,252 | 34.747253 | 80 | py |
models | models-master/research/deeplab/core/conv2d_ws_test.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conv2d_ws."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
from deeplab.core import conv2d_ws
class ConvolutionTest(tf.test.TestCase):
def testInvalidShape(self):
with self.cached_session():
images_3d = tf.random_uniform((5, 6, 7, 9, 3), seed=1)
with self.assertRaisesRegexp(
ValueError, 'Convolution expects input with rank 4, got 5'):
conv2d_ws.conv2d(images_3d, 32, 3)
def testInvalidDataFormat(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
conv2d_ws.conv2d(images, 32, 3, data_format='CHWN')
def testCreateConv(self):
height, width = 7, 9
with self.cached_session():
images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32)
output = conv2d_ws.conv2d(images, 32, [3, 3])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = contrib_framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
biases = contrib_framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateConvWithWS(self):
height, width = 7, 9
with self.cached_session():
images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32)
output = conv2d_ws.conv2d(
images, 32, [3, 3], use_weight_standardization=True)
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = contrib_framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
biases = contrib_framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateConvNCHW(self):
height, width = 7, 9
with self.cached_session():
images = np.random.uniform(size=(5, 4, height, width)).astype(np.float32)
output = conv2d_ws.conv2d(images, 32, [3, 3], data_format='NCHW')
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 32, height, width])
weights = contrib_framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
biases = contrib_framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateSquareConv(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = conv2d_ws.conv2d(images, 32, 3)
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateConvWithTensorShape(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = conv2d_ws.conv2d(images, 32, images.get_shape()[1:3])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateFullyConv(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
output = conv2d_ws.conv2d(
images, 64, images.get_shape()[1:3], padding='VALID')
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64])
biases = contrib_framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [64])
def testFullyConvWithCustomGetter(self):
height, width = 7, 9
with self.cached_session():
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with tf.variable_scope('test', custom_getter=custom_getter):
images = tf.random_uniform((5, height, width, 32), seed=1)
conv2d_ws.conv2d(images, 64, images.get_shape()[1:3])
self.assertEqual(called[0], 2) # Custom getter called twice.
def testCreateVerticalConv(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 4), seed=1)
output = conv2d_ws.conv2d(images, 32, [3, 1])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = contrib_framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 1, 4, 32])
biases = contrib_framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateHorizontalConv(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 4), seed=1)
output = conv2d_ws.conv2d(images, 32, [1, 3])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = contrib_framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [1, 3, 4, 32])
def testCreateConvWithStride(self):
height, width = 6, 8
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = conv2d_ws.conv2d(images, 32, [3, 3], stride=2)
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 32])
def testCreateConvCreatesWeightsAndBiasesVars(self):
height, width = 7, 9
images = tf.random_uniform((5, height, width, 3), seed=1)
with self.cached_session():
self.assertFalse(contrib_framework.get_variables('conv1/weights'))
self.assertFalse(contrib_framework.get_variables('conv1/biases'))
conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1')
self.assertTrue(contrib_framework.get_variables('conv1/weights'))
self.assertTrue(contrib_framework.get_variables('conv1/biases'))
def testCreateConvWithScope(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(output.op.name, 'conv1/Relu')
def testCreateConvWithCollection(self):
height, width = 7, 9
images = tf.random_uniform((5, height, width, 3), seed=1)
with tf.name_scope('fe'):
conv = conv2d_ws.conv2d(
images, 32, [3, 3], outputs_collections='outputs', scope='Conv')
output_collected = tf.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['Conv'])
self.assertEqual(output_collected, conv)
def testCreateConvWithoutActivation(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = conv2d_ws.conv2d(images, 32, [3, 3], activation_fn=None)
self.assertEqual(output.op.name, 'Conv/BiasAdd')
def testCreateConvValid(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = conv2d_ws.conv2d(images, 32, [3, 3], padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 5, 7, 32])
def testCreateConvWithWD(self):
height, width = 7, 9
weight_decay = 0.01
with self.cached_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1)
regularizer = contrib_layers.l2_regularizer(weight_decay)
conv2d_ws.conv2d(images, 32, [3, 3], weights_regularizer=regularizer)
l2_loss = tf.nn.l2_loss(
contrib_framework.get_variables_by_name('weights')[0])
wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEqual(wd.op.name, 'Conv/kernel/Regularizer/l2_regularizer')
sess.run(tf.global_variables_initializer())
self.assertAlmostEqual(sess.run(wd), weight_decay * l2_loss.eval())
def testCreateConvNoRegularizers(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
conv2d_ws.conv2d(images, 32, [3, 3])
self.assertEqual(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])
def testReuseVars(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(len(contrib_framework.get_variables()), 2)
conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1', reuse=True)
self.assertEqual(len(contrib_framework.get_variables()), 2)
def testNonReuseVars(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
conv2d_ws.conv2d(images, 32, [3, 3])
self.assertEqual(len(contrib_framework.get_variables()), 2)
conv2d_ws.conv2d(images, 32, [3, 3])
self.assertEqual(len(contrib_framework.get_variables()), 4)
def testReuseConvWithWD(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
weight_decay = contrib_layers.l2_regularizer(0.01)
with contrib_framework.arg_scope([conv2d_ws.conv2d],
weights_regularizer=weight_decay):
conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(len(contrib_framework.get_variables()), 2)
self.assertEqual(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1', reuse=True)
self.assertEqual(len(contrib_framework.get_variables()), 2)
self.assertEqual(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testConvWithBatchNorm(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
with contrib_framework.arg_scope([conv2d_ws.conv2d],
normalizer_fn=contrib_layers.batch_norm,
normalizer_params={'decay': 0.9}):
net = conv2d_ws.conv2d(images, 32, [3, 3])
net = conv2d_ws.conv2d(net, 32, [3, 3])
self.assertEqual(len(contrib_framework.get_variables()), 8)
self.assertEqual(
len(contrib_framework.get_variables('Conv/BatchNorm')), 3)
self.assertEqual(
len(contrib_framework.get_variables('Conv_1/BatchNorm')), 3)
def testReuseConvWithBatchNorm(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
with contrib_framework.arg_scope([conv2d_ws.conv2d],
normalizer_fn=contrib_layers.batch_norm,
normalizer_params={'decay': 0.9}):
net = conv2d_ws.conv2d(images, 32, [3, 3], scope='Conv')
net = conv2d_ws.conv2d(net, 32, [3, 3], scope='Conv', reuse=True)
self.assertEqual(len(contrib_framework.get_variables()), 4)
self.assertEqual(
len(contrib_framework.get_variables('Conv/BatchNorm')), 3)
self.assertEqual(
len(contrib_framework.get_variables('Conv_1/BatchNorm')), 0)
def testCreateConvCreatesWeightsAndBiasesVarsWithRateTwo(self):
height, width = 7, 9
images = tf.random_uniform((5, height, width, 3), seed=1)
with self.cached_session():
self.assertFalse(contrib_framework.get_variables('conv1/weights'))
self.assertFalse(contrib_framework.get_variables('conv1/biases'))
conv2d_ws.conv2d(images, 32, [3, 3], rate=2, scope='conv1')
self.assertTrue(contrib_framework.get_variables('conv1/weights'))
self.assertTrue(contrib_framework.get_variables('conv1/biases'))
def testOutputSizeWithRateTwoSamePadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 10, 12, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = conv2d_ws.conv2d(
images, num_filters, [3, 3], rate=2, padding='SAME')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithRateTwoValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 6, 8, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = conv2d_ws.conv2d(
images, num_filters, [3, 3], rate=2, padding='VALID')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithRateTwoThreeValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 6, 6, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = conv2d_ws.conv2d(
images, num_filters, [3, 3], rate=[2, 3], padding='VALID')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testDynamicOutputSizeWithRateOneValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 7, 9, num_filters]
with self.cached_session():
images = tf.placeholder(np.float32, [None, None, None, input_size[3]])
output = conv2d_ws.conv2d(
images, num_filters, [3, 3], rate=1, padding='VALID')
tf.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithRateOneValidPaddingNCHW(self):
if tf.test.is_gpu_available(cuda_only=True):
num_filters = 32
input_size = [5, 3, 9, 11]
expected_size = [None, num_filters, None, None]
expected_size_dynamic = [5, num_filters, 7, 9]
with self.session(use_gpu=True):
images = tf.placeholder(np.float32, [None, input_size[1], None, None])
output = conv2d_ws.conv2d(
images,
num_filters, [3, 3],
rate=1,
padding='VALID',
data_format='NCHW')
tf.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithRateTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 5, 7, num_filters]
with self.cached_session():
images = tf.placeholder(np.float32, [None, None, None, input_size[3]])
output = conv2d_ws.conv2d(
images, num_filters, [3, 3], rate=2, padding='VALID')
tf.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testWithScope(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 5, 7, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = conv2d_ws.conv2d(
images, num_filters, [3, 3], rate=2, padding='VALID', scope='conv7')
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(output.op.name, 'conv7/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testWithScopeWithoutActivation(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 5, 7, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = conv2d_ws.conv2d(
images,
num_filters, [3, 3],
rate=2,
padding='VALID',
activation_fn=None,
scope='conv7')
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(output.op.name, 'conv7/BiasAdd')
self.assertListEqual(list(output.eval().shape), expected_size)
if __name__ == '__main__':
tf.test.main()
| 18,478 | 42.893112 | 80 | py |
models | models-master/research/deeplab/datasets/remove_gt_colormap.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Removes the color map from segmentation annotations.
Removes the color map from the ground truth segmentation annotations and saves
the results to output_dir.
"""
import glob
import os.path
import numpy as np
from PIL import Image
import tensorflow as tf
FLAGS = tf.compat.v1.flags.FLAGS
tf.compat.v1.flags.DEFINE_string('original_gt_folder',
'./VOCdevkit/VOC2012/SegmentationClass',
'Original ground truth annotations.')
tf.compat.v1.flags.DEFINE_string('segmentation_format', 'png', 'Segmentation format.')
tf.compat.v1.flags.DEFINE_string('output_dir',
'./VOCdevkit/VOC2012/SegmentationClassRaw',
'folder to save modified ground truth annotations.')
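# A usage sketch (not part of the original script); the paths below are
# hypothetical and should point at a local PASCAL VOC 2012 checkout:
#
#   python remove_gt_colormap.py \
#     --original_gt_folder=/data/VOCdevkit/VOC2012/SegmentationClass \
#     --output_dir=/data/VOCdevkit/VOC2012/SegmentationClassRaw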
def _remove_colormap(filename):
"""Removes the color map from the annotation.
Args:
filename: Ground truth annotation filename.
Returns:
Annotation without color map.
"""
return np.array(Image.open(filename))
def _save_annotation(annotation, filename):
"""Saves the annotation as png file.
Args:
annotation: Segmentation annotation.
filename: Output filename.
"""
pil_image = Image.fromarray(annotation.astype(dtype=np.uint8))
with tf.io.gfile.GFile(filename, mode='w') as f:
pil_image.save(f, 'PNG')
def main(unused_argv):
# Create the output directory if not exists.
if not tf.io.gfile.isdir(FLAGS.output_dir):
tf.io.gfile.makedirs(FLAGS.output_dir)
annotations = glob.glob(os.path.join(FLAGS.original_gt_folder,
'*.' + FLAGS.segmentation_format))
for annotation in annotations:
raw_annotation = _remove_colormap(annotation)
filename = os.path.basename(annotation)[:-4]
_save_annotation(raw_annotation,
os.path.join(
FLAGS.output_dir,
filename + '.' + FLAGS.segmentation_format))
if __name__ == '__main__':
tf.compat.v1.app.run()
| 2,703 | 31.190476 | 86 | py |
models | models-master/research/deeplab/datasets/data_generator.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for providing semantic segmentaion data.
The SegmentationDataset class provides both images and annotations (semantic
segmentation and/or instance segmentation) for TensorFlow. Currently, we
support the following datasets:
1. PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/voc2012/).
PASCAL VOC 2012 semantic segmentation dataset annotates 20 foreground objects
(e.g., bike, person, and so on) and leaves all the other semantic classes as
one background class. The dataset contains 1464, 1449, and 1456 annotated
images for the training, validation and test respectively.
2. Cityscapes dataset (https://www.cityscapes-dataset.com)
The Cityscapes dataset contains 19 semantic labels (such as road, person, car,
and so on) for urban street scenes.
3. ADE20K dataset (http://groups.csail.mit.edu/vision/datasets/ADE20K)
The ADE20K dataset contains 150 semantic labels for both urban street scenes
and indoor scenes.
References:
M. Everingham, S. M. A. Eslami, L. V. Gool, C. K. I. Williams, J. Winn,
and A. Zisserman, The pascal visual object classes challenge a retrospective.
IJCV, 2014.
M. Cordts, M. Omran, S. Ramos, T. Rehfeld, M. Enzweiler, R. Benenson,
U. Franke, S. Roth, and B. Schiele, "The cityscapes dataset for semantic urban
scene understanding," In Proc. of CVPR, 2016.
B. Zhou, H. Zhao, X. Puig, S. Fidler, A. Barriuso, A. Torralba, "Scene Parsing
through ADE20K dataset", In Proc. of CVPR, 2017.
"""
import collections
import os
import tensorflow as tf
from deeplab import common
from deeplab import input_preprocess
# Named tuple to describe the dataset properties.
DatasetDescriptor = collections.namedtuple(
'DatasetDescriptor',
[
'splits_to_sizes', # Splits of the dataset into training, val and test.
'num_classes', # Number of semantic classes, including the
# background class (if exists). For example, there
# are 20 foreground classes + 1 background class in
# the PASCAL VOC 2012 dataset. Thus, we set
# num_classes=21.
'ignore_label', # Ignore label value.
])
_CITYSCAPES_INFORMATION = DatasetDescriptor(
splits_to_sizes={'train_fine': 2975,
'train_coarse': 22973,
'trainval_fine': 3475,
'trainval_coarse': 23473,
'val_fine': 500,
'test_fine': 1525},
num_classes=19,
ignore_label=255,
)
_PASCAL_VOC_SEG_INFORMATION = DatasetDescriptor(
splits_to_sizes={
'train': 1464,
'train_aug': 10582,
'trainval': 2913,
'val': 1449,
},
num_classes=21,
ignore_label=255,
)
_ADE20K_INFORMATION = DatasetDescriptor(
splits_to_sizes={
'train': 20210, # num of samples in images/training
'val': 2000, # num of samples in images/validation
},
num_classes=151,
ignore_label=0,
)
_DATASETS_INFORMATION = {
'cityscapes': _CITYSCAPES_INFORMATION,
'pascal_voc_seg': _PASCAL_VOC_SEG_INFORMATION,
'ade20k': _ADE20K_INFORMATION,
}
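# A sketch of how an additional dataset could be registered (not part of the
# original file; the name and numbers below are hypothetical): define a
# DatasetDescriptor with the split sizes, number of classes, and ignore label,
# then add it to _DATASETS_INFORMATION, e.g.
#
#   _MY_DATASET_INFORMATION = DatasetDescriptor(
#       splits_to_sizes={'train': 10000, 'val': 1000},
#       num_classes=2,
#       ignore_label=255,
#   )
#   _DATASETS_INFORMATION['my_dataset'] = _MY_DATASET_INFORMATION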
# Default file pattern of TFRecord of TensorFlow Example.
_FILE_PATTERN = '%s-*'
def get_cityscapes_dataset_name():
return 'cityscapes'
class Dataset(object):
"""Represents input dataset for deeplab model."""
def __init__(self,
dataset_name,
split_name,
dataset_dir,
batch_size,
crop_size,
min_resize_value=None,
max_resize_value=None,
resize_factor=None,
min_scale_factor=1.,
max_scale_factor=1.,
scale_factor_step_size=0,
model_variant=None,
num_readers=1,
is_training=False,
should_shuffle=False,
should_repeat=False):
"""Initializes the dataset.
Args:
dataset_name: Dataset name.
split_name: A train/val Split name.
dataset_dir: The directory of the dataset sources.
batch_size: Batch size.
crop_size: The size used to crop the image and label.
min_resize_value: Desired size of the smaller image side.
max_resize_value: Maximum allowed size of the larger image side.
resize_factor: Resized dimensions are multiple of factor plus one.
min_scale_factor: Minimum scale factor value.
max_scale_factor: Maximum scale factor value.
scale_factor_step_size: The step size from min scale factor to max scale
factor. The input is randomly scaled based on the value of
(min_scale_factor, max_scale_factor, scale_factor_step_size).
model_variant: Model variant (string) for choosing how to mean-subtract
the images. See feature_extractor.network_map for supported model
variants.
num_readers: Number of readers for data provider.
is_training: Boolean, if dataset is for training or not.
should_shuffle: Boolean, if should shuffle the input data.
should_repeat: Boolean, if should repeat the input data.
Raises:
ValueError: Dataset name and split name are not supported.
"""
if dataset_name not in _DATASETS_INFORMATION:
raise ValueError('The specified dataset is not supported yet.')
self.dataset_name = dataset_name
splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes
if split_name not in splits_to_sizes:
raise ValueError('data split name %s not recognized' % split_name)
if model_variant is None:
tf.logging.warning('Please specify a model_variant. See '
'feature_extractor.network_map for supported model '
'variants.')
self.split_name = split_name
self.dataset_dir = dataset_dir
self.batch_size = batch_size
self.crop_size = crop_size
self.min_resize_value = min_resize_value
self.max_resize_value = max_resize_value
self.resize_factor = resize_factor
self.min_scale_factor = min_scale_factor
self.max_scale_factor = max_scale_factor
self.scale_factor_step_size = scale_factor_step_size
self.model_variant = model_variant
self.num_readers = num_readers
self.is_training = is_training
self.should_shuffle = should_shuffle
self.should_repeat = should_repeat
self.num_of_classes = _DATASETS_INFORMATION[self.dataset_name].num_classes
self.ignore_label = _DATASETS_INFORMATION[self.dataset_name].ignore_label
def _parse_function(self, example_proto):
"""Function to parse the example proto.
Args:
example_proto: Proto in the format of tf.Example.
Returns:
A dictionary with parsed image, label, height, width and image name.
Raises:
ValueError: Label is of wrong shape.
"""
# Currently only supports jpeg and png.
# Need to use this logic because the shape is not known for
# tf.image.decode_image and we rely on this info to
# extend label if necessary.
def _decode_image(content, channels):
return tf.cond(
tf.image.is_jpeg(content),
lambda: tf.image.decode_jpeg(content, channels),
lambda: tf.image.decode_png(content, channels))
features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/filename':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/height':
tf.FixedLenFeature((), tf.int64, default_value=0),
'image/width':
tf.FixedLenFeature((), tf.int64, default_value=0),
'image/segmentation/class/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/segmentation/class/format':
tf.FixedLenFeature((), tf.string, default_value='png'),
}
parsed_features = tf.parse_single_example(example_proto, features)
image = _decode_image(parsed_features['image/encoded'], channels=3)
label = None
if self.split_name != common.TEST_SET:
label = _decode_image(
parsed_features['image/segmentation/class/encoded'], channels=1)
image_name = parsed_features['image/filename']
if image_name is None:
image_name = tf.constant('')
sample = {
common.IMAGE: image,
common.IMAGE_NAME: image_name,
common.HEIGHT: parsed_features['image/height'],
common.WIDTH: parsed_features['image/width'],
}
if label is not None:
if label.get_shape().ndims == 2:
label = tf.expand_dims(label, 2)
elif label.get_shape().ndims == 3 and label.shape.dims[2] == 1:
pass
else:
raise ValueError('Input label shape must be [height, width], or '
'[height, width, 1].')
label.set_shape([None, None, 1])
sample[common.LABELS_CLASS] = label
return sample
def _preprocess_image(self, sample):
"""Preprocesses the image and label.
Args:
sample: A sample containing image and label.
Returns:
sample: Sample with preprocessed image and label.
Raises:
ValueError: Ground truth label not provided during training.
"""
image = sample[common.IMAGE]
label = sample[common.LABELS_CLASS]
original_image, image, label = input_preprocess.preprocess_image_and_label(
image=image,
label=label,
crop_height=self.crop_size[0],
crop_width=self.crop_size[1],
min_resize_value=self.min_resize_value,
max_resize_value=self.max_resize_value,
resize_factor=self.resize_factor,
min_scale_factor=self.min_scale_factor,
max_scale_factor=self.max_scale_factor,
scale_factor_step_size=self.scale_factor_step_size,
ignore_label=self.ignore_label,
is_training=self.is_training,
model_variant=self.model_variant)
sample[common.IMAGE] = image
if not self.is_training:
# Original image is only used during visualization.
sample[common.ORIGINAL_IMAGE] = original_image
if label is not None:
sample[common.LABEL] = label
    # Remove the common.LABELS_CLASS key from the sample since it is only used
    # to derive the label and is not used in training and evaluation.
sample.pop(common.LABELS_CLASS, None)
return sample
def get_one_shot_iterator(self):
"""Gets an iterator that iterates across the dataset once.
Returns:
An iterator of type tf.data.Iterator.
"""
files = self._get_all_files()
dataset = (
tf.data.TFRecordDataset(files, num_parallel_reads=self.num_readers)
.map(self._parse_function, num_parallel_calls=self.num_readers)
.map(self._preprocess_image, num_parallel_calls=self.num_readers))
if self.should_shuffle:
dataset = dataset.shuffle(buffer_size=100)
if self.should_repeat:
dataset = dataset.repeat() # Repeat forever for training.
else:
dataset = dataset.repeat(1)
dataset = dataset.batch(self.batch_size).prefetch(self.batch_size)
return dataset.make_one_shot_iterator()
def _get_all_files(self):
"""Gets all the files to read data from.
Returns:
A list of input files.
"""
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(self.dataset_dir,
file_pattern % self.split_name)
return tf.gfile.Glob(file_pattern)
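# A minimal usage sketch (not part of the original file); the dataset
# directory below is hypothetical:
#
#   dataset = Dataset(
#       dataset_name='pascal_voc_seg',
#       split_name='train',
#       dataset_dir='/data/pascal_voc_seg/tfrecord',
#       batch_size=8,
#       crop_size=[513, 513],
#       min_scale_factor=0.5,
#       max_scale_factor=2.0,
#       scale_factor_step_size=0.25,
#       model_variant='xception_65',
#       is_training=True,
#       should_shuffle=True,
#       should_repeat=True)
#   samples = dataset.get_one_shot_iterator().get_next()
#   # samples[common.IMAGE] and samples[common.LABEL] then hold a batch of
#   # preprocessed images and groundtruth labels.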
| 12,195 | 33.746439 | 80 | py |
models | models-master/research/deeplab/datasets/build_cityscapes_data.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts Cityscapes data to TFRecord file format with Example protos.
The Cityscapes dataset is expected to have the following directory structure:
+ cityscapes
   - build_cityscapes_data.py (current working directory).
- build_data.py
+ cityscapesscripts
+ annotation
+ evaluation
+ helpers
+ preparation
+ viewer
+ gtFine
+ train
+ val
+ test
+ leftImg8bit
+ train
+ val
+ test
+ tfrecord
This script converts data into sharded data files and save at tfrecord folder.
Note that before running this script, the users should (1) register the
Cityscapes dataset website at https://www.cityscapes-dataset.com to
download the dataset, and (2) run the script provided by Cityscapes
`preparation/createTrainIdLabelImgs.py` to generate the training groundtruth.
Also note that the TensorFlow model will be trained with `TrainId` instead
of `EvalId`, which is used on the evaluation server. Thus, the users need to
convert the predicted labels to `EvalId` for evaluation on the server. See
vis.py for more details.
The Example proto contains the following fields:
image/encoded: encoded image content.
image/filename: image filename.
image/format: image file format.
image/height: image height.
image/width: image width.
image/channels: image channels.
image/segmentation/class/encoded: encoded semantic segmentation content.
image/segmentation/class/format: semantic segmentation file format.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import math
import os.path
import re
import sys
import build_data
from six.moves import range
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('cityscapes_root',
'./cityscapes',
'Cityscapes dataset root folder.')
tf.app.flags.DEFINE_string(
'output_dir',
'./tfrecord',
'Path to save converted SSTable of TensorFlow examples.')
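# A usage sketch (not part of the original script); the paths are hypothetical
# and assume `preparation/createTrainIdLabelImgs.py` has already been run:
#
#   python build_cityscapes_data.py \
#     --cityscapes_root=/data/cityscapes \
#     --output_dir=/data/cityscapes/tfrecord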
_NUM_SHARDS = 10
# A map from data type to folder name that saves the data.
_FOLDERS_MAP = {
'image': 'leftImg8bit',
'label': 'gtFine',
}
# A map from data type to filename postfix.
_POSTFIX_MAP = {
'image': '_leftImg8bit',
'label': '_gtFine_labelTrainIds',
}
# A map from data type to data format.
_DATA_FORMAT_MAP = {
'image': 'png',
'label': 'png',
}
# Image file pattern.
_IMAGE_FILENAME_RE = re.compile('(.+)' + _POSTFIX_MAP['image'])
def _get_files(data, dataset_split):
"""Gets files for the specified data type and dataset split.
Args:
data: String, desired data ('image' or 'label').
dataset_split: String, dataset split ('train_fine', 'val_fine', 'test_fine')
Returns:
A list of sorted file names or None when getting label for
test set.
"""
if dataset_split == 'train_fine':
split_dir = 'train'
elif dataset_split == 'val_fine':
split_dir = 'val'
elif dataset_split == 'test_fine':
split_dir = 'test'
else:
raise RuntimeError("Split {} is not supported".format(dataset_split))
pattern = '*%s.%s' % (_POSTFIX_MAP[data], _DATA_FORMAT_MAP[data])
search_files = os.path.join(
FLAGS.cityscapes_root, _FOLDERS_MAP[data], split_dir, '*', pattern)
filenames = glob.glob(search_files)
return sorted(filenames)
def _convert_dataset(dataset_split):
"""Converts the specified dataset split to TFRecord format.
Args:
dataset_split: The dataset split (e.g., train_fine, val_fine).
Raises:
RuntimeError: If loaded image and label have different shape, or if the
image file with specified postfix could not be found.
"""
image_files = _get_files('image', dataset_split)
label_files = _get_files('label', dataset_split)
num_images = len(image_files)
num_labels = len(label_files)
num_per_shard = int(math.ceil(num_images / _NUM_SHARDS))
if num_images != num_labels:
raise RuntimeError("The number of images and labels doesn't match: {} {}".format(num_images, num_labels))
image_reader = build_data.ImageReader('png', channels=3)
label_reader = build_data.ImageReader('png', channels=1)
for shard_id in range(_NUM_SHARDS):
shard_filename = '%s-%05d-of-%05d.tfrecord' % (
dataset_split, shard_id, _NUM_SHARDS)
output_filename = os.path.join(FLAGS.output_dir, shard_filename)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_idx = shard_id * num_per_shard
end_idx = min((shard_id + 1) * num_per_shard, num_images)
for i in range(start_idx, end_idx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i + 1, num_images, shard_id))
sys.stdout.flush()
# Read the image.
image_data = tf.gfile.FastGFile(image_files[i], 'rb').read()
height, width = image_reader.read_image_dims(image_data)
# Read the semantic segmentation annotation.
seg_data = tf.gfile.FastGFile(label_files[i], 'rb').read()
seg_height, seg_width = label_reader.read_image_dims(seg_data)
if height != seg_height or width != seg_width:
raise RuntimeError('Shape mismatched between image and label.')
# Convert to tf example.
re_match = _IMAGE_FILENAME_RE.search(image_files[i])
if re_match is None:
raise RuntimeError('Invalid image filename: ' + image_files[i])
filename = os.path.basename(re_match.group(1))
example = build_data.image_seg_to_tfexample(
image_data, filename, height, width, seg_data)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def main(unused_argv):
# Only support converting 'train_fine', 'val_fine' and 'test_fine' sets for now.
for dataset_split in ['train_fine', 'val_fine', 'test_fine']:
_convert_dataset(dataset_split)
if __name__ == '__main__':
tf.app.run()
| 6,679 | 32.567839 | 109 | py |
models | models-master/research/deeplab/datasets/build_ade20k_data.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ADE20K data to TFRecord file format with Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import build_data
from six.moves import range
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'train_image_folder',
'./ADE20K/ADEChallengeData2016/images/training',
    'Folder containing training images')
tf.app.flags.DEFINE_string(
'train_image_label_folder',
'./ADE20K/ADEChallengeData2016/annotations/training',
    'Folder containing annotations for training images')
tf.app.flags.DEFINE_string(
'val_image_folder',
'./ADE20K/ADEChallengeData2016/images/validation',
'Folder containing validation images')
tf.app.flags.DEFINE_string(
'val_image_label_folder',
'./ADE20K/ADEChallengeData2016/annotations/validation',
'Folder containing annotations for validation')
tf.app.flags.DEFINE_string(
'output_dir', './ADE20K/tfrecord',
    'Path to save converted TFRecord of TensorFlow examples')
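# A usage sketch (not part of the original script); all paths are hypothetical:
#
#   python build_ade20k_data.py \
#     --train_image_folder=/data/ADEChallengeData2016/images/training \
#     --train_image_label_folder=/data/ADEChallengeData2016/annotations/training \
#     --val_image_folder=/data/ADEChallengeData2016/images/validation \
#     --val_image_label_folder=/data/ADEChallengeData2016/annotations/validation \
#     --output_dir=/data/ADE20K/tfrecord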
_NUM_SHARDS = 4
def _convert_dataset(dataset_split, dataset_dir, dataset_label_dir):
"""Converts the ADE20k dataset into into tfrecord format.
Args:
dataset_split: Dataset split (e.g., train, val).
dataset_dir: Dir in which the dataset locates.
dataset_label_dir: Dir in which the annotations locates.
Raises:
RuntimeError: If loaded image and label have different shape.
"""
img_names = tf.gfile.Glob(os.path.join(dataset_dir, '*.jpg'))
random.shuffle(img_names)
seg_names = []
for f in img_names:
# get the filename without the extension
basename = os.path.basename(f).split('.')[0]
    # Get its corresponding annotation *.png.
seg = os.path.join(dataset_label_dir, basename+'.png')
seg_names.append(seg)
num_images = len(img_names)
num_per_shard = int(math.ceil(num_images / _NUM_SHARDS))
image_reader = build_data.ImageReader('jpeg', channels=3)
label_reader = build_data.ImageReader('png', channels=1)
for shard_id in range(_NUM_SHARDS):
output_filename = os.path.join(
FLAGS.output_dir,
'%s-%05d-of-%05d.tfrecord' % (dataset_split, shard_id, _NUM_SHARDS))
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_idx = shard_id * num_per_shard
end_idx = min((shard_id + 1) * num_per_shard, num_images)
for i in range(start_idx, end_idx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i + 1, num_images, shard_id))
sys.stdout.flush()
# Read the image.
image_filename = img_names[i]
image_data = tf.gfile.FastGFile(image_filename, 'rb').read()
height, width = image_reader.read_image_dims(image_data)
# Read the semantic segmentation annotation.
seg_filename = seg_names[i]
seg_data = tf.gfile.FastGFile(seg_filename, 'rb').read()
seg_height, seg_width = label_reader.read_image_dims(seg_data)
if height != seg_height or width != seg_width:
raise RuntimeError('Shape mismatched between image and label.')
# Convert to tf example.
example = build_data.image_seg_to_tfexample(
image_data, img_names[i], height, width, seg_data)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def main(unused_argv):
tf.gfile.MakeDirs(FLAGS.output_dir)
_convert_dataset(
'train', FLAGS.train_image_folder, FLAGS.train_image_label_folder)
_convert_dataset('val', FLAGS.val_image_folder, FLAGS.val_image_label_folder)
if __name__ == '__main__':
tf.app.run()
| 4,409 | 34.564516 | 80 | py |
models | models-master/research/deeplab/datasets/build_voc2012_data.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts PASCAL VOC 2012 data to TFRecord file format with Example protos.
PASCAL VOC 2012 dataset is expected to have the following directory structure:
+ pascal_voc_seg
- build_data.py
- build_voc2012_data.py (current working directory).
+ VOCdevkit
+ VOC2012
+ JPEGImages
+ SegmentationClass
+ ImageSets
+ Segmentation
+ tfrecord
Image folder:
./VOCdevkit/VOC2012/JPEGImages
Semantic segmentation annotations:
./VOCdevkit/VOC2012/SegmentationClass
list folder:
./VOCdevkit/VOC2012/ImageSets/Segmentation
This script converts data into sharded data files and save at tfrecord folder.
The Example proto contains the following fields:
image/encoded: encoded image content.
image/filename: image filename.
image/format: image file format.
image/height: image height.
image/width: image width.
image/channels: image channels.
image/segmentation/class/encoded: encoded semantic segmentation content.
image/segmentation/class/format: semantic segmentation file format.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os.path
import sys
import build_data
from six.moves import range
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('image_folder',
'./VOCdevkit/VOC2012/JPEGImages',
'Folder containing images.')
tf.app.flags.DEFINE_string(
'semantic_segmentation_folder',
'./VOCdevkit/VOC2012/SegmentationClassRaw',
'Folder containing semantic segmentation annotations.')
tf.app.flags.DEFINE_string(
'list_folder',
'./VOCdevkit/VOC2012/ImageSets/Segmentation',
'Folder containing lists for training and validation')
tf.app.flags.DEFINE_string(
'output_dir',
'./tfrecord',
'Path to save converted SSTable of TensorFlow examples.')
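# A usage sketch (not part of the original script); the paths are hypothetical
# and assume remove_gt_colormap.py has already produced SegmentationClassRaw:
#
#   python build_voc2012_data.py \
#     --image_folder=/data/VOCdevkit/VOC2012/JPEGImages \
#     --semantic_segmentation_folder=/data/VOCdevkit/VOC2012/SegmentationClassRaw \
#     --list_folder=/data/VOCdevkit/VOC2012/ImageSets/Segmentation \
#     --image_format=jpg \
#     --output_dir=/data/pascal_voc_seg/tfrecord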
_NUM_SHARDS = 4
def _convert_dataset(dataset_split):
"""Converts the specified dataset split to TFRecord format.
Args:
dataset_split: The dataset split (e.g., train, test).
Raises:
RuntimeError: If loaded image and label have different shape.
"""
dataset = os.path.basename(dataset_split)[:-4]
sys.stdout.write('Processing ' + dataset)
filenames = [x.strip('\n') for x in open(dataset_split, 'r')]
num_images = len(filenames)
num_per_shard = int(math.ceil(num_images / _NUM_SHARDS))
image_reader = build_data.ImageReader('jpeg', channels=3)
label_reader = build_data.ImageReader('png', channels=1)
for shard_id in range(_NUM_SHARDS):
output_filename = os.path.join(
FLAGS.output_dir,
'%s-%05d-of-%05d.tfrecord' % (dataset, shard_id, _NUM_SHARDS))
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_idx = shard_id * num_per_shard
end_idx = min((shard_id + 1) * num_per_shard, num_images)
for i in range(start_idx, end_idx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i + 1, len(filenames), shard_id))
sys.stdout.flush()
# Read the image.
image_filename = os.path.join(
FLAGS.image_folder, filenames[i] + '.' + FLAGS.image_format)
image_data = tf.gfile.GFile(image_filename, 'rb').read()
height, width = image_reader.read_image_dims(image_data)
# Read the semantic segmentation annotation.
seg_filename = os.path.join(
FLAGS.semantic_segmentation_folder,
filenames[i] + '.' + FLAGS.label_format)
seg_data = tf.gfile.GFile(seg_filename, 'rb').read()
seg_height, seg_width = label_reader.read_image_dims(seg_data)
if height != seg_height or width != seg_width:
raise RuntimeError('Shape mismatched between image and label.')
# Convert to tf example.
example = build_data.image_seg_to_tfexample(
image_data, filenames[i], height, width, seg_data)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def main(unused_argv):
dataset_splits = tf.gfile.Glob(os.path.join(FLAGS.list_folder, '*.txt'))
for dataset_split in dataset_splits:
_convert_dataset(dataset_split)
if __name__ == '__main__':
tf.app.run()
| 5,008 | 33.07483 | 80 | py |
models | models-master/research/deeplab/datasets/build_data.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common utility functions and classes for building dataset.
This script contains utility functions and classes to converts dataset to
TFRecord file format with Example protos.
The Example proto contains the following fields:
image/encoded: encoded image content.
image/filename: image filename.
image/format: image file format.
image/height: image height.
image/width: image width.
image/channels: image channels.
image/segmentation/class/encoded: encoded semantic segmentation content.
image/segmentation/class/format: semantic segmentation file format.
"""
import collections
import six
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_enum('image_format', 'png', ['jpg', 'jpeg', 'png'],
'Image format.')
tf.app.flags.DEFINE_enum('label_format', 'png', ['png'],
'Segmentation label format.')
# A map from image format to expected data format.
_IMAGE_FORMAT_MAP = {
'jpg': 'jpeg',
'jpeg': 'jpeg',
'png': 'png',
}
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self, image_format='jpeg', channels=3):
"""Class constructor.
Args:
image_format: Image format. Only 'jpeg', 'jpg', or 'png' are supported.
channels: Image channels.
"""
with tf.Graph().as_default():
self._decode_data = tf.placeholder(dtype=tf.string)
self._image_format = image_format
self._session = tf.Session()
if self._image_format in ('jpeg', 'jpg'):
self._decode = tf.image.decode_jpeg(self._decode_data,
channels=channels)
elif self._image_format == 'png':
self._decode = tf.image.decode_png(self._decode_data,
channels=channels)
def read_image_dims(self, image_data):
"""Reads the image dimensions.
Args:
image_data: string of image data.
Returns:
image_height and image_width.
"""
image = self.decode_image(image_data)
return image.shape[:2]
def decode_image(self, image_data):
"""Decodes the image data string.
Args:
image_data: string of image data.
Returns:
Decoded image data.
Raises:
ValueError: Value of image channels not supported.
"""
image = self._session.run(self._decode,
feed_dict={self._decode_data: image_data})
if len(image.shape) != 3 or image.shape[2] not in (1, 3):
raise ValueError('The image channels not supported.')
return image
def _int64_list_feature(values):
"""Returns a TF-Feature of int64_list.
Args:
values: A scalar or list of values.
Returns:
A TF-Feature.
"""
if not isinstance(values, collections.Iterable):
values = [values]
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_list_feature(values):
"""Returns a TF-Feature of bytes.
Args:
values: A string.
Returns:
A TF-Feature.
"""
def norm2bytes(value):
return value.encode() if isinstance(value, str) and six.PY3 else value
return tf.train.Feature(
bytes_list=tf.train.BytesList(value=[norm2bytes(values)]))
def image_seg_to_tfexample(image_data, filename, height, width, seg_data):
"""Converts one image/segmentation pair to tf example.
Args:
image_data: string of image data.
filename: image filename.
height: image height.
width: image width.
seg_data: string of semantic segmentation data.
Returns:
tf example of one image/segmentation pair.
"""
return tf.train.Example(features=tf.train.Features(feature={
'image/encoded': _bytes_list_feature(image_data),
'image/filename': _bytes_list_feature(filename),
'image/format': _bytes_list_feature(
_IMAGE_FORMAT_MAP[FLAGS.image_format]),
'image/height': _int64_list_feature(height),
'image/width': _int64_list_feature(width),
'image/channels': _int64_list_feature(3),
'image/segmentation/class/encoded': (
_bytes_list_feature(seg_data)),
'image/segmentation/class/format': _bytes_list_feature(
FLAGS.label_format),
}))
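# Illustrative end-to-end sketch (not part of the original file). The paths are
# hypothetical, and FLAGS are assumed to be parsed already (e.g. inside a
# program launched via tf.app.run), since image_seg_to_tfexample reads them.
def _example_image_seg_to_tfexample(image_path='/tmp/image.png',
                                    seg_path='/tmp/label.png'):
  """Builds one Example proto for a hypothetical image/label pair on disk."""
  image_reader = ImageReader('png', channels=3)
  label_reader = ImageReader('png', channels=1)
  image_data = tf.gfile.GFile(image_path, 'rb').read()
  seg_data = tf.gfile.GFile(seg_path, 'rb').read()
  height, width = image_reader.read_image_dims(image_data)
  seg_height, seg_width = label_reader.read_image_dims(seg_data)
  if height != seg_height or width != seg_width:
    raise RuntimeError('Shape mismatch between image and its segmentation.')
  return image_seg_to_tfexample(image_data, 'image', height, width, seg_data)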
| 4,909 | 29.308642 | 80 | py |
models | models-master/research/deeplab/datasets/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/deeplab/datasets/data_generator_test.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for deeplab.datasets.data_generator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import range
import tensorflow as tf
from deeplab import common
from deeplab.datasets import data_generator
ImageAttributes = collections.namedtuple(
'ImageAttributes', ['image', 'label', 'height', 'width', 'image_name'])
class DatasetTest(tf.test.TestCase):
  # Note: the training dataset cannot be tested deterministically because it
  # contains a shuffle operation. With shuffling disabled, the training dataset
  # behaves the same as the validation dataset, so it is not tested separately.
def testPascalVocSegTestData(self):
dataset = data_generator.Dataset(
dataset_name='pascal_voc_seg',
split_name='val',
dataset_dir=
'deeplab/testing/pascal_voc_seg',
batch_size=1,
crop_size=[3, 3], # Use small size for testing.
min_resize_value=3,
max_resize_value=3,
resize_factor=None,
min_scale_factor=0.01,
max_scale_factor=2.0,
scale_factor_step_size=0.25,
is_training=False,
model_variant='mobilenet_v2')
self.assertAllEqual(dataset.num_of_classes, 21)
self.assertAllEqual(dataset.ignore_label, 255)
num_of_images = 3
with self.test_session() as sess:
iterator = dataset.get_one_shot_iterator()
for i in range(num_of_images):
batch = iterator.get_next()
batch, = sess.run([batch])
image_attributes = _get_attributes_of_image(i)
self.assertEqual(batch[common.HEIGHT][0], image_attributes.height)
self.assertEqual(batch[common.WIDTH][0], image_attributes.width)
self.assertEqual(batch[common.IMAGE_NAME][0],
image_attributes.image_name.encode())
# All data have been read.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError, ''):
sess.run([iterator.get_next()])
def _get_attributes_of_image(index):
"""Gets the attributes of the image.
Args:
index: Index of image in all images.
Returns:
Attributes of the image in the format of ImageAttributes.
Raises:
ValueError: If index is of wrong value.
"""
if index == 0:
return ImageAttributes(
image=None,
label=None,
height=366,
width=500,
image_name='2007_000033')
elif index == 1:
return ImageAttributes(
image=None,
label=None,
height=335,
width=500,
image_name='2007_000042')
elif index == 2:
return ImageAttributes(
image=None,
label=None,
height=333,
width=500,
image_name='2007_000061')
else:
raise ValueError('Index can only be 0, 1 or 2.')
if __name__ == '__main__':
tf.test.main()
| 3,546 | 29.577586 | 80 | py |
models | models-master/research/deeplab/utils/get_dataset_colormap_test.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for get_dataset_colormap.py."""
import numpy as np
import tensorflow as tf
from deeplab.utils import get_dataset_colormap
class VisualizationUtilTest(tf.test.TestCase):
def testBitGet(self):
"""Test that if the returned bit value is correct."""
self.assertEqual(1, get_dataset_colormap.bit_get(9, 0))
self.assertEqual(0, get_dataset_colormap.bit_get(9, 1))
self.assertEqual(0, get_dataset_colormap.bit_get(9, 2))
self.assertEqual(1, get_dataset_colormap.bit_get(9, 3))
def testPASCALLabelColorMapValue(self):
"""Test the getd color map value."""
colormap = get_dataset_colormap.create_pascal_label_colormap()
# Only test a few sampled entries in the color map.
self.assertTrue(np.array_equal([128., 0., 128.], colormap[5, :]))
self.assertTrue(np.array_equal([128., 192., 128.], colormap[23, :]))
self.assertTrue(np.array_equal([128., 0., 192.], colormap[37, :]))
self.assertTrue(np.array_equal([224., 192., 192.], colormap[127, :]))
self.assertTrue(np.array_equal([192., 160., 192.], colormap[175, :]))
def testLabelToPASCALColorImage(self):
"""Test the value of the converted label value."""
label = np.array([[0, 16, 16], [52, 7, 52]])
expected_result = np.array([
[[0, 0, 0], [0, 64, 0], [0, 64, 0]],
[[0, 64, 192], [128, 128, 128], [0, 64, 192]]
])
colored_label = get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_pascal_name())
self.assertTrue(np.array_equal(expected_result, colored_label))
def testUnExpectedLabelValueForLabelToPASCALColorImage(self):
"""Raise ValueError when input value exceeds range."""
label = np.array([[120], [600]])
with self.assertRaises(ValueError):
get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_pascal_name())
def testUnExpectedLabelDimensionForLabelToPASCALColorImage(self):
"""Raise ValueError if input dimension is not correct."""
label = np.array([120])
with self.assertRaises(ValueError):
get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_pascal_name())
def testGetColormapForUnsupportedDataset(self):
with self.assertRaises(ValueError):
get_dataset_colormap.create_label_colormap('unsupported_dataset')
def testUnExpectedLabelDimensionForLabelToADE20KColorImage(self):
label = np.array([250])
with self.assertRaises(ValueError):
get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_ade20k_name())
def testFirstColorInADE20KColorMap(self):
label = np.array([[1, 3], [10, 20]])
expected_result = np.array([
[[120, 120, 120], [6, 230, 230]],
[[4, 250, 7], [204, 70, 3]]
])
colored_label = get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_ade20k_name())
self.assertTrue(np.array_equal(colored_label, expected_result))
def testMapillaryVistasColorMapValue(self):
colormap = get_dataset_colormap.create_mapillary_vistas_label_colormap()
self.assertTrue(np.array_equal([190, 153, 153], colormap[3, :]))
self.assertTrue(np.array_equal([102, 102, 156], colormap[6, :]))
if __name__ == '__main__':
tf.test.main()
| 3,979 | 39.612245 | 80 | py |
models | models-master/research/deeplab/utils/train_utils.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for training."""
import six
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from deeplab.core import preprocess_utils
from deeplab.core import utils
def _div_maybe_zero(total_loss, num_present):
"""Normalizes the total loss with the number of present pixels."""
return tf.to_float(num_present > 0) * tf.math.divide(
total_loss,
tf.maximum(1e-5, num_present))
def add_softmax_cross_entropy_loss_for_each_scale(scales_to_logits,
labels,
num_classes,
ignore_label,
loss_weight=1.0,
upsample_logits=True,
hard_example_mining_step=0,
top_k_percent_pixels=1.0,
gt_is_matting_map=False,
scope=None):
"""Adds softmax cross entropy loss for logits of each scale.
Args:
scales_to_logits: A map from logits names for different scales to logits.
The logits have shape [batch, logits_height, logits_width, num_classes].
labels: Groundtruth labels with shape [batch, image_height, image_width, 1].
num_classes: Integer, number of target classes.
ignore_label: Integer, label to ignore.
loss_weight: A float or a list of loss weights. If it is a float, it means
all the labels have the same weight. If it is a list of weights, then each
element in the list represents the weight for the label of its index, for
example, loss_weight = [0.1, 0.5] means the weight for label 0 is 0.1 and
the weight for label 1 is 0.5.
upsample_logits: Boolean, upsample logits or not.
hard_example_mining_step: An integer, the training step in which the hard
      example mining kicks off. Note that we gradually reduce the mining
percent to the top_k_percent_pixels. For example, if
hard_example_mining_step = 100K and top_k_percent_pixels = 0.25, then
mining percent will gradually reduce from 100% to 25% until 100K steps
after which we only mine top 25% pixels.
top_k_percent_pixels: A float, the value lies in [0.0, 1.0]. When its value
< 1.0, only compute the loss for the top k percent pixels (e.g., the top
20% pixels). This is useful for hard pixel mining.
gt_is_matting_map: If true, the groundtruth is a matting map of confidence
score. If false, the groundtruth is an integer valued class mask.
scope: String, the scope for the loss.
Raises:
ValueError: Label or logits is None, or groundtruth is matting map while
label is not floating value.
"""
if labels is None:
raise ValueError('No label for softmax cross entropy loss.')
# If input groundtruth is a matting map of confidence, check if the input
# labels are floating point values.
if gt_is_matting_map and not labels.dtype.is_floating:
raise ValueError('Labels must be floats if groundtruth is a matting map.')
for scale, logits in six.iteritems(scales_to_logits):
loss_scope = None
if scope:
loss_scope = '%s_%s' % (scope, scale)
if upsample_logits:
# Label is not downsampled, and instead we upsample logits.
logits = tf.image.resize_bilinear(
logits,
preprocess_utils.resolve_shape(labels, 4)[1:3],
align_corners=True)
scaled_labels = labels
else:
# Label is downsampled to the same size as logits.
# When gt_is_matting_map = true, label downsampling with nearest neighbor
# method may introduce artifacts. However, to avoid ignore_label from
# being interpolated with other labels, we still perform nearest neighbor
# interpolation.
# TODO(huizhongc): Change to bilinear interpolation by processing padded
# and non-padded label separately.
if gt_is_matting_map:
tf.logging.warning(
'Label downsampling with nearest neighbor may introduce artifacts.')
scaled_labels = tf.image.resize_nearest_neighbor(
labels,
preprocess_utils.resolve_shape(logits, 4)[1:3],
align_corners=True)
scaled_labels = tf.reshape(scaled_labels, shape=[-1])
weights = utils.get_label_weight_mask(
scaled_labels, ignore_label, num_classes, label_weights=loss_weight)
# Dimension of keep_mask is equal to the total number of pixels.
keep_mask = tf.cast(
tf.not_equal(scaled_labels, ignore_label), dtype=tf.float32)
train_labels = None
logits = tf.reshape(logits, shape=[-1, num_classes])
if gt_is_matting_map:
# When the groundtruth is integer label mask, we can assign class
# dependent label weights to the loss. When the groundtruth is image
# matting confidence, we do not apply class-dependent label weight (i.e.,
# label_weight = 1.0).
if loss_weight != 1.0:
raise ValueError(
'loss_weight must equal to 1 if groundtruth is matting map.')
# Assign label value 0 to ignore pixels. The exact label value of ignore
# pixel does not matter, because those ignore_value pixel losses will be
# multiplied to 0 weight.
train_labels = scaled_labels * keep_mask
train_labels = tf.expand_dims(train_labels, 1)
train_labels = tf.concat([1 - train_labels, train_labels], axis=1)
else:
train_labels = tf.one_hot(
scaled_labels, num_classes, on_value=1.0, off_value=0.0)
default_loss_scope = ('softmax_all_pixel_loss'
if top_k_percent_pixels == 1.0 else
'softmax_hard_example_mining')
with tf.name_scope(loss_scope, default_loss_scope,
[logits, train_labels, weights]):
# Compute the loss for all pixels.
pixel_losses = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.stop_gradient(
train_labels, name='train_labels_stop_gradient'),
logits=logits,
name='pixel_losses')
weighted_pixel_losses = tf.multiply(pixel_losses, weights)
if top_k_percent_pixels == 1.0:
total_loss = tf.reduce_sum(weighted_pixel_losses)
num_present = tf.reduce_sum(keep_mask)
loss = _div_maybe_zero(total_loss, num_present)
tf.losses.add_loss(loss)
else:
num_pixels = tf.to_float(tf.shape(logits)[0])
# Compute the top_k_percent pixels based on current training step.
if hard_example_mining_step == 0:
# Directly focus on the top_k pixels.
top_k_pixels = tf.to_int32(top_k_percent_pixels * num_pixels)
else:
# Gradually reduce the mining percent to top_k_percent_pixels.
global_step = tf.to_float(tf.train.get_or_create_global_step())
ratio = tf.minimum(1.0, global_step / hard_example_mining_step)
top_k_pixels = tf.to_int32(
(ratio * top_k_percent_pixels + (1.0 - ratio)) * num_pixels)
top_k_losses, _ = tf.nn.top_k(weighted_pixel_losses,
k=top_k_pixels,
sorted=True,
name='top_k_percent_pixels')
total_loss = tf.reduce_sum(top_k_losses)
num_present = tf.reduce_sum(
tf.to_float(tf.not_equal(top_k_losses, 0.0)))
loss = _div_maybe_zero(total_loss, num_present)
tf.losses.add_loss(loss)
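# Illustrative sketch (not part of the original file) of how this helper is
# typically called; the logits/labels tensors are assumed to come from the
# model and the input pipeline.
#
#   add_softmax_cross_entropy_loss_for_each_scale(
#       scales_to_logits={'merged_logits': logits},  # [batch, h, w, num_classes]
#       labels=labels,                               # [batch, H, W, 1]
#       num_classes=21,
#       ignore_label=255,
#       upsample_logits=True,
#       hard_example_mining_step=100000,
#       top_k_percent_pixels=0.25)
#
# With these settings the mining percent decays linearly with the global step:
# at step 50000, ratio = 0.5 and the loss keeps the top
# 0.5 * 0.25 + 0.5 = 62.5% of pixels; from step 100000 onward only the hardest
# 25% of pixels contribute.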
def get_model_init_fn(train_logdir,
tf_initial_checkpoint,
initialize_last_layer,
last_layers,
ignore_missing_vars=False):
"""Gets the function initializing model variables from a checkpoint.
Args:
train_logdir: Log directory for training.
tf_initial_checkpoint: TensorFlow checkpoint for initialization.
initialize_last_layer: Initialize last layer or not.
last_layers: Last layers of the model.
ignore_missing_vars: Ignore missing variables in the checkpoint.
Returns:
Initialization function.
"""
if tf_initial_checkpoint is None:
tf.logging.info('Not initializing the model from a checkpoint.')
return None
if tf.train.latest_checkpoint(train_logdir):
tf.logging.info('Ignoring initialization; other checkpoint exists')
return None
tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint)
# Variables that will not be restored.
exclude_list = ['global_step']
if not initialize_last_layer:
exclude_list.extend(last_layers)
variables_to_restore = contrib_framework.get_variables_to_restore(
exclude=exclude_list)
if variables_to_restore:
init_op, init_feed_dict = contrib_framework.assign_from_checkpoint(
tf_initial_checkpoint,
variables_to_restore,
ignore_missing_vars=ignore_missing_vars)
global_step = tf.train.get_or_create_global_step()
def restore_fn(sess):
sess.run(init_op, init_feed_dict)
sess.run([global_step])
return restore_fn
return None
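# Illustrative sketch (not part of the original file): the returned callable is
# typically passed as `init_fn` to a slim-style training loop. The paths and
# scope names below are hypothetical, and tf.contrib.slim is assumed to be
# available as `slim`.
#
#   init_fn = get_model_init_fn(
#       train_logdir='/tmp/train_logs',
#       tf_initial_checkpoint='/tmp/init/model.ckpt',
#       initialize_last_layer=False,
#       last_layers=['logits'],
#       ignore_missing_vars=True)
#   slim.learning.train(train_tensor, logdir='/tmp/train_logs', init_fn=init_fn)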
def get_model_gradient_multipliers(last_layers, last_layer_gradient_multiplier):
"""Gets the gradient multipliers.
The gradient multipliers will adjust the learning rates for model
variables. For the task of semantic segmentation, the models are
usually fine-tuned from the models trained on the task of image
  classification. To fine-tune the models, we usually set a larger (e.g.,
  10 times larger) learning rate for the parameters of the last layer.
Args:
last_layers: Scopes of last layers.
last_layer_gradient_multiplier: The gradient multiplier for last layers.
Returns:
The gradient multiplier map with variables as key, and multipliers as value.
"""
gradient_multipliers = {}
for var in tf.model_variables():
# Double the learning rate for biases.
if 'biases' in var.op.name:
gradient_multipliers[var.op.name] = 2.
# Use larger learning rate for last layer variables.
for layer in last_layers:
if layer in var.op.name and 'biases' in var.op.name:
gradient_multipliers[var.op.name] = 2 * last_layer_gradient_multiplier
break
elif layer in var.op.name:
gradient_multipliers[var.op.name] = last_layer_gradient_multiplier
break
return gradient_multipliers
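# Worked example (illustrative, not part of the original file). With
# last_layers=['logits'] and last_layer_gradient_multiplier=10.0, a variable
# named 'logits/biases' gets multiplier 2 * 10 = 20, 'logits/weights' gets 10,
# and any other '.../biases' variable gets 2; all remaining variables keep the
# base learning rate. The map can then be consumed by a gradient-multiplier
# utility such as slim.learning.multiply_gradients (tf.contrib.slim assumed):
#
#   multipliers = get_model_gradient_multipliers(['logits'], 10.0)
#   grads_and_vars = optimizer.compute_gradients(total_loss)
#   grads_and_vars = slim.learning.multiply_gradients(grads_and_vars, multipliers)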
def get_model_learning_rate(learning_policy,
base_learning_rate,
learning_rate_decay_step,
learning_rate_decay_factor,
training_number_of_steps,
learning_power,
slow_start_step,
slow_start_learning_rate,
slow_start_burnin_type='none',
decay_steps=0.0,
end_learning_rate=0.0,
boundaries=None,
boundary_learning_rates=None):
"""Gets model's learning rate.
Computes the model's learning rate for different learning policy.
Right now, only "step" and "poly" are supported.
(1) The learning policy for "step" is computed as follows:
current_learning_rate = base_learning_rate *
learning_rate_decay_factor ^ (global_step / learning_rate_decay_step)
See tf.train.exponential_decay for details.
(2) The learning policy for "poly" is computed as follows:
current_learning_rate = base_learning_rate *
(1 - global_step / training_number_of_steps) ^ learning_power
Args:
learning_policy: Learning rate policy for training.
base_learning_rate: The base learning rate for model training.
learning_rate_decay_step: Decay the base learning rate at a fixed step.
learning_rate_decay_factor: The rate to decay the base learning rate.
training_number_of_steps: Number of steps for training.
learning_power: Power used for 'poly' learning policy.
slow_start_step: Training model with small learning rate for the first
few steps.
slow_start_learning_rate: The learning rate employed during slow start.
slow_start_burnin_type: The burnin type for the slow start stage. Can be
`none` which means no burnin or `linear` which means the learning rate
increases linearly from slow_start_learning_rate and reaches
base_learning_rate after slow_start_steps.
decay_steps: Float, `decay_steps` for polynomial learning rate.
end_learning_rate: Float, `end_learning_rate` for polynomial learning rate.
boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
increasing entries.
boundary_learning_rates: A list of `Tensor`s or `float`s or `int`s that
specifies the values for the intervals defined by `boundaries`. It should
have one more element than `boundaries`, and all elements should have the
same type.
Returns:
Learning rate for the specified learning policy.
Raises:
ValueError: If learning policy or slow start burnin type is not recognized.
ValueError: If `boundaries` and `boundary_learning_rates` are not set for
multi_steps learning rate decay.
"""
global_step = tf.train.get_or_create_global_step()
adjusted_global_step = tf.maximum(global_step - slow_start_step, 0)
if decay_steps == 0.0:
tf.logging.info('Setting decay_steps to total training steps.')
decay_steps = training_number_of_steps - slow_start_step
if learning_policy == 'step':
learning_rate = tf.train.exponential_decay(
base_learning_rate,
adjusted_global_step,
learning_rate_decay_step,
learning_rate_decay_factor,
staircase=True)
elif learning_policy == 'poly':
learning_rate = tf.train.polynomial_decay(
base_learning_rate,
adjusted_global_step,
decay_steps=decay_steps,
end_learning_rate=end_learning_rate,
power=learning_power)
elif learning_policy == 'cosine':
learning_rate = tf.train.cosine_decay(
base_learning_rate,
adjusted_global_step,
training_number_of_steps - slow_start_step)
elif learning_policy == 'multi_steps':
if boundaries is None or boundary_learning_rates is None:
raise ValueError('Must set `boundaries` and `boundary_learning_rates` '
'for multi_steps learning rate decay.')
learning_rate = tf.train.piecewise_constant_decay(
adjusted_global_step,
boundaries,
boundary_learning_rates)
else:
raise ValueError('Unknown learning policy.')
adjusted_slow_start_learning_rate = slow_start_learning_rate
if slow_start_burnin_type == 'linear':
# Do linear burnin. Increase linearly from slow_start_learning_rate and
# reach base_learning_rate after (global_step >= slow_start_steps).
adjusted_slow_start_learning_rate = (
slow_start_learning_rate +
(base_learning_rate - slow_start_learning_rate) *
tf.to_float(global_step) / slow_start_step)
elif slow_start_burnin_type != 'none':
raise ValueError('Unknown burnin type.')
# Employ small learning rate at the first few steps for warm start.
return tf.where(global_step < slow_start_step,
adjusted_slow_start_learning_rate, learning_rate)
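# Worked example (illustrative, not part of the original file) for the 'poly'
# policy with base_learning_rate=1e-2, training_number_of_steps=30000,
# learning_power=0.9, slow_start_step=0, decay_steps=0.0 (i.e. 30000) and
# end_learning_rate=0.0:
#
#   lr(step)  = 1e-2 * (1 - step / 30000) ** 0.9
#   lr(0)     = 1.0e-2
#   lr(15000) = 1e-2 * 0.5 ** 0.9 ~= 5.4e-3
#   lr(30000) = 0.0
#
# For the 'step' policy, because staircase=True is used above, the schedule is
#   lr(step) = base_learning_rate * decay_factor ** floor(step / decay_step).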
| 16,167 | 42.345845 | 80 | py |
models | models-master/research/deeplab/utils/save_annotation.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Saves an annotation as one png image.
This script saves an annotation as one png image, and has the option to add
a colormap to the png image for better visualization.
"""
import numpy as np
import PIL.Image as img
import tensorflow as tf
from deeplab.utils import get_dataset_colormap
def save_annotation(label,
save_dir,
filename,
add_colormap=True,
normalize_to_unit_values=False,
scale_values=False,
colormap_type=get_dataset_colormap.get_pascal_name()):
"""Saves the given label to image on disk.
Args:
label: The numpy array to be saved. The data will be converted
to uint8 and saved as png image.
save_dir: String, the directory to which the results will be saved.
filename: String, the image filename.
add_colormap: Boolean, add color map to the label or not.
normalize_to_unit_values: Boolean, normalize the input values to [0, 1].
scale_values: Boolean, scale the input values to [0, 255] for visualization.
colormap_type: String, colormap type for visualization.
"""
# Add colormap for visualizing the prediction.
if add_colormap:
colored_label = get_dataset_colormap.label_to_color_image(
label, colormap_type)
else:
colored_label = label
if normalize_to_unit_values:
min_value = np.amin(colored_label)
max_value = np.amax(colored_label)
range_value = max_value - min_value
if range_value != 0:
colored_label = (colored_label - min_value) / range_value
if scale_values:
colored_label = 255. * colored_label
pil_image = img.fromarray(colored_label.astype(dtype=np.uint8))
with tf.gfile.Open('%s/%s.png' % (save_dir, filename), mode='w') as f:
pil_image.save(f, 'PNG')
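# Illustrative sketch (not part of the original file): saving one predicted
# label map twice, once color-mapped for visualization and once with raw ids.
# The output directory is hypothetical.
#
#   prediction = np.array([[0, 15], [15, 0]])  # PASCAL class ids.
#   save_annotation(prediction, '/tmp/vis', 'sample_color', add_colormap=True)
#   save_annotation(prediction, '/tmp/vis', 'sample_raw', add_colormap=False)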
| 2,540 | 36.925373 | 80 | py |
models | models-master/research/deeplab/utils/get_dataset_colormap.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Visualizes the segmentation results via specified color map.
Visualizes the semantic segmentation results by the color map
defined by the different datasets. Supported colormaps are:
* ADE20K (http://groups.csail.mit.edu/vision/datasets/ADE20K/).
* Cityscapes dataset (https://www.cityscapes-dataset.com).
* Mapillary Vistas (https://research.mapillary.com).
* PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
# Dataset names.
_ADE20K = 'ade20k'
_CITYSCAPES = 'cityscapes'
_MAPILLARY_VISTAS = 'mapillary_vistas'
_PASCAL = 'pascal'
# Max number of entries in the colormap for each dataset.
_DATASET_MAX_ENTRIES = {
_ADE20K: 151,
_CITYSCAPES: 256,
_MAPILLARY_VISTAS: 66,
_PASCAL: 512,
}
def create_ade20k_label_colormap():
"""Creates a label colormap used in ADE20K segmentation benchmark.
Returns:
A colormap for visualizing segmentation results.
"""
return np.asarray([
[0, 0, 0],
[120, 120, 120],
[180, 120, 120],
[6, 230, 230],
[80, 50, 50],
[4, 200, 3],
[120, 120, 80],
[140, 140, 140],
[204, 5, 255],
[230, 230, 230],
[4, 250, 7],
[224, 5, 255],
[235, 255, 7],
[150, 5, 61],
[120, 120, 70],
[8, 255, 51],
[255, 6, 82],
[143, 255, 140],
[204, 255, 4],
[255, 51, 7],
[204, 70, 3],
[0, 102, 200],
[61, 230, 250],
[255, 6, 51],
[11, 102, 255],
[255, 7, 71],
[255, 9, 224],
[9, 7, 230],
[220, 220, 220],
[255, 9, 92],
[112, 9, 255],
[8, 255, 214],
[7, 255, 224],
[255, 184, 6],
[10, 255, 71],
[255, 41, 10],
[7, 255, 255],
[224, 255, 8],
[102, 8, 255],
[255, 61, 6],
[255, 194, 7],
[255, 122, 8],
[0, 255, 20],
[255, 8, 41],
[255, 5, 153],
[6, 51, 255],
[235, 12, 255],
[160, 150, 20],
[0, 163, 255],
[140, 140, 140],
[250, 10, 15],
[20, 255, 0],
[31, 255, 0],
[255, 31, 0],
[255, 224, 0],
[153, 255, 0],
[0, 0, 255],
[255, 71, 0],
[0, 235, 255],
[0, 173, 255],
[31, 0, 255],
[11, 200, 200],
[255, 82, 0],
[0, 255, 245],
[0, 61, 255],
[0, 255, 112],
[0, 255, 133],
[255, 0, 0],
[255, 163, 0],
[255, 102, 0],
[194, 255, 0],
[0, 143, 255],
[51, 255, 0],
[0, 82, 255],
[0, 255, 41],
[0, 255, 173],
[10, 0, 255],
[173, 255, 0],
[0, 255, 153],
[255, 92, 0],
[255, 0, 255],
[255, 0, 245],
[255, 0, 102],
[255, 173, 0],
[255, 0, 20],
[255, 184, 184],
[0, 31, 255],
[0, 255, 61],
[0, 71, 255],
[255, 0, 204],
[0, 255, 194],
[0, 255, 82],
[0, 10, 255],
[0, 112, 255],
[51, 0, 255],
[0, 194, 255],
[0, 122, 255],
[0, 255, 163],
[255, 153, 0],
[0, 255, 10],
[255, 112, 0],
[143, 255, 0],
[82, 0, 255],
[163, 255, 0],
[255, 235, 0],
[8, 184, 170],
[133, 0, 255],
[0, 255, 92],
[184, 0, 255],
[255, 0, 31],
[0, 184, 255],
[0, 214, 255],
[255, 0, 112],
[92, 255, 0],
[0, 224, 255],
[112, 224, 255],
[70, 184, 160],
[163, 0, 255],
[153, 0, 255],
[71, 255, 0],
[255, 0, 163],
[255, 204, 0],
[255, 0, 143],
[0, 255, 235],
[133, 255, 0],
[255, 0, 235],
[245, 0, 255],
[255, 0, 122],
[255, 245, 0],
[10, 190, 212],
[214, 255, 0],
[0, 204, 255],
[20, 0, 255],
[255, 255, 0],
[0, 153, 255],
[0, 41, 255],
[0, 255, 204],
[41, 0, 255],
[41, 255, 0],
[173, 0, 255],
[0, 245, 255],
[71, 0, 255],
[122, 0, 255],
[0, 255, 184],
[0, 92, 255],
[184, 255, 0],
[0, 133, 255],
[255, 214, 0],
[25, 194, 194],
[102, 255, 0],
[92, 0, 255],
])
def create_cityscapes_label_colormap():
"""Creates a label colormap used in CITYSCAPES segmentation benchmark.
Returns:
A colormap for visualizing segmentation results.
"""
colormap = np.zeros((256, 3), dtype=np.uint8)
colormap[0] = [128, 64, 128]
colormap[1] = [244, 35, 232]
colormap[2] = [70, 70, 70]
colormap[3] = [102, 102, 156]
colormap[4] = [190, 153, 153]
colormap[5] = [153, 153, 153]
colormap[6] = [250, 170, 30]
colormap[7] = [220, 220, 0]
colormap[8] = [107, 142, 35]
colormap[9] = [152, 251, 152]
colormap[10] = [70, 130, 180]
colormap[11] = [220, 20, 60]
colormap[12] = [255, 0, 0]
colormap[13] = [0, 0, 142]
colormap[14] = [0, 0, 70]
colormap[15] = [0, 60, 100]
colormap[16] = [0, 80, 100]
colormap[17] = [0, 0, 230]
colormap[18] = [119, 11, 32]
return colormap
def create_mapillary_vistas_label_colormap():
"""Creates a label colormap used in Mapillary Vistas segmentation benchmark.
Returns:
A colormap for visualizing segmentation results.
"""
return np.asarray([
[165, 42, 42],
[0, 192, 0],
[196, 196, 196],
[190, 153, 153],
[180, 165, 180],
[102, 102, 156],
[102, 102, 156],
[128, 64, 255],
[140, 140, 200],
[170, 170, 170],
[250, 170, 160],
[96, 96, 96],
[230, 150, 140],
[128, 64, 128],
[110, 110, 110],
[244, 35, 232],
[150, 100, 100],
[70, 70, 70],
[150, 120, 90],
[220, 20, 60],
[255, 0, 0],
[255, 0, 0],
[255, 0, 0],
[200, 128, 128],
[255, 255, 255],
[64, 170, 64],
[128, 64, 64],
[70, 130, 180],
[255, 255, 255],
[152, 251, 152],
[107, 142, 35],
[0, 170, 30],
[255, 255, 128],
[250, 0, 30],
[0, 0, 0],
[220, 220, 220],
[170, 170, 170],
[222, 40, 40],
[100, 170, 30],
[40, 40, 40],
[33, 33, 33],
[170, 170, 170],
[0, 0, 142],
[170, 170, 170],
[210, 170, 100],
[153, 153, 153],
[128, 128, 128],
[0, 0, 142],
[250, 170, 30],
[192, 192, 192],
[220, 220, 0],
[180, 165, 180],
[119, 11, 32],
[0, 0, 142],
[0, 60, 100],
[0, 0, 142],
[0, 0, 90],
[0, 0, 230],
[0, 80, 100],
[128, 64, 64],
[0, 0, 110],
[0, 0, 70],
[0, 0, 192],
[32, 32, 32],
[0, 0, 0],
[0, 0, 0],
])
def create_pascal_label_colormap():
"""Creates a label colormap used in PASCAL VOC segmentation benchmark.
Returns:
A colormap for visualizing segmentation results.
"""
colormap = np.zeros((_DATASET_MAX_ENTRIES[_PASCAL], 3), dtype=int)
ind = np.arange(_DATASET_MAX_ENTRIES[_PASCAL], dtype=int)
for shift in reversed(list(range(8))):
for channel in range(3):
colormap[:, channel] |= bit_get(ind, channel) << shift
ind >>= 3
return colormap
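# Worked example (illustrative, not part of the original file) of the bit
# shuffling above: label 5 is 0b101, so its bits (b0, b1, b2) = (1, 0, 1) are
# spread over the (R, G, B) channels at the highest shift on the first
# iteration, giving R = 1 << 7 = 128, G = 0, B = 1 << 7 = 128, i.e.
# colormap[5] == [128, 0, 128]. The next three label bits fill shift 6, and so
# on, which keeps all 512 entries distinct.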
def get_ade20k_name():
return _ADE20K
def get_cityscapes_name():
return _CITYSCAPES
def get_mapillary_vistas_name():
return _MAPILLARY_VISTAS
def get_pascal_name():
return _PASCAL
def bit_get(val, idx):
"""Gets the bit value.
Args:
val: Input value, int or numpy int array.
idx: Which bit of the input val.
Returns:
The "idx"-th bit of input val.
"""
return (val >> idx) & 1
def create_label_colormap(dataset=_PASCAL):
"""Creates a label colormap for the specified dataset.
Args:
dataset: The colormap used in the dataset.
Returns:
A numpy array of the dataset colormap.
Raises:
ValueError: If the dataset is not supported.
"""
if dataset == _ADE20K:
return create_ade20k_label_colormap()
elif dataset == _CITYSCAPES:
return create_cityscapes_label_colormap()
elif dataset == _MAPILLARY_VISTAS:
return create_mapillary_vistas_label_colormap()
elif dataset == _PASCAL:
return create_pascal_label_colormap()
else:
raise ValueError('Unsupported dataset.')
def label_to_color_image(label, dataset=_PASCAL):
"""Adds color defined by the dataset colormap to the label.
Args:
label: A 2D array with integer type, storing the segmentation label.
dataset: The colormap used in the dataset.
Returns:
    result: A 3D array of the same height and width as the label. Each element
      holds the color indexed by the corresponding element in the input label
      according to the dataset colormap.
Raises:
ValueError: If label is not of rank 2 or its value is larger than color
map maximum entry.
"""
if label.ndim != 2:
raise ValueError('Expect 2-D input label. Got {}'.format(label.shape))
if np.max(label) >= _DATASET_MAX_ENTRIES[dataset]:
raise ValueError(
'label value too large: {} >= {}.'.format(
np.max(label), _DATASET_MAX_ENTRIES[dataset]))
colormap = create_label_colormap(dataset)
return colormap[label]
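# Illustrative usage sketch (not part of the original file):
def _example_colorize_pascal_label():
  """Colorizes a tiny 2x2 PASCAL-style label map; returns a (2, 2, 3) array."""
  label = np.array([[0, 5], [5, 0]])
  return label_to_color_image(label, get_pascal_name())  # [[black, purple], ...]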
def get_dataset_colormap_max_entries(dataset):
return _DATASET_MAX_ENTRIES[dataset]
| 9,916 | 22.781775 | 80 | py |
models | models-master/research/deeplab/utils/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/seq_flow_lite/input_fn_reader.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods related to input datasets and readers."""
import functools
import sys
from absl import logging
import tensorflow as tf
from tensorflow import estimator as tf_estimator
import tensorflow_datasets as tfds
import tensorflow_text as tftext
from layers import projection_layers # import seq_flow_lite module
from utils import misc_utils # import seq_flow_lite module
def imdb_reviews(features, _):
return features["text"], features["label"]
def civil_comments(features, runner_config):
labels = runner_config["model_config"]["labels"]
label_tensor = tf.stack([features[label] for label in labels], axis=1)
label_tensor = tf.floor(label_tensor + 0.5)
return features["text"], label_tensor
def goemotions(features, runner_config):
labels = runner_config["model_config"]["labels"]
label_tensor = tf.stack([features[label] for label in labels], axis=1)
return features["comment_text"], tf.cast(label_tensor, tf.float32)
def create_input_fn(runner_config, mode, drop_remainder):
"""Returns an input function to use in the instantiation of tf.estimator.*."""
def _post_processor(features, batch_size):
"""Post process the data to a form expected by model_fn."""
data_processor = getattr(sys.modules[__name__], runner_config["dataset"])
text, label = data_processor(features, runner_config)
model_config = runner_config["model_config"]
if "max_seq_len" in model_config:
max_seq_len = model_config["max_seq_len"]
logging.info("Truncating text to have at most %d tokens", max_seq_len)
text = misc_utils.random_substr(text, max_seq_len)
text = tf.reshape(text, [batch_size])
num_classes = len(model_config["labels"])
label = tf.reshape(label, [batch_size, num_classes])
prxlayer = projection_layers.ProjectionLayer(model_config, mode)
projection, seq_length = prxlayer(text)
gbst_max_token_len = max_seq_len
if "gbst_max_token_len" in model_config:
gbst_max_token_len = model_config["gbst_max_token_len"]
byte_int = tftext.ByteSplitter().split(text).to_tensor(
default_value=0, shape=[batch_size, gbst_max_token_len])
token_ids = tf.cast(byte_int, tf.int32)
token_len = tf.strings.length(text)
mask = tf.cast(
tf.sequence_mask(token_len, maxlen=gbst_max_token_len), tf.int32)
mask *= 3
token_ids += mask
return {
"projection": projection,
"seq_length": seq_length,
"token_ids": token_ids,
"token_len": token_len,
"label": label
}
def _input_fn(params):
"""Method to be used for reading the data."""
assert mode != tf_estimator.ModeKeys.PREDICT
split = "train" if mode == tf_estimator.ModeKeys.TRAIN else "test"
ds = tfds.load(runner_config["dataset"], split=split)
ds = ds.batch(params["batch_size"], drop_remainder=drop_remainder)
ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
ds = ds.shuffle(buffer_size=100)
ds = ds.repeat(count=1 if mode == tf_estimator.ModeKeys.EVAL else None)
ds = ds.map(
functools.partial(_post_processor, batch_size=params["batch_size"]),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
deterministic=False)
return ds
return _input_fn
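# Illustrative sketch (not part of the original file): a minimal, made-up
# runner_config and how the returned input_fn is consumed. Real configs also
# carry the projection-related keys required by ProjectionLayer, which are
# omitted here.
#
#   runner_config = {
#       "dataset": "goemotions",
#       "model_config": {"labels": ["joy", "anger"], "max_seq_len": 128},
#   }
#   input_fn = create_input_fn(
#       runner_config, tf_estimator.ModeKeys.TRAIN, drop_remainder=True)
#   ds = input_fn({"batch_size": 32})
#   # Each element is a dict with "projection", "seq_length", "token_ids",
#   # "token_len" and "label" tensors.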
| 3,924 | 38.25 | 80 | py |
models | models-master/research/seq_flow_lite/export_to_tflite.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tool to export TFLite model."""
import importlib
import json
import os
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
import tensorflow_text as tftext
from layers import base_layers # import seq_flow_lite module
from layers import projection_layers # import seq_flow_lite module
from utils import tflite_utils # import seq_flow_lite module
FLAGS = flags.FLAGS
flags.DEFINE_string("output_dir", None, "The output or model directory.")
flags.DEFINE_enum("output", "sigmoid", ["logits", "sigmoid", "softmax"],
"Specification of the output tensor.")
def load_runner_config():
config = os.path.join(FLAGS.output_dir, "runner_config.txt")
with tf.gfile.Open(config, "r") as f:
return json.loads(f.read())
def main(_):
runner_config = load_runner_config()
model_config = runner_config["model_config"]
rel_module_path = "" # empty base dir
model = importlib.import_module(rel_module_path + runner_config["name"])
with tf.Graph().as_default() as graph:
with tf.Session(graph=graph) as session:
text = tf.placeholder(tf.string, shape=[1], name="Input")
inputs = [text]
if "pqrnn" in runner_config["name"]:
prxlayer = projection_layers.ProjectionLayer(model_config,
base_layers.TFLITE)
encoder = model.Encoder(model_config, base_layers.TFLITE)
projection, seq_length = prxlayer(text)
logits = encoder(projection, seq_length)
else:
byte_int = tftext.ByteSplitter().split(text)
token_ids = tf.cast(byte_int, tf.int32).to_tensor()
token_ids = tf.reshape(token_ids, [1, -1])
token_ids += 3
encoder = model.Encoder(model_config, base_layers.TFLITE)
logits = encoder(token_ids, None)
if FLAGS.output == "logits":
outputs = [logits]
elif FLAGS.output == "sigmoid":
outputs = [tf.math.sigmoid(logits)]
else:
assert FLAGS.output == "softmax", "Unexpected output"
outputs = [tf.nn.softmax(logits)]
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
saver = tf.train.Saver()
saver.restore(session, tf.train.latest_checkpoint(FLAGS.output_dir))
tflite_fb = tflite_utils.generate_tflite(session, graph, inputs, outputs)
output_file_name = os.path.join(FLAGS.output_dir, "tflite.fb")
with tf.gfile.Open(output_file_name, "wb") as f:
f.write(tflite_fb)
if __name__ == "__main__":
app.run(main)
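# Example invocation (illustrative, not part of the original file). The model
# directory is hypothetical and must contain runner_config.txt plus a trainer
# checkpoint:
#
#   python export_to_tflite.py --output_dir=/tmp/pqrnn_model --output=softmax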
| 3,244 | 37.630952 | 80 | py |
models | models-master/research/seq_flow_lite/metric_functions.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Metric functions."""
import tensorflow.compat.v1 as tf
def classification_metric(per_example_loss, label_ids, logits):
"""Compute eval metrics."""
return {
"accuracy":
tf.metrics.accuracy(label_ids, tf.math.argmax(logits, axis=-1)),
"eval_loss":
tf.metrics.mean(per_example_loss)
}
def labeling_metric(per_example_loss, label_ids, logits):
"""Compute eval metrics."""
scores = tf.math.sigmoid(logits)
binary_prediction = tf.math.greater_equal(scores, 0.5)
num_classes = label_ids.get_shape().as_list()[-1]
return_dict = {"eval_loss": tf.metrics.mean(per_example_loss)}
for idx in range(num_classes):
return_dict["auc/" + str(idx)] = tf.metrics.auc(label_ids[:, idx],
scores[:, idx])
return_dict["precision/" + str(idx)] = tf.metrics.precision(
label_ids[:, idx], binary_prediction[:, idx])
return_dict["recall/" + str(idx)] = tf.metrics.recall(
label_ids[:, idx], binary_prediction[:, idx])
return return_dict
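# Illustrative sketch (not part of the original file): these functions are
# designed to be used as the metric_fn of a TPUEstimatorSpec, mirroring the
# trainer, e.g.
#
#   eval_metrics = (classification_metric, [per_example_loss, label_ids, logits])
#   spec = tf_estimator.tpu.TPUEstimatorSpec(
#       mode=mode, loss=loss, eval_metrics=eval_metrics)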
| 1,737 | 39.418605 | 80 | py |
models | models-master/research/seq_flow_lite/trainer_v2.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary to train PRADO model with TF 2.0."""
import importlib
import json
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from tensorflow import estimator as tf_estimator
import input_fn_reader # import root module
FLAGS = flags.FLAGS
flags.DEFINE_string("config_path", None, "Path to a RunnerConfig.")
flags.DEFINE_enum("runner_mode", "train", ["train", "train_and_eval", "eval"],
"Runner mode.")
flags.DEFINE_string("master", None, "TensorFlow master URL.")
flags.DEFINE_string(
"output_dir", "/tmp/testV2",
"The output directory where the model checkpoints will be written.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def load_runner_config():
with tf.io.gfile.GFile(FLAGS.config_path, "r") as f:
return json.loads(f.read())
def compute_loss(logits, labels, model_config, mode):
"""Creates a sequence labeling model."""
if mode != tf_estimator.ModeKeys.PREDICT:
if not model_config["multilabel"]:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
else:
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits)
loss = tf.reduce_mean(loss)
else:
loss = None
return loss
def model_fn_builder(runner_config, mode):
"""Returns `model_fn` closure for TPUEstimator."""
rel_module_path = "" # empty base dir
model = importlib.import_module(rel_module_path + runner_config["name"])
model_config = runner_config["model_config"]
return model.Encoder(model_config, mode)
def main(_):
runner_config = load_runner_config()
if FLAGS.output_dir:
tf.io.gfile.makedirs(FLAGS.output_dir)
train_model = model_fn_builder(runner_config, tf_estimator.ModeKeys.TRAIN)
optimizer = tf.keras.optimizers.Adam()
train_input_fn = input_fn_reader.create_input_fn(
runner_config=runner_config,
mode=tf_estimator.ModeKeys.TRAIN,
drop_remainder=True)
params = {"batch_size": runner_config["batch_size"]}
train_ds = train_input_fn(params)
train_loss = tf.keras.metrics.Mean(name="train_loss")
@tf.function
def train_step(features):
with tf.GradientTape() as tape:
logits = train_model(features["projection"], features["seq_length"])
loss = compute_loss(logits, features["label"],
runner_config["model_config"],
tf_estimator.ModeKeys.TRAIN)
gradients = tape.gradient(loss, train_model.trainable_variables)
optimizer.apply_gradients(zip(gradients, train_model.trainable_variables))
train_loss(loss)
for epoch in range(1):
train_loss.reset_states()
for features in train_ds:
train_step(features)
step = optimizer.iterations.numpy()
if step % 100 == 0:
logging.info("Running step %s in epoch %s", step, epoch)
logging.info("Training loss: %s, epoch: %s, step: %s",
round(train_loss.result().numpy(), 4), epoch, step)
if __name__ == "__main__":
app.run(main)
| 3,869 | 32.947368 | 80 | py |
models | models-master/research/seq_flow_lite/trainer.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A utility for PRADO model to do train, eval, inference and model export."""
import importlib
import json
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
import input_fn_reader # import root module
import metric_functions # import root module
tf.disable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_string("config_path", None, "Path to a RunnerConfig.")
flags.DEFINE_enum("runner_mode", None, ["train", "train_and_eval", "eval"],
"Runner mode.")
flags.DEFINE_string("master", None, "TensorFlow master URL.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def load_runner_config():
with tf.gfile.GFile(FLAGS.config_path, "r") as f:
return json.loads(f.read())
def create_model(model, model_config, features, mode, model_name):
"""Creates a sequence labeling model."""
keras_model = model.Encoder(model_config, mode)
if any(model in model_name for model in ["pqrnn", "prado"]):
logits = keras_model(features["projection"], features["seq_length"])
else:
logits = keras_model(features["token_ids"], features["token_len"])
if mode != tf_estimator.ModeKeys.PREDICT:
if not model_config["multilabel"]:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=features["label"], logits=logits)
else:
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=features["label"], logits=logits)
loss = tf.reduce_mean(loss)
loss += tf.add_n(keras_model.losses)
else:
loss = None
return (loss, logits)
def create_optimizer(loss, runner_config, params):
"""Returns a train_op using Adam optimizer."""
learning_rate = tf.train.exponential_decay(
learning_rate=runner_config["learning_rate"],
global_step=tf.train.get_global_step(),
decay_steps=runner_config["learning_rate_decay_steps"],
decay_rate=runner_config["learning_rate_decay_rate"],
staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
if params["use_tpu"]:
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
return optimizer.minimize(loss, global_step=tf.train.get_global_step())
def model_fn_builder(runner_config):
"""Returns `model_fn` closure for TPUEstimator."""
rel_module_path = "" # empty base dir
model = importlib.import_module(rel_module_path + runner_config["name"])
def model_fn(features, mode, params):
"""The `model_fn` for TPUEstimator."""
label_ids = None
if mode != tf_estimator.ModeKeys.PREDICT:
label_ids = features["label"]
model_config = runner_config["model_config"]
loss, logits = create_model(model, model_config, features, mode,
runner_config["name"])
if mode == tf_estimator.ModeKeys.TRAIN:
train_op = create_optimizer(loss, runner_config, params)
return tf_estimator.tpu.TPUEstimatorSpec(
mode=mode, loss=loss, train_op=train_op)
elif mode == tf_estimator.ModeKeys.EVAL:
if not runner_config["model_config"]["multilabel"]:
metric_fn = metric_functions.classification_metric
else:
metric_fn = metric_functions.labeling_metric
eval_metrics = (metric_fn, [loss, label_ids, logits])
return tf_estimator.tpu.TPUEstimatorSpec(
mode=mode, loss=loss, eval_metrics=eval_metrics)
elif mode == tf_estimator.ModeKeys.PREDICT:
predictions = {"logits": logits}
if not runner_config["model_config"]["multilabel"]:
predictions["predictions"] = tf.nn.softmax(logits)
else:
predictions["predictions"] = tf.math.sigmoid(logits)
return tf_estimator.EstimatorSpec(mode=mode, predictions=predictions)
else:
assert False, "Expected to be called in TRAIN, EVAL, or PREDICT mode."
return model_fn
def main(_):
runner_config = load_runner_config()
if FLAGS.output_dir:
tf.gfile.MakeDirs(FLAGS.output_dir)
is_per_host = tf_estimator.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf_estimator.tpu.RunConfig(
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=runner_config["save_checkpoints_steps"],
keep_checkpoint_max=20,
tpu_config=tf_estimator.tpu.TPUConfig(
iterations_per_loop=runner_config["iterations_per_loop"],
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(runner_config)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
batch_size = runner_config["batch_size"]
estimator = tf_estimator.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=batch_size,
eval_batch_size=batch_size,
predict_batch_size=batch_size)
if FLAGS.runner_mode == "train":
train_input_fn = input_fn_reader.create_input_fn(
runner_config=runner_config,
mode=tf_estimator.ModeKeys.TRAIN,
drop_remainder=True)
estimator.train(
input_fn=train_input_fn, max_steps=runner_config["train_steps"])
elif FLAGS.runner_mode == "eval":
# TPU needs fixed shapes, so if the last batch is smaller, we drop it.
eval_input_fn = input_fn_reader.create_input_fn(
runner_config=runner_config,
mode=tf_estimator.ModeKeys.EVAL,
drop_remainder=True)
for _ in tf.train.checkpoints_iterator(FLAGS.output_dir, timeout=600):
result = estimator.evaluate(input_fn=eval_input_fn)
for key in sorted(result):
logging.info(" %s = %s", key, str(result[key]))
if __name__ == "__main__":
app.run(main)
| 6,640 | 35.489011 | 80 | py |
models | models-master/research/seq_flow_lite/models/pqrnn.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of pQRNN model."""
from absl import logging
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import misc_layers # import seq_flow_lite module
from layers import projection_layers # import seq_flow_lite module
from layers import qrnn_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class Encoder(tf.keras.layers.Layer):
"""A pQRNN keras model."""
def __init__(self, config, mode, **kwargs):
super(Encoder, self).__init__(**kwargs)
def _get_params(varname, default_value=None):
value = config[varname] if varname in config else default_value
default = "" if varname in config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
_get_params("projection_bottleneck_size")
_get_params("qrnn_state_size")
_get_params("qrnn_kernel_width", 3)
_get_params("qrnn_zoneout_probability")
_get_params("number_qrnn_layers")
_get_params("labels", [])
_get_params("num_labels", None)
_get_params("regularizer_scale")
_get_params("quantize")
self.num_classes = self.num_labels or len(self.labels)
self.parameters = base_layers.Parameters(
mode, quantize=self.quantize, regularizer_scale=self.regularizer_scale)
self.bottleneck_layer = dense_layers.BaseQDenseVarLen(
units=self.projection_bottleneck_size,
rank=3,
parameters=self.parameters)
self.qrnn_stack = qrnn_layers.QRNNBidirectionalStack(
parameters=self.parameters,
zoneout_probability=self.qrnn_zoneout_probability,
kwidth=self.qrnn_kernel_width,
state_size=self.qrnn_state_size,
num_layers=self.number_qrnn_layers)
self.attention_pool = misc_layers.AttentionPooling(
parameters=self.parameters)
if self.num_classes:
self.final_fc = dense_layers.BaseQDense(
units=self.num_classes,
rank=2,
parameters=self.parameters,
activation=None)
def call(self, projection, seq_length):
mask = tf.sequence_mask(
seq_length, tf.shape(projection)[1], dtype=tf.float32)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
maskr3 = tf.expand_dims(mask, axis=2)
if self.parameters.mode in [base_layers.TRAIN, base_layers.EVAL]:
projection = projection * maskr3
bottleneck = self.bottleneck_layer(projection, maskr3, inverse_normalizer)
outputs = self.qrnn_stack(bottleneck, maskr3, inverse_normalizer)
pre_logits = self.attention_pool(outputs, maskr3, inverse_normalizer)
if self.num_classes:
return self.final_fc(pre_logits)
else:
return pre_logits
class Model(Encoder):
def __init__(self, config, mode, **kwargs):
super(Model, self).__init__(config, mode, **kwargs)
self.projection = projection_layers.ProjectionLayer(config, mode)
def call(self, inputs):
projection, seq_length = self.projection(inputs)
return super(Model, self).call(projection, seq_length)
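# Illustrative configuration sketch (not part of the original file); the values
# below are made up, and the projection-specific keys that ProjectionLayer
# expects (for the Model subclass) are omitted.
#
#   config = {
#       "labels": ["negative", "positive"],
#       "projection_bottleneck_size": 64,
#       "qrnn_state_size": 128,
#       "qrnn_kernel_width": 3,
#       "qrnn_zoneout_probability": 0.5,
#       "number_qrnn_layers": 3,
#       "regularizer_scale": 1e-5,
#       "quantize": True,
#   }
#   encoder = Encoder(config, base_layers.TRAIN)
#   logits = encoder(projection, seq_length)  # shape [batch, 2]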
| 3,838 | 37.009901 | 80 | py |
models | models-master/research/seq_flow_lite/models/charformer.py | # Copyright 2022 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Charformer based model for in-training tokenization."""
from absl import logging
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import embedding_layers # import seq_flow_lite module
from layers import misc_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
from models import transformer_encoder # import seq_flow_lite module
class Encoder(tf.keras.layers.Layer):
"""Encoder with GBST and Transformer layers."""
def __init__(self, config, mode, **kwargs):
super(Encoder, self).__init__(**kwargs)
def _get_params(varname, default_value=None):
value = config[varname] if varname in config else default_value
default = "" if varname in config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
_get_params("labels", [])
_get_params("regularizer_scale")
_get_params("quantize")
_get_params("feature_size")
_get_params("bottleneck_size")
self.max_seq_len = config.get("max_seq_len", 128)
self.gbst_max_token_len = config.get("gbst_max_token_len", 128)
# Including 3 additional special token ids (0=padding, 1=EOS, 2=UNK).
self.vocabulary_size = config.get("vocabulary_size", 259)
self.parameters = base_layers.Parameters(
mode, quantize=self.quantize, regularizer_scale=self.regularizer_scale)
self.embedding = embedding_layers.EmbeddingLayer(
shape=[self.vocabulary_size, self.feature_size],
parameters=self.parameters)
self.gbst_downsample_rate = config.get("gbst_downsample_rate", 1)
self.positional_embedding = embedding_layers.EmbeddingLayer(
shape=[self.gbst_max_token_len, self.feature_size],
parameters=self.parameters)
self.ln = normalization_layers.LayerNormalization(
parameters=self.parameters)
self.qact = quantization_layers.ActivationQuantization(
parameters=self.parameters)
self.bottleneck_layer = None
gbst_size = self.feature_size
if self.bottleneck_size != self.feature_size:
self.bottleneck_layer = dense_layers.BaseQDenseVarLen(
self.bottleneck_size,
rank=3,
normalize=False,
activation=None,
parameters=self.parameters)
gbst_size = self.bottleneck_size
self.gbst_max_subword_block_width = config.get(
"gbst_max_subword_block_width", 5)
self.gbst_conv_kernel_size = config.get("gbst_conv_kernel_size", 5)
self.gbst_block_mixing_mode = config.get("gbst_block_mixing_mode", None)
self.gbst_layer = misc_layers.GBSTLayerV2(
feature_size=gbst_size,
max_seq_len=self.gbst_max_token_len,
downsample_rate=self.gbst_downsample_rate,
max_subword_block_width=self.gbst_max_subword_block_width,
conv_kernel_size=self.gbst_conv_kernel_size,
block_mixing_mode=self.gbst_block_mixing_mode,
parameters=self.parameters)
self.pool_windows = config.get("pool_windows", None)
if self.pool_windows:
self.transformer_encoder_layer = transformer_encoder.FunnelTransformerModel(
config, mode)
else:
self.transformer_encoder_layer = transformer_encoder.ModelWithEmbeddings(
config, mode)
self.attention_pool = misc_layers.AttentionPooling(
parameters=self.parameters)
self.num_classes = len(self.labels)
if self.num_classes:
self.final_fc = dense_layers.BaseQDense(
units=self.num_classes,
rank=2,
parameters=self.parameters,
activation=None)
def call(self, token_ids, seq_length):
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
mask_rank2 = tf.ones(tf.shape(token_ids), dtype=tf.int32)
seq_length = tf.reduce_sum(mask_rank2, axis=1)
pos_indices = tf.cumsum(mask_rank2, axis=1, exclusive=True)
pos_indices = tf.cast(pos_indices, dtype=tf.int32)
pos_indices = tf.reshape(pos_indices, [1, -1])
else:
mask_rank2 = tf.sequence_mask(
seq_length, tf.shape(token_ids)[1], dtype=tf.float32)
pos_indices = tf.cumsum(mask_rank2, axis=1, exclusive=True)
pos_indices = tf.cast(pos_indices, dtype=tf.int32)
input_values = self.embedding(token_ids)
pos_values = self.positional_embedding(pos_indices)
input_embeds = self.qact(self.ln(input_values + pos_values))
if self.bottleneck_layer is not None:
maskr3 = tf.expand_dims(mask_rank2, axis=2)
maskr3 = tf.cast(maskr3, tf.float32)
bottleneck_output = self.bottleneck_layer(input_embeds, maskr3)
else:
bottleneck_output = input_embeds
gbst_output = self.gbst_layer(bottleneck_output, seq_length)
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
mask_rank2 = tf.ones(tf.shape(gbst_output)[:-1], dtype=tf.float32)
seq_length = tf.reduce_sum(mask_rank2, axis=1)
else:
seq_length = seq_length / self.gbst_downsample_rate
if self.pool_windows:
outputs, mask = self.transformer_encoder_layer(gbst_output,
seq_length)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
pre_logits = self.attention_pool(outputs, mask, inverse_normalizer)
else:
outputs = self.transformer_encoder_layer(gbst_output, seq_length)
mask = tf.sequence_mask(
seq_length, tf.shape(outputs)[1], dtype=tf.float32)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
maskr3 = tf.expand_dims(mask, axis=2)
pre_logits = self.attention_pool(outputs, maskr3, inverse_normalizer)
if self.num_classes:
return self.final_fc(pre_logits)
else:
return pre_logits
| 6,598 | 41.850649 | 82 | py |
models | models-master/research/seq_flow_lite/models/byteqrnn.py | # Copyright 2022 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ByteQRNN based model for in-training tokenization.
Sample model params:
"feature_size": 128, # Embedding size for each byte
"gbst_max_token_len": 1024, # Max sequence length of bytes in GBST
"gbst_downsample_rate": 1, # Downsample factor for GBST output
"bottleneck_size": 128, # Bottleneck size before feeding to QRNN
"qrnn_state_size": 128, # QRNN layer param
"qrnn_kernel_width": 3, # QRNN layer param
"qrnn_zoneout_probability": 1e-2, # QRNN layer param
"distortion_probability": 0.25, # QRNN layer param
"number_qrnn_layers": 3, # QRNN layer param
"labels": [], # List of labels for getting num classes
"regularizer_scale": 1e-5, # L2 Regularization scale
"quantize": true, # Enable quantization
"multilabel": true, # If the output is Multilabel
"""
from absl import logging
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import embedding_layers # import seq_flow_lite module
from layers import misc_layers # import seq_flow_lite module
from layers import qrnn_layers # import seq_flow_lite module
class Encoder(tf.keras.layers.Layer):
"""Encoder with GBST and QRNN layers."""
def __init__(self, config, mode, **kwargs):
super(Encoder, self).__init__(**kwargs)
def _get_params(varname, default_value=None):
value = config.get(varname, default_value)
default = "" if varname in config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
_get_params("feature_size")
_get_params("bottleneck_size", self.feature_size)
_get_params("qrnn_state_size")
_get_params("qrnn_kernel_width", 3)
_get_params("qrnn_zoneout_probability")
_get_params("number_qrnn_layers")
_get_params("labels", [])
_get_params("regularizer_scale")
_get_params("quantize")
_get_params("gbst_max_token_len", 128)
_get_params("gbst_downsample_rate", 1)
_get_params("gbst_max_subword_block_width", 4)
_get_params("gbst_conv_kernel_size", 5)
_get_params("gbst_block_mixing_mode")
_get_params("gbst_add_block_pos_embed", False)
_get_params("attn_pool_output", True)
self.num_classes = len(config.get("labels", []))
self.parameters = base_layers.Parameters(
mode, quantize=self.quantize, regularizer_scale=self.regularizer_scale)
# Including 3 additional special token ids (0=padding, 1=EOS, 2=UNK).
self.vocabulary_size = 259
self.embedding = embedding_layers.EmbeddingLayer(
shape=[self.vocabulary_size, self.feature_size],
parameters=self.parameters)
self.bottleneck_layer = dense_layers.BaseQDenseVarLen(
units=self.bottleneck_size,
rank=3,
parameters=self.parameters)
self.gbst_layer = misc_layers.GBSTLayerV2(
feature_size=self.bottleneck_size,
max_seq_len=self.gbst_max_token_len,
downsample_rate=self.gbst_downsample_rate,
max_subword_block_width=self.gbst_max_subword_block_width,
conv_kernel_size=self.gbst_conv_kernel_size,
block_mixing_mode=self.gbst_block_mixing_mode,
add_block_pos_embed=self.gbst_add_block_pos_embed,
parameters=self.parameters)
self.qrnn_stack = qrnn_layers.QRNNBidirectionalStack(
parameters=self.parameters,
zoneout_probability=self.qrnn_zoneout_probability,
kwidth=self.qrnn_kernel_width,
state_size=self.qrnn_state_size,
num_layers=self.number_qrnn_layers)
self.attention_pool = misc_layers.AttentionPooling(
parameters=self.parameters)
if self.num_classes:
self.final_fc = dense_layers.BaseQDense(
units=self.num_classes,
rank=2,
parameters=self.parameters,
activation=None)
def call(self, token_ids, seq_length):
input_embeds = self.embedding(token_ids)
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
mask_rank2 = tf.ones(tf.shape(input_embeds)[:-1], dtype=tf.float32)
seq_length = tf.reduce_sum(mask_rank2, axis=1)
else:
mask_rank2 = tf.sequence_mask(
seq_length, tf.shape(input_embeds)[1], dtype=tf.float32)
maskr3 = tf.expand_dims(mask_rank2, axis=2)
gbst_input = self.bottleneck_layer(input_embeds, maskr3)
gbst_output = self.gbst_layer(gbst_input, seq_length)
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
mask_rank2 = tf.ones(tf.shape(gbst_output)[:-1], dtype=tf.float32)
seq_length = tf.reduce_sum(mask_rank2, axis=1)
else:
seq_length = seq_length / self.gbst_downsample_rate
mask_rank2 = tf.sequence_mask(
seq_length, tf.shape(gbst_output)[1], dtype=tf.float32)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask_rank2))
maskr3 = tf.expand_dims(mask_rank2, axis=2)
outputs = self.qrnn_stack(gbst_output, maskr3, inverse_normalizer)
if self.attn_pool_output:
pre_logits = self.attention_pool(outputs, maskr3, inverse_normalizer)
if self.num_classes:
return self.final_fc(pre_logits)
else:
return pre_logits
else:
return outputs
| 6,033 | 41.195804 | 80 | py |
models | models-master/research/seq_flow_lite/models/transformer_encoder.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of pQRNN model."""
# pylint: disable=arguments-renamed
from absl import logging
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import transformer_layers # import seq_flow_lite module
class Model(tf.keras.layers.Layer):
"""Quantized transformer encoder."""
def __init__(self, config, mode):
def _get_params(varname, default_value=None):
value = config[varname] if varname in config else default_value
default = "" if varname in config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
_get_params("intermediate_size")
_get_params("max_time_step")
_get_params("embedding_size")
_get_params("vocabulary_size")
_get_params("num_layers")
_get_params("labels")
_get_params("regularizer_scale")
_get_params("num_heads")
_get_params("model_dimension")
_get_params("quantize")
_get_params("activation_dropout_rate", 0.0)
_get_params("attention_dropout_rate", 0.0)
self.parameters = base_layers.Parameters(mode, self.quantize,
self.regularizer_scale)
super(Model, self).__init__()
def build(self, input_shape):
self.transformer = transformer_layers.TransformerEncoderStack(
parameters=self.parameters,
num_layers=self.num_layers,
intermediate_size=self.intermediate_size,
embedding_size=self.embedding_size,
max_time_step=self.max_time_step,
num_heads=self.num_heads,
model_dimension=self.model_dimension,
vocabulary_size=self.vocabulary_size,
activation_dropout_rate=self.activation_dropout_rate,
attention_dropout_rate=self.attention_dropout_rate)
def call(self, indices, sequence_length):
return self.transformer(indices, sequence_length)
class ModelWithEmbeddings(Model):
"""Quantized transformer encoder which takes embeddings instead of indices."""
def build(self, input_shape):
self.transformer_with_input_embedding = transformer_layers.TransformerEncoderStackWithInputEmbedding(
parameters=self.parameters,
num_layers=self.num_layers,
intermediate_size=self.intermediate_size,
embedding_size=self.embedding_size,
max_time_step=self.max_time_step,
num_heads=self.num_heads,
model_dimension=self.model_dimension,
vocabulary_size=self.vocabulary_size,
activation_dropout_rate=self.activation_dropout_rate,
attention_dropout_rate=self.attention_dropout_rate)
def call(self, embeddings, sequence_length):
return self.transformer_with_input_embedding(embeddings, sequence_length)
class FunnelTransformerModel(Model):
"""Quantized transformer encoder which takes embeddings instead of indices."""
def __init__(self, config, mode):
self.pool_windows = config.get("pool_windows", None)
super(FunnelTransformerModel, self).__init__(config, mode)
def build(self, input_shape):
self.funnel_transformer = transformer_layers.FunnelTransformerEncoderStack(
parameters=self.parameters,
num_layers=self.num_layers,
intermediate_size=self.intermediate_size,
embedding_size=self.embedding_size,
max_time_step=self.max_time_step,
num_heads=self.num_heads,
model_dimension=self.model_dimension,
vocabulary_size=self.vocabulary_size,
activation_dropout_rate=self.activation_dropout_rate,
attention_dropout_rate=self.attention_dropout_rate,
pool_windows=self.pool_windows)
def call(self, embeddings, sequence_length):
return self.funnel_transformer(embeddings, sequence_length)
| 4,408 | 38.017699 | 105 | py |
models | models-master/research/seq_flow_lite/models/transformer_uniform_attn_decoder.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Transformer decoder model."""
import math
from absl import logging
from tensor2tensor.utils import beam_search
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import embedding_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
from layers import transformer_layers # import seq_flow_lite module
class TransformerUniformAttnDecoder(base_layers.BaseLayer):
"""Transformer Uniform Attention Decoder."""
def __init__(self,
model_dimension,
max_time_step,
num_heads,
intermediate_size,
activation_dropout_rate=0.0,
attention_dropout_rate=0.0,
beam_size=1,
cached_kv=False,
**kwargs):
self.model_dimension = model_dimension
self.decoder_uniform_attn = transformer_layers.DecoderUniformAttention(
model_dimension,
max_time_step,
attention_dropout_rate=attention_dropout_rate,
beam_size=beam_size,
**kwargs)
self.multihead_cross_attn = transformer_layers.DecoderMultiheadAttention(
model_dimension,
num_heads,
cached_kv=cached_kv,
attention_dropout_rate=attention_dropout_rate,
**kwargs)
self.prx = dense_layers.BaseQDense(
model_dimension, activation=None, normalize=False, bias=False, **kwargs)
self.upprx = dense_layers.BaseQDense(
intermediate_size, normalize=False, **kwargs)
self.downprx = dense_layers.BaseQDense(
model_dimension, activation=None, normalize=False, **kwargs)
self.activation_dropout_rate = activation_dropout_rate
self.ln1 = normalization_layers.LayerNormalization(**kwargs)
self.ln2 = normalization_layers.LayerNormalization(**kwargs)
self.q0 = quantization_layers.ActivationQuantization(**kwargs)
self.q1 = quantization_layers.ActivationQuantization(**kwargs)
self.q2 = quantization_layers.ActivationQuantization(**kwargs)
super(TransformerUniformAttnDecoder, self).__init__(**kwargs)
def call(self,
dec_inputs,
dec_mask,
dec_inverse_normalizer,
enc_output,
enc_mask,
enc_inverse_normalizer,
cross_attn_mask=None,
step=None,
selected_beams=None,
cache=None):
batch_size = self.get_batch_dimension(dec_inputs)
self._assert_rank_and_type(dec_inputs, 3)
self._assert_rank_and_type(dec_mask, 3)
assert dec_inputs.get_shape().as_list()[-1] == self.model_dimension
self_attn_output = self.decoder_uniform_attn(
dec_inputs,
dec_mask,
dec_inverse_normalizer,
step=step,
beam_indices=selected_beams,
cache=cache)
cross_attn_output = self.multihead_cross_attn(dec_inputs, dec_mask,
dec_inverse_normalizer,
enc_output, enc_mask,
enc_inverse_normalizer,
cross_attn_mask)
layer_out = self.q0(cross_attn_output + self_attn_output)
layer_out = tf.reshape(layer_out, [-1, self.model_dimension])
layer_out = self.prx(layer_out)
if self.parameters.mode == base_layers.TRAIN:
layer_out = tf.nn.dropout(layer_out, rate=self.activation_dropout_rate)
dec_inputs = tf.reshape(dec_inputs, [-1, self.model_dimension])
dec_inputs_updated = self.q1(self.ln1(dec_inputs + layer_out))
# Feed forward network.
layer_out = self.upprx(dec_inputs_updated)
layer_out = self.downprx(layer_out)
if self.parameters.mode == base_layers.TRAIN:
layer_out = tf.nn.dropout(layer_out, rate=self.activation_dropout_rate)
outputs = self.q2(self.ln2(dec_inputs_updated + layer_out))
return tf.reshape(outputs, [batch_size, -1, self.model_dimension])
class TransformerUniformAttnDecoderStack(base_layers.BaseLayer):
"""TransformerUniformAttnDecoderStack Decoder."""
def __init__(self,
num_layers,
max_time_step,
vocabulary_size,
embedding_size,
model_dimension,
num_heads,
intermediate_size,
beam_size=1,
activation_dropout_rate=0.1,
attention_dropout_rate=0.0,
cached_kv=False,
**kwargs):
super(TransformerUniformAttnDecoderStack, self).__init__(**kwargs)
self.max_time_step = max_time_step
self.vocabulary_size = vocabulary_size
self.embedding_size = embedding_size
self.activation_dropout_rate = activation_dropout_rate
self.layers = []
for _ in range(num_layers):
self.layers.append(
TransformerUniformAttnDecoder(
model_dimension=model_dimension,
max_time_step=max_time_step,
num_heads=num_heads,
intermediate_size=intermediate_size,
beam_size=beam_size,
cached_kv=cached_kv,
activation_dropout_rate=activation_dropout_rate,
attention_dropout_rate=attention_dropout_rate,
**kwargs))
def call(self,
dec_inputs,
dec_mask,
enc_output,
enc_mask,
step=None,
selected_beams=None,
cache=None):
self._assert_rank_and_type(dec_mask, 2)
self._assert_rank_and_type(enc_mask, 2)
dec_mask_rank3 = tf.expand_dims(dec_mask, axis=2)
dec_inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(dec_mask_rank3))
enc_mask_rank3 = tf.expand_dims(enc_mask, 1)
enc_inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(enc_mask_rank3))
cross_attn_mask = enc_mask_rank3
layer_in = dec_inputs
if self.parameters.mode == base_layers.TRAIN:
layer_in = tf.nn.dropout(layer_in, rate=self.activation_dropout_rate)
enc_output_feature_dim = enc_output.get_shape().as_list()[2]
enc_output = tf.reshape(enc_output, [-1, enc_output_feature_dim])
for i, layer in enumerate(self.layers):
layer_cache = cache["layer_%d" % i] if cache is not None else None
layer_in = layer(
layer_in,
dec_mask_rank3,
dec_inverse_normalizer,
enc_output,
enc_mask,
enc_inverse_normalizer,
cross_attn_mask,
step=step,
selected_beams=selected_beams,
cache=layer_cache)
return layer_in
class Model(tf.keras.layers.Layer):
"""Quantized transformer decoder."""
def __init__(self, config, mode):
super(Model, self).__init__()
def _get_params(varname, default_value=None):
value = config[varname] if varname in config else default_value
default = "" if varname in config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
_get_params("intermediate_size")
_get_params("max_dec_time_step")
_get_params("max_enc_time_step")
_get_params("embedding_size")
_get_params("vocabulary_size")
_get_params("num_layers")
_get_params("labels")
_get_params("regularizer_scale")
_get_params("num_heads")
_get_params("model_dimension")
_get_params("beam_size", 1)
_get_params("quantize", True)
_get_params("cached_kv", False)
_get_params("attention_dropout_rate", 0.0)
_get_params("activation_dropout_rate", 0.0)
# If set, a separate dense layer is used to generate the logits instead of
# re-using the input embedding table.
_get_params("use_output_layer", False)
self.parameters = base_layers.Parameters(mode, self.quantize,
self.regularizer_scale)
# Activation/Normalization enabled on input bottleneck as there is no
# temporal information.
self.input_bottleneck = dense_layers.BaseQDenseVarLen(
self.model_dimension, rank=3, parameters=self.parameters)
self.output_bottleneck = dense_layers.BaseQDense(
self.embedding_size,
normalize=False,
activation=None,
bias=False,
parameters=self.parameters)
self.embedding = embedding_layers.EmbeddingFullyConnected(
shape=[self.vocabulary_size, self.embedding_size],
initializer=tf.random_uniform_initializer(-math.sqrt(3), math.sqrt(3)),
parameters=self.parameters)
if self.use_output_layer:
self.output_layer = dense_layers.BaseQDense(
self.vocabulary_size,
activation=None,
normalize=False,
bias=False,
parameters=self.parameters)
self.positional_embedding = embedding_layers.EmbeddingLayer(
shape=[self.max_dec_time_step, self.model_dimension],
initializer=tf.random_uniform_initializer(-math.sqrt(3), math.sqrt(3)),
parameters=self.parameters)
self.ln = normalization_layers.LayerNormalization(
parameters=self.parameters)
self.qact = quantization_layers.ActivationQuantization(
parameters=self.parameters)
# Scales the weights for computing logits.
self.logits_fc_weights_scale_factor = None
self.logits_fc_bias = self.add_weight(
"logits_fc_bias",
shape=[self.vocabulary_size],
initializer=tf.constant_initializer(0),
dtype="float32")
# Optional bias which can be used to mask logits output.
self.output_bias = None
self.transformer_uniform_attn_decoder = TransformerUniformAttnDecoderStack(
parameters=self.parameters,
num_layers=self.num_layers,
intermediate_size=self.intermediate_size,
embedding_size=self.embedding_size,
max_time_step=self.max_dec_time_step,
num_heads=self.num_heads,
model_dimension=self.model_dimension,
vocabulary_size=self.vocabulary_size,
beam_size=self.beam_size,
cached_kv=self.cached_kv,
attention_dropout_rate=self.attention_dropout_rate,
activation_dropout_rate=self.activation_dropout_rate)
# Beam search output.
self.finished_seq = None
self.finished_scores = None
def call(self,
decode_ids,
decode_ids_mask,
enc_output,
enc_mask,
start_ids=None,
eos_id=None,
pad_id=None,
input_id=None,
time_step=None,
selected_beams=None):
if self.parameters.mode == base_layers.TRAIN:
inputs = self.training_inputs(decode_ids, decode_ids_mask)
layer_out = self.transformer_uniform_attn_decoder(inputs, decode_ids_mask,
enc_output, enc_mask)
logits, predicted_ids = self.model_outputs(layer_out)
elif self.parameters.mode in [base_layers.EVAL, base_layers.PREDICT]:
logits, predicted_ids = self.decode_beam_search(start_ids, eos_id, pad_id,
enc_output, enc_mask)
elif self.parameters.mode == base_layers.TFLITE:
input_values = self.embedding(input_id)
# time_step starts from 1.
pos_values = self.positional_embedding(time_step - 1)
pos_values = tf.reshape(pos_values, [-1, 1, self.embedding_size])
input_mask = tf.ones(tf.shape(input_values)[:-1], dtype=tf.float32)
inputs = self.qact(self.ln(input_values + pos_values))
layer_out = self.transformer_uniform_attn_decoder(
inputs,
input_mask,
enc_output,
enc_mask,
step=time_step,
selected_beams=selected_beams)
logits, predicted_ids = self.model_outputs(layer_out)
else:
assert "Invalid mode."
return logits, predicted_ids
def training_inputs(self, input_ids, input_mask):
input_values = self.embedding(input_ids)
if self.embedding_size != self.model_dimension:
input_values = self.input_bottleneck(input_values, input_mask)
pos_indices = tf.cumsum(input_mask, axis=1, exclusive=True)
pos_indices = tf.cast(pos_indices, dtype=tf.int32)
pos_values = self.positional_embedding(pos_indices)
inputs = self.qact(self.ln(input_values + pos_values))
return inputs
def model_outputs(self, layer_in):
bsz = layer_in.get_shape().as_list()[0] or tf.shape(layer_in)[0]
layer_out = tf.reshape(layer_in, [-1, self.model_dimension])
if self.use_output_layer:
logits = self.output_layer(layer_out)
else:
if self.model_dimension != self.embedding_size:
layer_out = self.output_bottleneck(layer_out)
logits = self.embedding.fully_connected(
layer_out,
bias=self.logits_fc_bias,
weights_scale_factor=self.logits_fc_weights_scale_factor)
logits = tf.reshape(logits, [bsz, -1, self.vocabulary_size])
# Optional bias to mask out logits before applying argmax.
if self.output_bias is not None:
logits += self.output_bias
predicted_ids = tf.argmax(logits, axis=2, output_type=tf.int64)
return logits, predicted_ids
def decode_beam_search(self,
start_ids,
eos_id,
pad_id,
enc_output,
enc_mask,
scope="model"):
batch_size = tf.shape(start_ids)[0]
cache = { # pylint: disable=g-complex-comprehension
"layer_%d" % layer: {
"uniform_avg": tf.zeros([batch_size, 1, self.model_dimension]),
} for layer in range(self.num_layers)
}
cache["logits"] = tf.zeros([batch_size, 0, self.vocabulary_size])
pos_indices = tf.range(self.max_dec_time_step, dtype=tf.int32)
pos_indices = tf.reshape(pos_indices, [1, -1])
pos_values = self.positional_embedding(pos_indices)
def beam_search_tile(output, tile_pattern, final_shape):
x = tf.tile(output, tile_pattern)
x = tf.reshape(x, final_shape)
return x
enc_output_feature_dim = enc_output.get_shape().as_list()[2]
enc_output = beam_search_tile(
enc_output, [1, self.beam_size, 1],
[batch_size * self.beam_size, -1, enc_output_feature_dim])
enc_mask = beam_search_tile(enc_mask, [1, self.beam_size],
[batch_size * self.beam_size, -1])
def symbols_to_logits_fn(ids, step, cache):
"""Looks up ids to logits."""
logging.info("Running symbols to logits. ids=%s, step=%s, cache=%s", ids,
step, cache)
curr_id = ids[:, -1:]
with tf.name_scope(scope):
curr_embed = self.embedding(curr_id)
input_mask = tf.ones(tf.shape(curr_embed)[:-1], dtype=tf.float32)
if self.embedding_size != self.model_dimension:
curr_embed = self.input_bottleneck(curr_embed, input_mask)
inputs = self.qact(
self.ln(curr_embed + pos_values[:, step:step + 1, :]))
layer_out = self.transformer_uniform_attn_decoder(
inputs,
input_mask,
enc_output,
enc_mask,
step=step + 1,
cache=cache)
next_logits, _ = self.model_outputs(layer_out)
cache["logits"] = tf.concat([cache["logits"], next_logits], axis=1)
return next_logits, cache
self.finished_seq, self.finished_scores, states = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids=start_ids,
beam_size=self.beam_size,
decode_length=self.max_dec_time_step,
vocab_size=self.vocabulary_size,
alpha=0.6,
eos_id=eos_id,
states=cache)
beam_ids = self.finished_seq[:, 0, 1:]
beam_ids = tf.pad(
beam_ids, [[0, 0], [0, self.max_dec_time_step - tf.shape(beam_ids)[1]]],
constant_values=pad_id)
logits = states["logits"][:, 0, :, :]
logits = tf.pad(
logits,
[[0, 0], [0, self.max_dec_time_step - tf.shape(logits)[1]], [0, 0]],
constant_values=self.parameters.invalid_logit)
return logits, beam_ids
class ModelEvalWithGTLogitsAndPredictions(Model):
"""Model with EVAL mode logits and predictions based on ground truth inputs at each step."""
def call(self,
decode_ids,
decode_ids_mask,
enc_output,
enc_mask,
start_ids=None,
eos_id=None,
pad_id=None,
input_id=None,
time_step=None,
selected_beams=None):
if self.parameters.mode in [base_layers.TRAIN, base_layers.EVAL]:
inputs = self.training_inputs(decode_ids, decode_ids_mask)
layer_out = self.transformer_uniform_attn_decoder(inputs, decode_ids_mask,
enc_output, enc_mask)
logits, predicted_ids = self.model_outputs(layer_out)
elif self.parameters.mode == base_layers.PREDICT:
logits, predicted_ids = self.decode_beam_search(
start_ids,
eos_id,
pad_id,
enc_output,
enc_mask,
scope="model_eval_with_gt_logits_and_predictions")
elif self.parameters.mode == base_layers.TFLITE:
input_values = self.embedding(input_id)
# time_step starts from 1.
pos_values = self.positional_embedding(time_step - 1)
pos_values = tf.reshape(pos_values, [-1, 1, self.embedding_size])
input_mask = tf.ones(tf.shape(input_values)[:-1], dtype=tf.float32)
inputs = self.qact(self.ln(input_values + pos_values))
layer_out = self.transformer_uniform_attn_decoder(
inputs,
input_mask,
enc_output,
enc_mask,
step=time_step,
selected_beams=selected_beams)
logits, predicted_ids = self.model_outputs(layer_out)
else:
assert "Invalid mode."
return logits, predicted_ids
class ModelEvalWithGTLogits(Model):
"""Model with EVAL mode logits computed based on ground truth input at each step."""
def call(self,
decode_ids,
decode_ids_mask,
enc_output,
enc_mask,
start_ids=None,
eos_id=None,
pad_id=None,
input_id=None,
time_step=None,
selected_beams=None):
logits = None
if self.parameters.mode in [base_layers.TRAIN, base_layers.EVAL]:
inputs = self.training_inputs(decode_ids, decode_ids_mask)
layer_out = self.transformer_uniform_attn_decoder(inputs, decode_ids_mask,
enc_output, enc_mask)
logits, predicted_ids = self.model_outputs(layer_out)
if self.parameters.mode in [base_layers.EVAL, base_layers.PREDICT]:
# EVAL mode predictions are based on beam search path.
_, predicted_ids = self.decode_beam_search(
start_ids,
eos_id,
pad_id,
enc_output,
enc_mask,
scope="model_eval_with_gt_logits")
if self.parameters.mode == base_layers.TFLITE:
input_values = self.embedding(input_id)
# time_step starts from 1.
pos_values = self.positional_embedding(time_step - 1)
pos_values = tf.reshape(pos_values, [-1, 1, self.embedding_size])
input_mask = tf.ones(tf.shape(input_values)[:-1], dtype=tf.float32)
inputs = self.qact(self.ln(input_values + pos_values))
layer_out = self.transformer_uniform_attn_decoder(
inputs,
input_mask,
enc_output,
enc_mask,
step=time_step,
selected_beams=selected_beams)
logits, predicted_ids = self.model_outputs(layer_out)
return logits, predicted_ids
| 20,439 | 38.535783 | 94 | py |
models | models-master/research/seq_flow_lite/models/prado.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of PRADO model."""
import copy
from absl import logging
import numpy as np
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import conv_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import projection_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
from tf_ops import tf_custom_ops_py # import seq_flow_lite module
class PaddedMaskedVarLenConv(conv_layers.EncoderQConvolutionVarLen):
"""A layer that performs padded masked convolution."""
def __init__(self, invalid_value, ngram=2, skip_bigram=None, **kwargs):
self.invalid_value = invalid_value
assert ngram is None or (ngram >= 1 and ngram <= 5)
assert skip_bigram is None or skip_bigram == 1 or skip_bigram == 2
assert bool(ngram is None) != bool(skip_bigram is None)
self.kwidth = ngram if ngram is not None else (skip_bigram + 2)
mask = [1] * self.kwidth
self.skipgram = skip_bigram is not None
if skip_bigram is not None:
mask[1], mask[skip_bigram] = 0, 0
self.mask = np.array(mask, dtype="float32").reshape((1, self.kwidth, 1, 1))
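    # For example (derived from the mask construction above): skip_bigram=1
    # gives kwidth=3 and mask [1, 0, 1]; skip_bigram=2 gives kwidth=4 and mask
    # [1, 0, 0, 1]; plain ngram convolutions keep an all-ones mask.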
self.zero_pad = tf.keras.layers.ZeroPadding1D(padding=[0, self.kwidth - 1])
super(PaddedMaskedVarLenConv, self).__init__(
ksize=self.kwidth, rank=3, padding="VALID", activation=None, **kwargs)
def call(self, inputs, mask, inverse_normalizer):
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
maskr4 = tf.expand_dims(mask, axis=1)
inputs_padded = self.zero_pad(inputs)
result = super(PaddedMaskedVarLenConv, self).call(inputs_padded, maskr4,
inverse_normalizer)
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
return result * mask + (1 - mask) * self.invalid_value
return result
def quantize_parameter(self, weight, num_bits=8):
weight = super(PaddedMaskedVarLenConv, self).quantize_parameter(
weight, num_bits=num_bits)
return weight * tf.convert_to_tensor(self.mask) if self.skipgram else weight
class AttentionPoolReduce(base_layers.BaseLayer):
"""Attention pooling and reduce."""
def __init__(self, filters, ngram=2, skip_bigram=None, **kwargs):
super(AttentionPoolReduce, self).__init__(**kwargs)
self.filters = filters
self.value = PaddedMaskedVarLenConv(
0, filters=filters, ngram=ngram, skip_bigram=skip_bigram, **kwargs)
self.attention_logits = PaddedMaskedVarLenConv(
self.parameters.invalid_logit,
filters=filters,
ngram=ngram,
skip_bigram=skip_bigram,
**kwargs)
def call(self, values_in, attention_in, mask, inverse_normalizer):
self._assert_rank_and_type(values_in, 3)
self._assert_rank_and_type(attention_in, 3)
self._assert_rank_and_type(mask, 3)
values = self.value(values_in, mask, inverse_normalizer)
attention_logits = self.attention_logits(attention_in, mask,
inverse_normalizer)
if self.parameters.mode == base_layers.TFLITE:
return tf_custom_ops_py.expected_value_op(attention_logits, values)
else:
attention_logits = tf.transpose(attention_logits, [0, 2, 1])
values = tf.transpose(values, [0, 2, 1])
attention = tf.nn.softmax(attention_logits)
return tf.reduce_sum(attention * values, axis=2)
class Encoder(tf.keras.layers.Layer):
"""A PRADO keras model."""
def __init__(self, config, mode, **kwargs):
super(Encoder, self).__init__(**kwargs)
def _get_params(varname, default_value=None):
value = config[varname] if varname in config else default_value
default = "" if varname in config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
_get_params("labels")
_get_params("quantize", True)
_get_params("embedding_regularizer_scale", 35e-3)
_get_params("embedding_size", 64)
_get_params("unigram_channels", 0)
_get_params("bigram_channels", 0)
_get_params("trigram_channels", 0)
_get_params("fourgram_channels", 0)
_get_params("fivegram_channels", 0)
_get_params("skip1bigram_channels", 0)
_get_params("skip2bigram_channels", 0)
_get_params("network_regularizer_scale", 1e-4)
_get_params("keep_prob", 1.0)
self.num_classes = len(self.labels)
self.parameters = base_layers.Parameters(
mode,
quantize=self.quantize,
regularizer_scale=self.embedding_regularizer_scale)
self.values_fc = dense_layers.BaseQDenseVarLen(
units=self.embedding_size, rank=3, parameters=self.parameters)
self.attention_fc = dense_layers.BaseQDenseVarLen(
units=self.embedding_size, rank=3, parameters=self.parameters)
self.parameters = copy.copy(self.parameters)
self.parameters.regularizer_scale = self.network_regularizer_scale
self.attention_pool_layers = []
self._add_attention_pool_layer(self.unigram_channels, 1)
self._add_attention_pool_layer(self.bigram_channels, 2)
self._add_attention_pool_layer(self.trigram_channels, 3)
self._add_attention_pool_layer(self.fourgram_channels, 4)
self._add_attention_pool_layer(self.fivegram_channels, 5)
self._add_attention_pool_layer(self.skip1bigram_channels, None, 1)
self._add_attention_pool_layer(self.skip2bigram_channels, None, 2)
self.concat_quantizer = quantization_layers.ConcatQuantization(
axis=1, parameters=self.parameters)
self.final_fc = dense_layers.BaseQDense(
units=self.num_classes,
rank=2,
parameters=self.parameters,
activation=None)
def _add_attention_pool_layer(self, channels, ngram, skip_bigram=None):
if channels > 0:
self.attention_pool_layers.append(
AttentionPoolReduce(
filters=channels,
skip_bigram=skip_bigram,
ngram=ngram,
parameters=self.parameters))
def _apply_fc_dropout(self, layer, inputs, mask, inverse_normalizer):
outputs = layer(inputs, mask, inverse_normalizer)
if self.parameters.mode == base_layers.TRAIN and self.keep_prob < 1.0:
return tf.nn.dropout(outputs, rate=(1 - self.keep_prob))
return outputs
def call(self, projection, seq_length):
mask = tf.sequence_mask(
seq_length, tf.shape(projection)[1], dtype=tf.float32)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
maskr3 = tf.expand_dims(mask, axis=2)
values_in = self._apply_fc_dropout(self.values_fc, projection, mask,
inverse_normalizer)
attention_in = self._apply_fc_dropout(self.attention_fc, projection, mask,
inverse_normalizer)
tensors = [
layer(values_in, attention_in, maskr3, inverse_normalizer)
for layer in self.attention_pool_layers
]
assert tensors, "no ngram channels have been configured"
pre_logits = self.concat_quantizer(tensors)
return self.final_fc(pre_logits)
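# An illustrative configuration sketch for the encoder above (the values are
# assumptions, not tuned settings); `projection` is any [batch, time, depth]
# float feature tensor, such as the output of the ProjectionLayer used below:
#
#   config = {
#       "labels": ["negative", "positive"],
#       "embedding_size": 64,
#       "unigram_channels": 64,
#       "bigram_channels": 64,
#       "trigram_channels": 64,
#       "skip1bigram_channels": 64,
#   }
#   encoder = Encoder(config, base_layers.TRAIN)
#   logits = encoder(projection, seq_length)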
class Model(Encoder):
def __init__(self, config, mode, **kwargs):
super(Model, self).__init__(config, mode, **kwargs)
self.projection = projection_layers.ProjectionLayer(config, mode)
def call(self, inputs):
projection, seq_length = self.projection(inputs)
return super(Model, self).call(projection, seq_length)
| 8,180 | 40.739796 | 80 | py |
models | models-master/research/seq_flow_lite/models/sgnn/sgnn_test.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for seq_flow_lite.sgnn."""
import tensorflow as tf
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
from models import sgnn # import seq_flow_lite module
@test_util.run_all_in_graph_and_eager_modes
class SgnnTest(tf.test.TestCase):
def test_preprocess(self):
self.assertAllEqual(
sgnn.preprocess(
tf.constant([['Hello World!'], [u'你好'],
[u'مرحبا بالعالم']])),
[['hello'.encode(), 'world!'.encode()], [u'你好'.encode()],
[u'مرحبا'.encode(), u'بالعالم'.encode()]])
def test_get_ngram(self):
tokens = tf.ragged.constant([['hello', 'world'], [u'你好'],
[u'مرحبا', u'بالعالم']])
self.assertAllEqual(
sgnn.get_ngrams(tokens, 3),
[[
b'^he', b'hel', b'ell', b'llo', b'lo$', b'^wo', b'wor', b'orl',
b'rld', b'ld$'
], [u'^你好'.encode(), u'你好$'.encode()],
[
u'^مر'.encode(), u'مرح'.encode(), u'رحب'.encode(),
u'حبا'.encode(), u'با$'.encode(), u'^با'.encode(),
u'بال'.encode(), u'الع'.encode(), u'لعا'.encode(),
u'عال'.encode(), u'الم'.encode(), u'لم$'.encode()
]])
def test_project(self):
ngrams = tf.ragged.constant([[b'^h', b'he', b'el', b'll', b'lo', b'o$'],
[b'^h', b'hi', b'i$']])
self.assertAllClose(
sgnn.fused_project(ngrams, [5, 7], 0x7FFFFFFF),
[[0.448691, -0.238499], [-0.037561, 0.080748]])
self.assertAllClose(
sgnn.fused_project(ngrams, [5, 7], 0x7FFFFFFF),
sgnn.project(ngrams, [5, 7], 0x7FFFFFFF))
def test_sgnn(self):
self.assertAllClose(
sgnn.sgnn(tf.constant([['hello'], ['hi']]), [3, 5, 7], 2),
[[0.268503, 0.448691, -0.238499], [0.093143, -0.037561, 0.080748]])
def test_keras_model(self):
hparams = sgnn.Hparams(learning_rate=2e-4)
model = sgnn.keras_model([1, 2, 3, 4], 2, [100, 50], hparams)
self.assertIsNotNone(model)
if __name__ == '__main__':
tf.test.main()
| 2,771 | 36.972603 | 95 | py |
models | models-master/research/seq_flow_lite/models/sgnn/sgnn.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds SGNN model.
[1] Sujith Ravi and Zornitsa Kozareva. 2018. "Self-governing neural networks for
on-device short text
classification." In Proceedings of the 2018 Conference on Empirical Methods in
Natural Language
Processing, pages 887-893. Association for Computational Linguistics
The model will be constructed in this way:
* Projects text to float features, the size is defined by projection_size
* Fully connected layer predicts the class of predictions.
"""
import collections
import tensorflow.compat.v2 as tf
import tensorflow_text as tf_text
from tensorflow_lite_support.custom_ops.python import tflite_text_api
# Hparam collections that will be used to tune the model.
Hparams = collections.namedtuple(
'Hparams',
[
# Learning rate for the optimizer.
'learning_rate'
])
def preprocess(text):
"""Normalize the text, and return tokens."""
text = tf.reshape(text, [-1])
text = tf_text.case_fold_utf8(text)
tokenizer = tflite_text_api.WhitespaceTokenizer()
return tokenizer.tokenize(text)
def get_ngrams(tokens, n):
"""Generates character ngrams from tokens.
Args:
tokens: A string ragged tensor for tokens, in shape of [batch_size,
num_token].
n: ngram size for char ngrams.
Returns:
A string ragged tensor for ngrams, in shape of [batch_size, num_token,
ngrams].
"""
chars_split = tf.strings.unicode_split('^' + tokens + '$', 'UTF-8')
chars_joined = tflite_text_api.ngrams(
chars_split,
width=n,
axis=-1,
reduction_type=tf_text.Reduction.STRING_JOIN,
string_separator='')
flat_row_splits = tf.nn.embedding_lookup(chars_joined.values.row_splits,
chars_joined.row_splits)
return tf.RaggedTensor.from_row_splits(chars_joined.values.values,
flat_row_splits)
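# For example (consistent with the ngram construction above), the tokens
# [['hi']] with n=3 produce the character trigrams of '^hi$':
#
#   get_ngrams(tf.ragged.constant([['hi']]), 3)  # -> [[b'^hi', b'hi$']]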
def project(ngrams, hash_seed, buckets):
"""Projects a ngram RaggedTensor to float tensor.
Args:
ngrams: A string ragged tensor, in shape of [batch_size, num_token, ngrams].
hash_seed: A python int list, in shape of [num_hash].
buckets: An int for the max value of projected integers.
Returns:
A float tensor that projects ngrams to the space represented by hash_seed,
in shape of [batch_size, num_hash].
"""
num_hash = len(hash_seed)
# Hash ngrams string tensor to hash signatures.
signatures = tf.ragged.map_flat_values(tf.strings.to_hash_bucket_fast, ngrams,
buckets)
  # Each ngram signature is multiplied by a different hash seed, reduced
  # modulo the number of buckets, and mapped to a signed range:
  #   value = abs(signature * seed) % buckets
  #   if value > buckets / 2: value -= buckets
hash_tensor = tf.constant(hash_seed, dtype=tf.int64)
value = tf.math.floormod(
tf.abs(signatures.values * tf.reshape(hash_tensor, [-1, 1])), buckets)
value = value - tf.cast(tf.greater(value, buckets >> 1), tf.int64) * buckets
# Wrap values to ragged tensor, and calculates
# output_i,j = mean(value_i,j,k) for k-th ngram in i-th text
# computed with j-th hash seed
row_lengths = tf.repeat(
tf.reshape(signatures.row_lengths(), [1, -1]), num_hash, axis=0)
row_lengths = tf.cast(tf.reshape(row_lengths, [-1]), tf.int32)
result = tf.RaggedTensor.from_row_lengths(
tf.RaggedTensor.from_row_lengths(tf.reshape(value, [-1]), row_lengths),
tf.repeat(tf.shape(signatures.row_lengths()), num_hash))
result = tf.reduce_mean(result, 2) / (buckets >> 1)
return tf.transpose(tf.reshape(result.values, [num_hash, -1]))
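# A worked example of the signed wrapping above (illustrative numbers): with
# buckets = 100, a |signature * seed| value of 173 maps to 173 % 100 = 73, and
# since 73 > 50 it wraps to 73 - 100 = -27; the final division by
# (buckets >> 1) then scales the per-seed averages into roughly [-1, 1].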
def fused_project(ngrams, hash_seed, buckets):
"""A wrapper to fuse project method when converting to TFLite model.
Args:
ngrams: A string ragged tensor, in shape of [batch_size, num_token, ngrams].
hash_seed: A python int list, in shape of [num_hash].
buckets: An int for the max value of projected integers.
Returns:
A float tensor that projects ngrams to the space represented by hash_seed,
in shape of [batch_size, num_hash].
"""
hash_seed_attr = ' '.join(['i: %d' % seed for seed in hash_seed])
experimental_implements = [
'name: "tftext:custom:SgnnProjection"',
'attr { key: "hash_seed" value { list {%s} } }' % hash_seed_attr,
'attr { key: "buckets" value { i: %d } }' % buckets,
]
experimental_implements = ' '.join(experimental_implements)
@tf.function(experimental_implements=experimental_implements)
def func(ngrams_values, *ngrams_row_splits):
ngrams = tf.RaggedTensor.from_nested_row_splits(
flat_values=ngrams_values, nested_row_splits=ngrams_row_splits)
return project(ngrams, hash_seed, buckets)
return func(ngrams.flat_values, *ngrams.nested_row_splits)
def sgnn(texts, hash_seed, ngram_size):
"""Projects the string text to float features.
  It first generates ngrams of sizes 1..N from the tokens of the given text,
  then projects each ngram tensor with a partition of the seeds.
Args:
texts: a string tensor, in shape of [batch_size].
hash_seed: a list of integers, in shape of [projection_size].
ngram_size: max size of ngram to generate features.
Returns:
A float tensor that projects ngrams to the space represented by hash_seed,
in shape of [batch_size, projection_size].
"""
projection_size = len(hash_seed)
partition_size = int(projection_size / ((ngram_size + 1) * ngram_size / 2))
if partition_size == 0:
raise ValueError(
'projection size %d is not enough for %d ngram partitions' %
(projection_size, ngram_size))
indices = [int(i * (i + 1) / 2) * partition_size for i in range(ngram_size)]
indices.append(projection_size)
projection_layer = []
tokens = preprocess(texts)
for i in range(ngram_size):
ngram = get_ngrams(tokens, i + 1)
projection = fused_project(ngram, hash_seed[indices[i]:indices[i + 1]],
0x7FFFFFFF)
projection_layer.append(projection)
return tf.cast(tf.concat(projection_layer, -1), tf.float32)
class ProjectLayer(tf.keras.layers.Layer):
"""Projects the texts to a fixed sized features."""
def __init__(self, seed, ngram_size, **kwargs):
self.seed = seed
self.ngram_size = ngram_size
super(ProjectLayer, self).__init__(**kwargs)
def get_config(self):
return {
'seed': self.seed,
'ngram_size': self.ngram_size,
}
def call(self, x):
return sgnn(x, self.seed, self.ngram_size)
def compute_output_shape(self, input_shape):
return (input_shape[0], len(self.seed))
def keras_model(hash_seed, ngram_size, fc_size_list, hparams):
"""Compiles a keras model from projected features to labels.
Args:
hash_seed: a list of int used to project the feature.
ngram_size: maximum size of ngram to generate features from texts.
fc_size_list: a list of int, sizes of each fully connected layer.
hparams: hyper parameters for the model.
Returns:
A keras model that predicts the language id.
"""
if not fc_size_list:
raise ValueError(
'Must specify one or more fully connected layers via fc_size_list')
model = tf.keras.Sequential()
model.add(ProjectLayer(hash_seed, ngram_size))
for size in fc_size_list[:-1]:
model.add(tf.keras.layers.Dense(size))
model.add(tf.keras.layers.Dense(fc_size_list[-1], activation='softmax'))
model.compile(
optimizer=tf.keras.optimizers.Adam(lr=hparams.learning_rate),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return model
| 8,235 | 35.281938 | 80 | py |
models | models-master/research/seq_flow_lite/models/sgnn/train.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to train langid model.
The script builds language detection from wikipedia dataset,
builds SGNN model to train an on-device model to
predict the language of the given text.
"""
import os
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from models import sgnn # import seq_flow_lite module
FLAGS = flags.FLAGS
flags.DEFINE_string('output_dir', '/tmp/langid',
'Path for the output directory.')
flags.DEFINE_integer('projection_size', 600, 'Size of projection layer.')
flags.DEFINE_integer('ngram_size', 3, 'Max size of ngram to project features.')
flags.DEFINE_string('fc_layer', '256,128',
'Size of fully connected layer, separated by comma.')
flags.DEFINE_integer('batch_size', 160, 'Batch size for training.')
flags.DEFINE_integer('epochs', 10, 'Num of epochs for training.')
flags.DEFINE_float('learning_rate', 2e-4, 'learning rate for optimizer.')
LANGIDS = ['ar', 'en', 'es', 'fr', 'ru', 'zh']
def dataset_fn(batch_size, is_training, split, try_gcs, max_input_len):
"""Creates dataset to train and evaluate.
Args:
batch_size: Batch size for training or evaluation.
is_training: True if the dataset is for training.
split: Split of dataset, follow the pattern defined in
https://www.tensorflow.org/datasets/splits
try_gcs: True if loading the data from gcs.
max_input_len: Max length of input string.
Returns:
Dataset object.
"""
def _get_text(item):
return tf.strings.substr(item['text'], 0, max_input_len)
all_data = []
for idx, langid in enumerate(LANGIDS):
dataset = tfds.load(
'wikipedia/20190301.%s' % langid, try_gcs=try_gcs, split=split)
map_fn = lambda item: (_get_text(item), idx) # pylint: disable=cell-var-from-loop
dataset = dataset.map(map_fn)
all_data.append(dataset)
datasets = tf.data.experimental.sample_from_datasets(
all_data, [1. / len(all_data)] * len(LANGIDS))
repeat_count = None if is_training else 1
return datasets.cache().shuffle(100000).batch(batch_size).repeat(repeat_count)
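# Example call (illustrative values): a training stream over the first 1% of
# each language's Wikipedia split, truncating articles to 100 characters.
#
#   train_ds = dataset_fn(batch_size=32, is_training=True, split='train[:1%]',
#                         try_gcs=True, max_input_len=100)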
def save_and_convert(model, output_dir):
"""Save keras model and convert to tflite."""
saved_model_path = os.path.join(output_dir, 'saved_model')
tf.saved_model.save(model, saved_model_path)
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path)
converter.allow_custom_ops = True
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
]
data = converter.convert()
with open(os.path.join(output_dir, 'model.tflite'), 'wb') as f:
f.write(data)
def train_and_evaluate():
"""Train and evaluate the model."""
  # Seeds are used as integer multipliers by the projection, so cast them.
  hash_seed = (np.random.uniform(-1, 1, FLAGS.projection_size) *
               0x7FFFFFFF).astype(np.int64)
fc_layer = [int(fc) for fc in FLAGS.fc_layer.split(',')]
fc_layer.append(len(LANGIDS) + 1)
hparams = sgnn.Hparams(learning_rate=FLAGS.learning_rate)
model = sgnn.keras_model(hash_seed, FLAGS.ngram_size, fc_layer, hparams)
model.fit(
dataset_fn(FLAGS.batch_size, True, 'train[:10%]', True, 100),
epochs=FLAGS.epochs,
steps_per_epoch=1000,
validation_steps=100,
      validation_data=dataset_fn(FLAGS.batch_size, False, 'train[10%:11%]',
                                 True, 100),
)
save_and_convert(model, FLAGS.output_dir)
def main(_):
if not os.path.exists(FLAGS.output_dir):
os.mkdir(FLAGS.output_dir)
train_and_evaluate()
if __name__ == '__main__':
app.run(main)
| 4,226 | 33.647541 | 86 | py |
models | models-master/research/seq_flow_lite/models/sgnn/run_tflite.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to run a langid TFLite model."""
from absl import app
from absl import flags
import numpy as np
from tensorflow.lite.python import interpreter as interpreter_wrapper # pylint: disable=g-direct-tensorflow-import
FLAGS = flags.FLAGS
flags.DEFINE_string('model', '/tmp/langid/model.tflite',
'Path to LangID TFLite model.')
LANGIDS = ['ar', 'en', 'es', 'fr', 'ru', 'zh', 'unk']
def main(argv):
with open(FLAGS.model, 'rb') as file:
model = file.read()
interpreter = interpreter_wrapper.InterpreterWithCustomOps(
model_content=model,
custom_op_registerers=[
'AddWhitespaceTokenizerCustomOp', 'AddNgramsCustomOp',
'AddSgnnProjectionCustomOp',
])
interpreter.resize_tensor_input(0, [1, 1])
interpreter.allocate_tensors()
input_string = ' '.join(argv[1:])
print('Input: "{}"'.format(input_string))
input_array = np.array([[input_string]], dtype=str)
interpreter.set_tensor(interpreter.get_input_details()[0]['index'],
input_array)
interpreter.invoke()
output = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])
for x in range(output.shape[0]):
for y in range(output.shape[1]):
print('{:>3s}: {:.4f}'.format(LANGIDS[y], output[x][y]))
if __name__ == '__main__':
app.run(main)
| 2,014 | 35.636364 | 115 | py |
models | models-master/research/seq_flow_lite/layers/base_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base layer for building models trained with quantization."""
import tensorflow as tf
TRAIN = "train"
EVAL = "eval"
PREDICT = "infer"
TFLITE = "tflite"
_MODE = [TRAIN, EVAL, PREDICT, TFLITE]
class Parameters:
"""A class that encapsulates parameters."""
def __init__(self,
mode,
quantize=True,
regularizer_scale=0.0,
invalid_logit=-1e6,
initializer=None):
assert isinstance(quantize, bool)
self.quantize = quantize
assert mode in _MODE
self.mode = mode
self.regularizer_scale = regularizer_scale
self.invalid_logit = invalid_logit
self.initializer = initializer
class BaseLayer(tf.keras.layers.Layer):
"""Base class for encoders."""
def __init__(self, parameters, **kwargs):
assert isinstance(parameters, Parameters)
self.parameters = parameters
super(BaseLayer, self).__init__(**kwargs)
def _assert_rank_and_type(self, tensor, rank, dtype=tf.float32):
assert len(tensor.get_shape().as_list()) == rank
assert tensor.dtype == dtype
def add_weight_wrapper(self, shape):
"""Return a weight variable for the given shape."""
if self.parameters.initializer is not None:
initializer = clone_initializer(self.parameters.initializer)
else:
initializer = tf.keras.initializers.GlorotUniform()
weight = self.add_weight(
"weight",
shape,
initializer=initializer,
trainable=True,
dtype=tf.float32)
self.add_reg_loss(weight)
return weight
def quantize_parameter(self, tensor, num_bits=8):
"""Quantize parameters when enabled."""
# For infer mode, toco computes the min/max from the weights offline to
# quantize it. During train/eval this is computed from the current value
# in the session by the graph itself.
if self.parameters.quantize and self.parameters.mode in [TRAIN, EVAL]:
# Toco expects 0.0 to be part of the quantization range.
batch_min = tf.minimum(tf.reduce_min(tensor), 0.0)
batch_max = tf.maximum(tf.reduce_max(tensor), 0.0)
return tf.quantization.fake_quant_with_min_max_vars(
tensor, batch_min, batch_max, num_bits=num_bits)
else:
return tensor
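  # Illustrative effect of the fake quantization above (assumed numbers): with
  # num_bits=8 and an observed range of [-0.5, 1.5], each parameter is snapped
  # to one of 256 evenly spaced levels spanning that (slightly nudged) range,
  # while float values continue to flow through the rest of the graph.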
def add_bias(self, shape):
weight = self.add_weight(
"bias",
shape,
initializer=tf.keras.initializers.Zeros(),
trainable=True)
self.add_reg_loss(weight)
return weight
def add_reg_loss(self, weight):
if self.parameters.regularizer_scale > 0.0:
reg_scale = tf.convert_to_tensor(self.parameters.regularizer_scale)
reg_loss = tf.nn.l2_loss(weight) * reg_scale
self.add_loss(reg_loss)
def assign_moving_average(self, var, update, ema_decay):
return var.assign(var.read_value() * (1 - ema_decay) + (ema_decay) * update)
def quantize_tensor(self, tf_only):
if tf_only and self.parameters.mode == TFLITE:
return False
return self.parameters.quantize
def qrange_sigmoid(self, tensor, tf_only=False):
if self.quantize_tensor(tf_only):
return tf.quantization.fake_quant_with_min_max_args(tensor, 0.0, 1.0)
return tensor
def qrange_tanh(self, tensor, tf_only=False):
if self.quantize_tensor(tf_only):
return tf.quantization.fake_quant_with_min_max_args(tensor, -1.0, 1.0)
return tensor
def quantized_tanh(self, tensor, tf_only=False):
return self.qrange_tanh(tf.tanh(tensor), tf_only)
def quantized_sigmoid(self, tensor, tf_only=False):
return self.qrange_sigmoid(tf.sigmoid(tensor), tf_only)
def get_batch_dimension(self, tensor):
return tensor.get_shape().as_list()[0] or tf.shape(tensor)[0]
def inverse_normalizer(self, mask):
return tf.math.reciprocal(tf.reduce_sum(mask))
def random_drop_to_zero(self, tensor, zero_probability):
rnd = tf.random.uniform(
shape=tf.shape(tensor),
minval=-zero_probability,
maxval=(1.0 - zero_probability),
dtype=tensor.dtype)
return tf.math.ceil(rnd)
def clone_initializer(initializer):
if isinstance(initializer, tf.keras.initializers.Initializer):
return initializer.__class__.from_config(initializer.get_config())
return initializer
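# A minimal sketch of the fake-quantization pattern used by quantize_parameter
# above, written with plain TensorFlow ops so it can be tried in isolation.
# The helper name and the default num_bits are assumptions for illustration.
def _fake_quant_sketch(tensor, num_bits=8):
  # Toco expects 0.0 to be part of the quantization range, so the range is
  # clamped to include it before fake-quantizing.
  batch_min = tf.minimum(tf.reduce_min(tensor), 0.0)
  batch_max = tf.maximum(tf.reduce_max(tensor), 0.0)
  return tf.quantization.fake_quant_with_min_max_vars(
      tensor, batch_min, batch_max, num_bits=num_bits)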
| 4,899 | 33.027778 | 80 | py |
models | models-master/research/seq_flow_lite/layers/misc_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for embedding."""
import math
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import conv_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import embedding_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class AttentionPooling(base_layers.BaseLayer):
"""A basic attention pooling layer."""
def __init__(self, scalar=True, normalize=True, **kwargs):
self.scalar = scalar
    # Attention logits should not have an activation after the linear layer,
    # so they can be positive or negative. This enables the attention
    # distribution to be anything that the network likes. Using a relu
    # activation makes the attention distribution biased towards a uniform
    # distribution. This gets better results for attention pooling. Though
    # some outputs are emphasized for making the classification decision, all
    # other outputs have a non-zero probability of influencing the class. This
    # seems to result in better backprop.
self.attention = dense_layers.BaseQDenseVarLen(
units=1, rank=3, normalize=normalize, **kwargs)
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
super(AttentionPooling, self).__init__(**kwargs)
def build(self, input_shapes):
self.feature_size = input_shapes[-1]
def call(self, inputs, mask, inverse_normalizer):
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
batch_size = self.get_batch_dimension(inputs)
attn_logits = self.attention(inputs, mask, inverse_normalizer)
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
invalid_mask = (1 - mask) * self.parameters.invalid_logit
attn_logits = attn_logits * mask + invalid_mask
attn_logits = tf.reshape(attn_logits, [batch_size, -1])
attention = tf.nn.softmax(attn_logits, axis=-1)
attention = self.qrange_sigmoid(attention, tf_only=True)
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
inputs = tf.reshape(inputs, [-1, self.feature_size])
else:
attention = tf.expand_dims(attention, axis=1)
pre_logits = self.qactivation(tf.matmul(attention, inputs))
return tf.reshape(pre_logits, [batch_size, self.feature_size])
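# A minimal usage sketch for AttentionPooling (illustrative only, untested);
# the batch/time/feature sizes and quantize=False are assumptions, not values
# taken from any released model config.
def _attention_pooling_sketch():
  params = base_layers.Parameters(base_layers.TRAIN, quantize=False)
  pooling = AttentionPooling(parameters=params)
  inputs = tf.random.uniform([2, 8, 16])  # [batch, time, features]
  mask = tf.sequence_mask([5, 8], 8, dtype=tf.float32)  # 1.0 for valid tokens
  mask = tf.expand_dims(mask, axis=2)  # [batch, time, 1]
  inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
  return pooling(inputs, mask, inverse_normalizer)  # [batch, features]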
class TreeInductionLayer(base_layers.BaseLayer):
"""A basic tree induction layer."""
def __init__(self, **kwargs):
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
super(TreeInductionLayer, self).__init__(**kwargs)
def call(self, keys, queries, sequence_length):
key_dim = keys.get_shape().as_list()[-1]
query_dim = queries.get_shape().as_list()[-1]
assert key_dim == query_dim, "Last dimension of keys/queries should match."
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
sequence_mask = tf.sequence_mask(
sequence_length, maxlen=tf.shape(keys)[1], dtype=tf.float32)
sequence_mask = tf.expand_dims(sequence_mask, axis=2)
attn_mask = tf.matmul(sequence_mask, sequence_mask, transpose_b=True)
attn_logits = self.qactivation(tf.matmul(keys, queries, transpose_b=True))
invalid_attn_mask = (1 - attn_mask) * self.parameters.invalid_logit
return attn_logits * attn_mask + invalid_attn_mask
else:
assert self.get_batch_dimension(keys) == 1
assert self.get_batch_dimension(queries) == 1
keys = tf.reshape(keys, [-1, key_dim])
queries = tf.reshape(queries, [-1, key_dim])
result = self.qactivation(tf.matmul(keys, queries, transpose_b=True))
# TODO(b/171063452): Bug needs to be fixed to handle this correctly.
# seq_dim = tf.shape(result)[1]
# result = tf.reshape(result, [1, seq_dim, seq_dim])
return result
class GBSTLayerV2(base_layers.BaseLayer):
"""Tokenization layer."""
def __init__(self,
feature_size,
max_seq_len,
downsample_rate=2,
max_subword_block_width=4,
conv_kernel_size=5,
block_mixing_mode=None,
add_block_pos_embed=False,
**kwargs):
super(GBSTLayerV2, self).__init__(**kwargs)
self.feature_size = feature_size
self.max_seq_len = max_seq_len
self.downsample_rate = downsample_rate
self.subword_blocks_width = [1, 2, 3, 4]
self.max_subword_block_width = len(self.subword_blocks_width)
self.block_mixing_mode = block_mixing_mode
self.add_block_pos_embed = add_block_pos_embed
if self.add_block_pos_embed:
self.block_pos_embedding = embedding_layers.EmbeddingLayer(
shape=[self.max_subword_block_width, self.feature_size], **kwargs)
self.conv_kernel_size = conv_kernel_size
self.conv_layer = conv_layers.EncoderQConvolution(
filters=feature_size,
ksize=conv_kernel_size,
rank=3,
padding="VALID",
activation=None,
**kwargs)
padding = [conv_kernel_size - 1, 0]
self.zero_pad = tf.keras.layers.ZeroPadding1D(padding=padding)
self.block_attn = dense_layers.BaseQDense(
units=1,
rank=3,
activation=None,
normalize=False,
quantize_output=False,
**kwargs)
self.scores_concat = quantization_layers.ConcatQuantization(
axis=3, **kwargs)
self.attn_concat = quantization_layers.ConcatQuantization(axis=0, **kwargs)
self.qact = quantization_layers.ActivationQuantization(**kwargs)
self.qact_dot = quantization_layers.ActivationQuantization(**kwargs)
self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
def call(self, inputs, seq_length):
"""Performs downsampling on the character-scale input representation.
Based in principle on https://arxiv.org/pdf/2106.12672.pdf.
Args:
inputs: float Tensor of shape [batch_size, seq_length, embedding_size].
seq_length: sequence length of shape [batch_size].
Returns:
<float>[batch_size, seq_length / downsample_rate, embedding_size].
Downsampled sequences.
"""
self._assert_rank_and_type(inputs, 3)
bsz = self.get_batch_dimension(inputs)
max_seq_len = self.max_seq_len
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
num_steps = tf.shape(inputs)[1]
inputs = self.zero_pad(inputs)
inputs = self.conv_layer(inputs)
all_block_scores = []
all_sequences = []
for subword_len in self.subword_blocks_width:
if self.add_block_pos_embed:
block_pos_indices = tf.range(subword_len, dtype=tf.int32)
block_pos_indices = tf.reshape(block_pos_indices, [1, -1])
block_pos_embeds = self.block_pos_embedding(block_pos_indices)
tile_len = math.ceil(max_seq_len / float(subword_len))
retiled_block_pos_embeds = tf.repeat(block_pos_embeds, tile_len, axis=1)
inputs += retiled_block_pos_embeds
# For this block size, form candidate block embeddings and scores.
# candidates shape: [batch, seq_len/subword_len, dim]
# block_scores shape: [batch, seq_len/subword_len, 1]
candidates = tf.nn.avg_pool(
inputs, [subword_len], strides=[subword_len], padding="SAME")
candidates = self.conv_layer.quantize_using_output_range(candidates)
block_scores = self.block_attn(candidates)
# Upsample it back to the original sequence length.
retiled_seq = tf.repeat(candidates, subword_len, axis=1)
retiled_block_scores = tf.repeat(block_scores, subword_len, axis=1)
# Make sure everything is the right length and add new dimension to concat
# candidate blocks on.
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
retiled_block_scores = retiled_block_scores[:, :num_steps, :]
retiled_seq = retiled_seq[:, :num_steps, :]
else:
retiled_block_scores = retiled_block_scores[:, :max_seq_len, :]
retiled_seq = retiled_seq[:, :max_seq_len, :]
retiled_seq = tf.expand_dims(retiled_seq, axis=-1)
retiled_block_scores = tf.expand_dims(retiled_block_scores, axis=-1)
all_sequences.append(retiled_seq)
all_block_scores.append(retiled_block_scores)
block_net = self.scores_concat(all_block_scores)
if self.block_mixing_mode == "score_attention":
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
block_attn_steps = []
self.attn_concat(None)
for i in range(num_steps):
block_i = tf.reshape(block_net[:, i:i + 1, :, :], [1, -1])
block_attn_steps.append(tf.matmul(block_i, block_i, transpose_b=True))
block_attn = self.attn_concat(block_attn_steps)
block_attn = tf.reshape(block_attn, [bsz, -1, 1, 1])
else:
block_attn = self.attn_concat(
[tf.matmul(block_net, block_net, transpose_b=True)])
block_attn = tf.nn.softmax(block_attn, axis=1)
block_attn = self.qrange_sigmoid(block_attn, tf_only=True)
block_net_scaled = self.qact(block_attn * block_net)
else:
block_net_scaled = block_net
candidate_embeds = self.conv_layer.quantize_using_output_range(
tf.concat(all_sequences, axis=3))
dot_product = self.qact_dot(block_net_scaled * candidate_embeds)
output = self.qoutput(tf.reduce_mean(dot_product, axis=-1, keepdims=True))
output = tf.reshape(output, [bsz, -1, self.feature_size])
# Removing pad entries for inference mode.
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
output = output[:, :num_steps, :]
# Downsample by mean pooling.
if self.downsample_rate > 1:
output = tf.nn.avg_pool(
output, (self.downsample_rate,),
strides=(self.downsample_rate,),
padding="VALID")
return output
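# A minimal sketch of the candidate-block construction used in GBSTLayerV2
# above (illustrative only): average-pool with a block width, then repeat so
# every input position is aligned with its block embedding. The helper name
# and shapes are assumptions.
def _block_candidates_sketch(inputs, subword_len=2):
  # inputs: [batch, seq_len, dim] -> [batch, ceil(seq_len / subword_len), dim]
  candidates = tf.nn.avg_pool(
      inputs, [subword_len], strides=[subword_len], padding="SAME")
  # Upsample back to (roughly) the original length by repetition.
  return tf.repeat(candidates, subword_len, axis=1)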
| 10,559 | 42.636364 | 80 | py |
models | models-master/research/seq_flow_lite/layers/dense_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic dense layers."""
import copy
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class BaseQDense(base_layers.BaseLayer):
"""Quantized encoder dense layers."""
def __init__(self,
units,
activation=tf.keras.layers.ReLU(),
bias=True,
rank=2,
normalize=True,
quantize_output=True,
normalization_fn=None,
**kwargs):
self.units = units
self.rank = rank
assert rank >= 2 and rank <= 4
self.activation = activation
self.bias = bias
self.normalize = normalize
self.quantize_output = quantize_output
if quantize_output:
self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
self._create_normalizer(normalization_fn=normalization_fn, **kwargs)
super(BaseQDense, self).__init__(**kwargs)
def build(self, input_shapes):
assert len(input_shapes) == self.rank
if self.rank == 4:
assert input_shapes[1] == 1 or input_shapes[2] == 1
self.in_units = input_shapes[-1]
shape = [self.in_units, self.units]
self.w = self.add_weight_wrapper(shape=shape)
if self.bias:
self.b = self.add_bias(shape=[self.units])
def _create_normalizer(self, normalization_fn, **kwargs):
if normalization_fn is None:
self.normalization = normalization_layers.BatchNormalization(**kwargs)
else:
self.normalization = copy.deepcopy(normalization_fn)
def _dense_r2(self, inputs, normalize_method):
outputs = tf.matmul(inputs, self.quantize_parameter(self.w))
if self.bias:
outputs = tf.nn.bias_add(outputs, self.b)
if self.normalize:
outputs = normalize_method(outputs)
if self.activation:
outputs = self.activation(outputs)
if self.quantize_output:
return self.qoutput(outputs)
else:
return outputs
def _dense_r34(self, inputs, normalize_method):
bsz = self.get_batch_dimension(inputs)
outputs = tf.reshape(inputs, [-1, self.in_units])
outputs = self._dense_r2(outputs, normalize_method)
if self.rank == 3:
return tf.reshape(outputs, [bsz, -1, self.units])
elif inputs.get_shape().as_list()[1] == 1:
return tf.reshape(outputs, [bsz, 1, -1, self.units])
else:
return tf.reshape(outputs, [bsz, -1, 1, self.units])
def call(self, inputs):
def normalize_method(tensor):
return self.normalization(tensor)
return self._do_call(inputs, normalize_method)
def _do_call(self, inputs, normalize_method):
if self.rank == 2:
return self._dense_r2(inputs, normalize_method)
return self._dense_r34(inputs, normalize_method)
def quantize_using_output_range(self, tensor):
return self.qoutput.quantize_using_range(tensor)
class BaseQDenseVarLen(BaseQDense):
"""Dense on variable length sequence."""
def _create_normalizer(self, normalization_fn, **kwargs):
if normalization_fn is None:
self.normalization = normalization_layers.VarLenBatchNormalization(
rank=2, **kwargs)
else:
self.normalization = copy.deepcopy(normalization_fn)
def call(self, inputs, mask, inverse_normalizer=None):
if inverse_normalizer is None:
inverse_normalizer = self.inverse_normalizer(mask)
def normalize_method(tensor):
maskr2 = tf.reshape(mask, [-1, 1])
return self.normalization(tensor, maskr2, inverse_normalizer)
return self._do_call(inputs, normalize_method)
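# A minimal usage sketch for BaseQDenseVarLen (illustrative only, untested):
# the layer takes a rank-3 input, a {0, 1} per-token mask and optionally the
# reciprocal of the number of valid tokens. All sizes below are assumptions.
def _dense_varlen_sketch():
  params = base_layers.Parameters(base_layers.TRAIN, quantize=False)
  dense = BaseQDenseVarLen(units=8, rank=3, parameters=params)
  inputs = tf.random.uniform([2, 4, 16])  # [batch, time, features]
  mask = tf.sequence_mask([3, 4], 4, dtype=tf.float32)
  mask = tf.expand_dims(mask, axis=2)  # [batch, time, 1]
  return dense(inputs, mask)  # [batch, time, units]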
| 4,317 | 34.105691 | 80 | py |
models | models-master/research/seq_flow_lite/layers/qrnn_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for QRNN."""
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import conv_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
from tf_ops import tf_custom_ops_py # import seq_flow_lite module
QUASI_RNN_POOLING_F = "f"
QUASI_RNN_POOLING_FO = "fo"
QUASI_RNN_POOLING_IFO = "ifo"
_QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP = {
QUASI_RNN_POOLING_F: 2,
QUASI_RNN_POOLING_FO: 3,
QUASI_RNN_POOLING_IFO: 4,
}
class QRNNUnidirectionalPoolingCore(base_layers.BaseLayer):
"""Create a unidirectional QRNN pooling inner loop."""
def __init__(self, forward=True, **kwargs):
self.forward = forward
super(QRNNUnidirectionalPoolingCore, self).__init__(**kwargs)
def call(self, multiplier, constant):
if self.parameters.mode != base_layers.TFLITE:
return self._qrnn_pooling(multiplier, constant)
else:
return tf_custom_ops_py.pooling_op(multiplier, constant,
[1.0 if self.forward else 0.0])
  def _qrnn_pooling(self, multiplier, constant):
"""Pooling step computes the internal states for all timesteps."""
    assert multiplier.get_shape().as_list() == constant.get_shape().as_list()
    gate_static_shape = multiplier.get_shape().as_list()
    gate_shape = tf.shape(multiplier)
feature_size = gate_static_shape[2]
assert feature_size is not None
batch_size = gate_static_shape[0] or gate_shape[0]
max_timestep = gate_static_shape[1] or gate_shape[1]
dynamic_loop = gate_static_shape[1] is None
# Get multiplier/constant in [timestep, batch, feature_size] format
    multiplier_transposed = tf.transpose(multiplier, [1, 0, 2])
constant_transposed = tf.transpose(constant, [1, 0, 2])
# Start state
state = tf.zeros((batch_size, feature_size), tf.float32)
if dynamic_loop:
# One pooling step
def _step(index, state, states):
m = multiplier_transposed[index, :, :]
c = constant_transposed[index, :, :]
new_state = state * m + c
next_index = index + 1 if self.forward else index - 1
return next_index, new_state, states.write(index, new_state)
# Termination condition
def _termination(index, state, states):
del state, states
return (index < max_timestep) if self.forward else (index >= 0)
states = tf.TensorArray(tf.float32, size=max_timestep)
index = 0 if self.forward else max_timestep - 1
# Dynamic pooling loop
_, state, states = tf.while_loop(_termination, _step,
[index, state, states])
states = states.stack()
else:
# Unstack them to process one timestep at a time
multiplier_list = tf.unstack(multiplier_transposed)
constant_list = tf.unstack(constant_transposed)
states = []
# Unroll either forward or backward based on the flag `forward`
timesteps = list(range(max_timestep)) if self.forward else reversed(
list(range(max_timestep)))
# Static pooling loop
for time in timesteps:
state = state * multiplier_list[time] + constant_list[time]
states.append(state)
# Stack them back in the right order
states = tf.stack(states if self.forward else list(reversed(states)))
# Change to [batch, timestep, feature_size]
return tf.transpose(states, [1, 0, 2])
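# A minimal numeric sketch of the recurrence computed above in the forward
# direction: state_t = state_{t-1} * multiplier_t + constant_t. The three-step
# values are made up and only show what the loop produces.
def _pooling_recurrence_sketch():
  multiplier = [0.5, 0.5, 0.5]
  constant = [1.0, 1.0, 1.0]
  state, states = 0.0, []
  for m, c in zip(multiplier, constant):
    state = state * m + c
    states.append(state)
  return states  # [1.0, 1.5, 1.75]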
class QRNNUnidirectionalPooling(base_layers.BaseLayer):
"""Create a unidirectional QRNN pooling."""
def __init__(self,
zoneout_probability=0.0,
forward=True,
pooling=QUASI_RNN_POOLING_FO,
output_quantized=True,
**kwargs):
self.zoneout_probability = zoneout_probability
self.pooling = pooling
self.forward = forward
self.output_quantized = output_quantized
if output_quantized and self.pooling == QUASI_RNN_POOLING_IFO:
self.qoutputs = quantization_layers.ActivationQuantization()
self.num_gates = _QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP[pooling]
assert pooling in _QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP.keys()
self.pooling_core = QRNNUnidirectionalPoolingCore(forward=forward, **kwargs)
super(QRNNUnidirectionalPooling, self).__init__(**kwargs)
def call(self, gates, mask):
return self._create_qrnn_pooling_unidirectional(gates, mask)
def _qrnn_preprocess(self, gates):
"""Preprocess the gate inputs to the pooling layer."""
assert self.num_gates == len(gates)
dim = lambda tensor, index: tensor.get_shape().as_list()[index]
for tensor in gates:
assert len(tensor.get_shape().as_list()) == 3
for idx in range(3):
assert dim(gates[0], idx) == dim(tensor, idx)
if self.pooling == QUASI_RNN_POOLING_F:
z = self.quantized_tanh(gates[0], tf_only=True)
f = self.quantized_sigmoid(gates[1], tf_only=True)
return f, self.qrange_tanh(self.qrange_sigmoid(1 - f) * z), 1
elif self.pooling == QUASI_RNN_POOLING_FO:
z = self.quantized_tanh(gates[0], tf_only=True)
f = self.quantized_sigmoid(gates[1], tf_only=True)
o = self.quantized_sigmoid(gates[2], tf_only=True)
return f, self.qrange_tanh(self.qrange_sigmoid(1 - f) * z), o
else: # self.pooling == QUASI_RNN_POOLING_IFO:
z = self.quantized_tanh(gates[0], tf_only=True)
i = self.quantized_sigmoid(gates[1], tf_only=True)
f = self.quantized_sigmoid(gates[2], tf_only=True)
o = self.quantized_sigmoid(gates[3], tf_only=True)
return f, self.qrange_tanh(i * z), o
def _qrnn_postprocess(self, states, multiplier):
"""Postprocess the states and return the output tensors."""
if self.pooling == QUASI_RNN_POOLING_F:
return states
elif self.pooling == QUASI_RNN_POOLING_FO:
return self.qrange_tanh(states) * multiplier
else: # self.pooling == QUASI_RNN_POOLING_IFO
return self.qoutputs(states) * multiplier
  def _qrnn_zoneout(self, multiplier, constant):
"""Zoneout regularization for Quasi RNN."""
enable_zoneout = self.zoneout_probability > 0.0
if enable_zoneout and self.parameters.mode == base_layers.TRAIN:
# zoneout_mask is 1.0 with self.zoneout_probability and 0.0 with
# probability (1 - self.zoneout_probability)
      zoneout_mask = tf.random.uniform(tf.shape(multiplier), maxval=1.0)
zoneout_mask = tf.floor(zoneout_mask + self.zoneout_probability)
# When zoneout_mask is 1.0, do not update the state, retain the old state.
# This is achieved by making the multiplier 1.0 and constant 0.0.
# When zoneout_mask is 0.0 the multiplier and constant are unaffected.
      # multiplier is expected to be in the range [0.0, 1.0]. This is true since
# it is the result of a sigmoid.
      multiplier = tf.maximum(zoneout_mask, multiplier)
constant *= (1 - zoneout_mask)
    return multiplier, constant
def _create_qrnn_pooling_unidirectional(self, gates, mask):
"""Create QRNN Pooling in either forward or backward direction."""
m1, c1, outgate = self._qrnn_preprocess(gates)
# For inference zero padding will not be used. Hence sequence length is
# not necessary.
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
m1 = m1 * mask + (1 - mask) * tf.ones_like(m1)
c1 *= mask
m1, c1 = self._qrnn_zoneout(m1, c1)
states = self.pooling_core(m1, c1)
outputs = self._qrnn_postprocess(states, outgate)
# For inference zero padding will not be used. Hence sequence length is
# not necessary.
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
outputs *= mask
if self.output_quantized:
if self.pooling in [QUASI_RNN_POOLING_FO, QUASI_RNN_POOLING_F]:
outputs = self.qrange_tanh(outputs)
else:
outputs = self.qoutputs.quantize_using_range(outputs)
return outputs
class QRNNUnidirectional(base_layers.BaseLayer):
"""Create a unidirectional QRNN encoder."""
def __init__(self,
kwidth,
state_size,
zoneout_probability=0.0,
forward=True,
pooling=QUASI_RNN_POOLING_FO,
output_quantized=True,
normalization_fn=None,
**kwargs):
self.forward = forward
self.kwidth = kwidth
self.pooling = pooling
self.state_size = state_size
assert pooling in _QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP.keys()
self.num_gates = _QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP[pooling]
self.gate_layers = []
for _ in range(self.num_gates):
self.gate_layers.append(
conv_layers.EncoderQConvolutionVarLen(
filters=state_size,
ksize=kwidth,
rank=3,
padding="VALID",
activation=None,
normalization_fn=normalization_fn,
**kwargs))
padding = [kwidth - 1, 0] if forward else [0, kwidth - 1]
self.zero_pad = tf.keras.layers.ZeroPadding1D(padding=padding)
self.qrnn_pooling = QRNNUnidirectionalPooling(
forward=forward,
zoneout_probability=zoneout_probability,
output_quantized=output_quantized,
pooling=pooling,
**kwargs)
super(QRNNUnidirectional, self).__init__(**kwargs)
def call(self, inputs, mask, inverse_normalizer=None):
if inverse_normalizer is None:
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
maskr4 = tf.expand_dims(mask, axis=1)
padded_inputs = self.zero_pad(inputs)
gates = [
layer(padded_inputs, maskr4, inverse_normalizer)
for layer in self.gate_layers
]
return self.qrnn_pooling(gates, mask)
class QRNNUnidirectionalWithBottleneck(base_layers.BaseLayer):
"""Create a unidirectional QRNN encoder with bottlenecks."""
def __init__(self,
kwidth,
state_size,
bottleneck_size,
zoneout_probability=0.0,
forward=True,
pooling=QUASI_RNN_POOLING_FO,
output_quantized=True,
**kwargs):
self.bottleneck_size = bottleneck_size
self.state_size = state_size
self.forward = forward
self.kwidth = kwidth
self.pooling = pooling
self.state_size = state_size
assert pooling in _QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP.keys()
self.num_gates = _QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP[pooling]
self.qrnn_pooling = QRNNUnidirectionalPooling(
forward=forward,
zoneout_probability=zoneout_probability,
output_quantized=output_quantized,
pooling=pooling,
**kwargs)
self.pre_conv_layers = []
self.gate_layers = []
self.post_conv_layers = []
for _ in range(self.num_gates):
self.pre_conv_layers.append(
dense_layers.BaseQDense(bottleneck_size, rank=3, **kwargs))
self.gate_layers.append(
conv_layers.EncoderQConvolution(
filters=bottleneck_size,
ksize=kwidth,
rank=3,
padding="SAME",
normalization_fn=None,
**kwargs))
self.post_conv_layers.append(
dense_layers.BaseQDense(
state_size, rank=3, activation=None, **kwargs))
super(QRNNUnidirectionalWithBottleneck, self).__init__(**kwargs)
def call(self, inputs, mask, inverse_normalizer=None):
if inverse_normalizer is None:
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
pre_conv_out = [layer(inputs) for layer in self.pre_conv_layers]
gates = [layer(pre_conv_out[i]) for i, layer in enumerate(self.gate_layers)]
post_conv_out = [
layer(gates[i]) for i, layer in enumerate(self.post_conv_layers)
]
return self.qrnn_pooling(post_conv_out, mask)
class QRNNBidirectional(base_layers.BaseLayer):
"""Create a bidirectional QRNN encoder."""
def __init__(self,
kwidth,
state_size,
zoneout_probability=0.0,
pooling=QUASI_RNN_POOLING_FO,
bottleneck_size=None,
normalization_fn=None,
**kwargs):
self.pooling = pooling
if bottleneck_size is None:
self.forward = QRNNUnidirectional(
kwidth=kwidth,
state_size=state_size,
forward=True,
output_quantized=False,
zoneout_probability=zoneout_probability,
pooling=pooling,
normalization_fn=normalization_fn,
**kwargs)
self.backward = QRNNUnidirectional(
kwidth=kwidth,
state_size=state_size,
forward=False,
output_quantized=False,
zoneout_probability=zoneout_probability,
pooling=pooling,
normalization_fn=normalization_fn,
**kwargs)
else:
assert normalization_fn is None, (
"normalization_fn will not take an effect")
self.forward = QRNNUnidirectionalWithBottleneck(
kwidth=kwidth,
state_size=state_size,
bottleneck_size=bottleneck_size,
forward=True,
output_quantized=False,
zoneout_probability=zoneout_probability,
pooling=pooling,
**kwargs)
self.backward = QRNNUnidirectionalWithBottleneck(
kwidth=kwidth,
state_size=state_size,
bottleneck_size=bottleneck_size,
forward=False,
output_quantized=False,
zoneout_probability=zoneout_probability,
pooling=pooling,
**kwargs)
self.qconcat = quantization_layers.ConcatQuantization(axis=2, **kwargs)
super(QRNNBidirectional, self).__init__(**kwargs)
def call(self, inputs, mask, inverse_normalizer=None):
if inverse_normalizer is None:
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
fwd_outputs = self.forward(inputs, mask, inverse_normalizer)
bwd_outputs = self.backward(inputs, mask, inverse_normalizer)
if self.pooling in [QUASI_RNN_POOLING_FO, QUASI_RNN_POOLING_F]:
outputs = [self.qrange_tanh(fwd_outputs), self.qrange_tanh(bwd_outputs)]
outputs = self.qrange_tanh(tf.concat(outputs, axis=2))
else:
outputs = self.qconcat([fwd_outputs, bwd_outputs])
return outputs
class QRNNBidirectionalStack(base_layers.BaseLayer):
"""Create a stack of bidirectional QRNN encoder."""
def __init__(self,
num_layers,
kwidth,
state_size,
zoneout_probability=0.0,
layerwise_decaying_zoneout=True,
pooling=QUASI_RNN_POOLING_FO,
bottleneck_size=None,
normalization_fn=None,
**kwargs):
self.layers = []
zp = zoneout_probability
for idx in range(num_layers):
if layerwise_decaying_zoneout:
zp = (zoneout_probability**(idx + 1))
self.layers.append(
QRNNBidirectional(
kwidth=kwidth,
state_size=state_size,
zoneout_probability=zp,
pooling=pooling,
bottleneck_size=bottleneck_size,
normalization_fn=normalization_fn,
**kwargs))
super(QRNNBidirectionalStack, self).__init__(**kwargs)
def call(self, inputs, maskr3, inverse_normalizer):
return self._apply_qrnn_stack(inputs, maskr3, inverse_normalizer)
def _apply_qrnn_stack(self, inputs, mask3, inverse_normalizer):
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
inputs = inputs * mask3
for layer in self.layers:
outputs = layer(inputs, mask3, inverse_normalizer)
inputs = outputs
return outputs
class QRNNBidirectionalStackWithSeqLength(QRNNBidirectionalStack):
def call(self, inputs, sequence_length):
mask = tf.sequence_mask(
sequence_length, tf.shape(inputs)[1], dtype=tf.float32)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
maskr3 = tf.expand_dims(mask, 2)
return self._apply_qrnn_stack(inputs, maskr3, inverse_normalizer)
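# A minimal usage sketch for QRNNBidirectionalStackWithSeqLength (illustrative
# only, untested); every size below is an assumption rather than a value from
# a real model config.
def _qrnn_stack_sketch():
  params = base_layers.Parameters(base_layers.TRAIN, quantize=False)
  stack = QRNNBidirectionalStackWithSeqLength(
      num_layers=2, kwidth=3, state_size=32, parameters=params)
  inputs = tf.random.uniform([2, 6, 16])  # [batch, time, features]
  sequence_length = tf.constant([4, 6])
  return stack(inputs, sequence_length)  # [batch, time, 2 * state_size]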
| 17,000 | 36.864143 | 80 | py |
models | models-master/research/seq_flow_lite/layers/projection_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow projection creator for PRADO model."""
from absl import logging
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from tf_ops import sequence_string_projection_op as ssp # import seq_flow_lite module
from tf_ops import sequence_string_projection_op_v2 as sspv2 # import seq_flow_lite module
class ProjectionLayer(base_layers.BaseLayer):
"""Base class for encoders."""
def __init__(self, model_config, mode, **kwargs):
"""Create projection."""
def _get_params(varname, default_value=None):
value = model_config[varname] if varname in model_config else default_value
default = "" if varname in model_config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
self.mode = mode
_get_params("feature_size")
_get_params("max_seq_len", 0)
_get_params("add_eos_tag", False)
_get_params("add_bos_tag", False)
_get_params("hashtype", "murmur")
_get_params("split_on_space", True)
_get_params("token_separators", "")
_get_params("vocabulary", "")
_get_params("quantize")
_get_params("word_novelty_bits", 0)
_get_params("doc_size_levels", 0)
self.distortion_probability = 0.0
if mode == base_layers.TRAIN:
_get_params("distortion_probability", 0.0)
parameters = base_layers.Parameters(mode, self.quantize)
super(ProjectionLayer, self).__init__(parameters=parameters, **kwargs)
def call(self, inputs):
projection, _, seq_length = ssp.sequence_string_projection(
input=inputs,
feature_size=self.feature_size,
max_splits=self.max_seq_len - 1,
hashtype=self.hashtype,
distortion_probability=self.distortion_probability,
split_on_space=self.split_on_space,
token_separators=self.token_separators,
word_novelty_bits=self.word_novelty_bits,
doc_size_levels=self.doc_size_levels,
add_eos_tag=self.add_eos_tag,
add_bos_tag=self.add_bos_tag,
vocabulary=self.vocabulary)
modes = [base_layers.PREDICT, base_layers.TFLITE]
if self.mode not in modes and self.max_seq_len > 0:
short_by = self.max_seq_len - tf.shape(projection)[1]
projection = tf.pad(projection, [[0, 0], [0, short_by], [0, 0]])
batch_size = self.get_batch_dimension(inputs)
projection = tf.reshape(projection,
[batch_size, self.max_seq_len, self.feature_size])
projection = self.qrange_tanh(projection)
return projection, seq_length
class ProjectionLayerPreSegmented(base_layers.BaseLayer):
"""Base class for encoders."""
def __init__(self, model_config, mode, **kwargs):
"""Create projection."""
def _get_params(varname, default_value=None):
value = model_config[varname] if varname in model_config else default_value
default = "" if varname in model_config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
self.mode = mode
_get_params("feature_size")
_get_params("add_eos_tag", False)
_get_params("add_bos_tag", False)
_get_params("vocabulary", "")
_get_params("quantize")
self.distortion_probability = 0.0
if mode == base_layers.TRAIN:
_get_params("distortion_probability", 0.0)
parameters = base_layers.Parameters(mode, self.quantize)
super(ProjectionLayerPreSegmented, self).__init__(
parameters=parameters, **kwargs)
def call(self, inputs):
tokens, sequence_length = inputs
projection = sspv2.sequence_string_projection_v2(
input=tokens,
sequence_length=sequence_length,
feature_size=self.feature_size,
distortion_probability=self.distortion_probability,
add_eos_tag=self.add_eos_tag,
add_bos_tag=self.add_bos_tag,
vocabulary=self.vocabulary)
modes = [base_layers.PREDICT, base_layers.TFLITE]
if self.mode in modes:
projection = self.qrange_tanh(projection)
return projection
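# A minimal sketch of the dict-style model_config consumed by the projection
# layers above (illustrative only); the keys mirror the _get_params calls and
# the values are assumptions, not settings from a released model.
_EXAMPLE_PROJECTION_CONFIG = {
    "feature_size": 512,
    "max_seq_len": 128,
    "add_eos_tag": True,
    "quantize": True,
}
# A layer would then be constructed and applied roughly as:
# projection_layer = ProjectionLayer(_EXAMPLE_PROJECTION_CONFIG,
#                                    base_layers.TRAIN)
# projection, seq_length = projection_layer(batch_of_strings)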
| 4,725 | 38.057851 | 90 | py |
models | models-master/research/seq_flow_lite/layers/embedding_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for embedding."""
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class EmbeddingLayer(base_layers.BaseLayer):
"""Embedding layer."""
def __init__(self,
shape,
num_bits=8,
initializer=None,
trainable=True,
**kwargs):
self.shape = shape
self.quantizer = quantization_layers.ActivationQuantization(
num_bits=num_bits, **kwargs)
super(EmbeddingLayer, self).__init__(**kwargs)
if initializer is None:
initializer = tf.keras.initializers.GlorotUniform()
self.initializer = initializer
self.trainable = trainable
def build(self, input_shapes):
self.embedding_table = self.add_weight(
name="embedding_table",
shape=self.shape,
initializer=self.initializer,
trainable=self.trainable,
dtype=tf.float32)
if self.trainable:
self.add_reg_loss(self.embedding_table)
def call(self, indices):
assert indices.dtype in [tf.int64, tf.int32]
outputs = tf.nn.embedding_lookup(self.embedding_table, indices)
return self.quantizer(outputs)
class EmbeddingFullyConnected(EmbeddingLayer):
"""Uses embedding table as weights in a fully connected op."""
def __init__(self, **kwargs):
shape = kwargs.pop("shape", None)
initializer = kwargs.pop("initializer", None)
self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
super(EmbeddingFullyConnected, self).__init__(
shape=shape, initializer=initializer, **kwargs)
def fully_connected(self, inputs, bias=None, weights_scale_factor=None):
# This method can only be called after a call to "call" method in this class
self._assert_rank_and_type(inputs, 2)
weights = self.embedding_table
if weights_scale_factor is not None:
weights = weights * weights_scale_factor
outputs = tf.matmul(inputs, weights, transpose_b=True)
if bias is not None:
outputs = tf.nn.bias_add(outputs, bias)
return self.qoutput(outputs)
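# A minimal usage sketch for EmbeddingFullyConnected (illustrative only,
# untested): the same table is used for input lookup and, transposed, as the
# output projection (tied weights). The vocabulary and embedding sizes are
# assumptions.
def _tied_embedding_sketch():
  params = base_layers.Parameters(base_layers.TRAIN, quantize=False)
  embedding = EmbeddingFullyConnected(shape=[100, 16], parameters=params)
  token_ids = tf.constant([[3, 7, 11]], dtype=tf.int32)
  embedded = embedding(token_ids)  # [1, 3, 16]
  final_state = tf.reshape(embedded[:, -1, :], [-1, 16])
  return embedding.fully_connected(final_state)  # [1, 100] logits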
| 2,814 | 36.039474 | 80 | py |
models | models-master/research/seq_flow_lite/layers/normalization_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for normalization."""
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
from tf_ops import tf_custom_ops_py # import seq_flow_lite module
class BatchNormalization(base_layers.BaseLayer):
"""A class that applies batch normalization to the input tensor."""
def __init__(self, ema_decay=0.999, **kwargs):
self.ema_decay = ema_decay
super(BatchNormalization, self).__init__(**kwargs)
def build(self, input_shapes):
self.reduce_dims = list(range(len(input_shapes) - 1))
shape = [input_shapes[-1]]
self.offset = self.add_weight(
"offset",
shape=shape,
initializer=tf.keras.initializers.Zeros(),
trainable=True)
self.scale = self.add_weight(
"scale",
shape=shape,
initializer=tf.keras.initializers.Ones(),
trainable=True)
self.mva_mean = self.add_weight(
"mva_mean",
shape=shape,
initializer=tf.keras.initializers.Zeros(),
trainable=False)
self.mva_var = self.add_weight(
"mva_variance",
shape=shape,
initializer=tf.keras.initializers.Ones(),
trainable=False)
def call(self, inputs):
mean_mom, var_mom = None, None
if self.parameters.mode == base_layers.TRAIN:
mean_mom, var_mom = tf.nn.moments(inputs, self.reduce_dims)
return self._batch_norm(inputs, mean_mom, var_mom)
def _batch_norm(self, inputs, mean_mom, var_mom):
if self.parameters.mode == base_layers.TRAIN:
      # During training compute summary stats, update them to moving average
      # variables and use the summary stats for batch normalization.
with tf.control_dependencies([
self.assign_moving_average(self.mva_mean, mean_mom, self.ema_decay),
self.assign_moving_average(self.mva_var, var_mom, self.ema_decay)
]):
tensor = tf.nn.batch_normalization(inputs, mean_mom, var_mom,
self.offset, self.scale, 1e-9)
else:
# During eval/inference use the moving average variable for batch
# normalization. The variables would be frozen to constants before
# saving graph.
tensor = tf.nn.batch_normalization(inputs, self.mva_mean, self.mva_var,
self.offset, self.scale, 1e-9)
return tensor
class VarLenBatchNormalization(BatchNormalization):
"""A class that applies batch normalization to the input tensor."""
def __init__(self, rank=2, **kwargs):
self.rank = rank
assert rank == 2 or rank == 4
super(VarLenBatchNormalization, self).__init__(**kwargs)
def _reduce(self, tensor, multiplier):
return tf.reduce_sum(tensor, axis=self.reduce_dims) * multiplier
def call(self, inputs, mask, inverse_normalizer):
if self.parameters.mode == base_layers.TRAIN:
self._assert_rank_and_type(inputs, self.rank)
self._assert_rank_and_type(mask, self.rank)
inputs = mask * inputs
mean_mom = self._reduce(inputs, inverse_normalizer)
var_mom = self._reduce(inputs * inputs, inverse_normalizer)
return mask * self._batch_norm(inputs, mean_mom, var_mom)
elif self.parameters.mode == base_layers.EVAL:
return mask * self._batch_norm(inputs, None, None)
return self._batch_norm(inputs, None, None)
class LayerNormalization(base_layers.BaseLayer):
"""A class that applies layer normalization to the input tensor."""
def __init__(self, axes=None, **kwargs):
self.axes = axes or [-1]
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
super(LayerNormalization, self).__init__(**kwargs)
def build(self, input_shape):
self.rank = len(input_shape)
for i, axis in enumerate(self.axes):
if axis < 0:
self.axes[i] += self.rank
assert (self.axes[i] > 0 and self.axes[i] < self.rank)
self.offset = self.add_weight(
"offset",
shape=[1],
initializer=tf.keras.initializers.Zeros(),
trainable=True)
self.scale = self.add_weight(
"scale",
shape=[1],
initializer=tf.keras.initializers.Ones(),
trainable=True)
def call(self, tensor):
tensor = self.qactivation(tensor)
if self.parameters.mode != base_layers.TFLITE:
mean, variance = tf.nn.moments(tensor, self.axes, keepdims=True)
      # If all the values in the tensor are the same, the variance will be 0.
      # Adding a small epsilon to the variance ensures that we get 0 as the
      # normalized result instead of NaN in the resulting tensor.
tensor = (tensor - mean) / tf.sqrt(variance + 1e-6)
return tensor * self.scale + self.offset
else:
return tf_custom_ops_py.layer_norm(
tensor, self.scale, self.offset, axes=self.axes)
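# A minimal sketch of the masked statistics computed above by
# VarLenBatchNormalization (illustrative only): padded rows are zeroed out and
# the mean and second moment are taken over valid tokens only, mirroring
# _reduce with inverse_normalizer = 1 / number_of_valid_tokens.
def _masked_moments_sketch(tensor, mask):
  # tensor: [tokens, features]; mask: [tokens, 1] with 1.0 for valid rows.
  inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
  tensor = tensor * mask
  mean = tf.reduce_sum(tensor, axis=0) * inverse_normalizer
  second_moment = tf.reduce_sum(tensor * tensor, axis=0) * inverse_normalizer
  return mean, second_moment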
| 5,515 | 38.4 | 80 | py |
models | models-master/research/seq_flow_lite/layers/transformer_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for Transformer encoder."""
# pylint: disable=arguments-renamed
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import embedding_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
from tf_ops import tf_custom_ops_py # import seq_flow_lite module
class SelfAttention(base_layers.BaseLayer):
"""Self attention encoder (not suitable for causal attention)."""
def __init__(self,
model_dimension,
num_heads,
attention_dropout_rate=0.0,
**kwargs):
self.model_dimension = model_dimension
self.num_heads = num_heads
self.filters = model_dimension // num_heads
self.dense_layers = [
dense_layers.BaseQDenseVarLen(
units=self.filters, activation=None, **kwargs)
for i in range(num_heads * 3)
]
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
self.attention_dropout_rate = attention_dropout_rate
self.qconcat = quantization_layers.ConcatQuantization(axis=2, **kwargs)
super(SelfAttention, self).__init__(**kwargs)
def call(self, inputs, mask, inverse_normalizer, attn_mask=None):
batch_size = self.get_batch_dimension(inputs)
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
inputs_rank2 = tf.reshape(inputs, [-1, self.model_dimension])
mask_rank2 = tf.reshape(mask, [-1, 1])
tensors = [
layer(inputs_rank2, mask_rank2, inverse_normalizer)
for layer in self.dense_layers
]
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
tensors = [
tf.reshape(tensor, [batch_size, -1, self.filters])
for tensor in tensors
]
context = []
if attn_mask is None:
attn_mask = tf.matmul(mask, tf.transpose(mask, [0, 2, 1]))
if (self.attention_dropout_rate > 0.0 and
self.parameters.mode == base_layers.TRAIN):
attn_mask *= self.random_drop_to_zero(attn_mask,
self.attention_dropout_rate)
invalid_mask = (1 - attn_mask) * self.parameters.invalid_logit
for _ in range(self.num_heads):
keys = tensors.pop()
values = tensors.pop()
queries = tensors.pop()
# Attention is not scaled dot product, batch normalization compensates
# for it.
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
queries = tf.transpose(queries, [0, 2, 1])
attn_logits = self.qactivation(tf.matmul(keys, queries))
attn_logits_masked = attn_logits * attn_mask + invalid_mask
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
context.append(tf.matmul(attention, values))
else:
queries = tf.transpose(queries)
attn_logits_masked = self.qactivation(tf.matmul(keys, queries))
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
ctx = tf.matmul(attention, values)
ctx = tf.reshape(ctx, [1, -1, self.filters])
context.append(ctx)
return self.qconcat(context)
class SelfAttentionV2(base_layers.BaseLayer):
"""Self attention encoder (not suitable for causal attention)."""
def __init__(self,
model_dimension,
num_heads,
attention_dropout_rate=0.0,
**kwargs):
self.model_dimension = model_dimension
self.num_heads = num_heads
self.filters = model_dimension // num_heads
self.dense_layers = dense_layers.BaseQDenseVarLen(
units=model_dimension * 3, activation=None, **kwargs)
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
self.attention_dropout_rate = attention_dropout_rate
self.qconcat = quantization_layers.ConcatQuantization(axis=1, **kwargs)
super(SelfAttentionV2, self).__init__(**kwargs)
def call(self, inputs, mask, inverse_normalizer, attn_mask=None):
bsz = self.get_batch_dimension(inputs)
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
inputs_rank2 = tf.reshape(inputs, [-1, self.model_dimension])
mask_rank2 = tf.reshape(mask, [-1, 1])
tensors = self.dense_layers(inputs_rank2, mask_rank2, inverse_normalizer)
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
tensors = tf.reshape(tensors, [bsz, -1, 3, self.num_heads, self.filters])
tensors = tf.unstack(tensors, axis=2)
else:
tensors = tf.split(tensors, self.num_heads * 3, axis=1)
if attn_mask is None:
attn_mask = tf.matmul(mask, mask, transpose_b=True)
if (self.attention_dropout_rate > 0.0 and
self.parameters.mode == base_layers.TRAIN):
attn_mask *= self.random_drop_to_zero(attn_mask,
self.attention_dropout_rate)
attn_mask = tf.expand_dims(attn_mask, axis=1)
invalid_mask = (1 - attn_mask) * self.parameters.invalid_logit
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
queries = tf.transpose(tensors[0], [0, 2, 1, 3])
keys = tf.transpose(tensors[1], [0, 2, 1, 3])
values = tf.transpose(tensors[2], [0, 2, 1, 3])
attn_logits = self.qactivation(tf.matmul(queries, keys, transpose_b=True))
attn_logits_masked = attn_logits * attn_mask + invalid_mask
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
result = tf.matmul(attention, values)
result = tf.transpose(result, [0, 2, 1, 3])
result = tf.reshape(result, [bsz, -1, self.model_dimension])
return self.qconcat([result])
else:
context = []
for idx in range(self.num_heads):
queries = tensors[idx]
keys = tensors[idx + self.num_heads]
values = tensors[idx + self.num_heads * 2]
# Attention is not scaled dot product, batch normalization compensates
# for it.
attn_logits_masked = self.qactivation(
tf.matmul(queries, keys, transpose_b=True))
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
context.append(tf.matmul(attention, values))
result = self.qconcat(context)
return tf.reshape(result, [1, -1, self.model_dimension])
class TransformerEncoder(base_layers.BaseLayer):
"""Transformer Encoder."""
def __init__(self,
model_dimension,
num_heads,
intermediate_size,
initializer_stddev=0.02,
activation_dropout_rate=0.0,
attention_dropout_rate=0.0,
**kwargs):
super(TransformerEncoder, self).__init__(**kwargs)
self.model_dimension = model_dimension
self.parameters.initializer = tf.keras.initializers.TruncatedNormal(
stddev=initializer_stddev)
self.self_attn = SelfAttentionV2(
model_dimension,
num_heads,
attention_dropout_rate=attention_dropout_rate,
parameters=self.parameters)
self.prx = dense_layers.BaseQDenseVarLen(
model_dimension, activation=None, parameters=self.parameters)
self.upprx = dense_layers.BaseQDenseVarLen(
intermediate_size, parameters=self.parameters)
self.downprx = dense_layers.BaseQDenseVarLen(
model_dimension, activation=None, parameters=self.parameters)
self.activation_dropout_rate = activation_dropout_rate
self.ln1 = normalization_layers.LayerNormalization(**kwargs)
self.ln2 = normalization_layers.LayerNormalization(**kwargs)
self.q1 = quantization_layers.ActivationQuantization(**kwargs)
self.q2 = quantization_layers.ActivationQuantization(**kwargs)
def call(self, inputs, mask, inverse_normalizer, attn_mask=None):
batch_size = self.get_batch_dimension(inputs)
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
mask_rank2 = tf.reshape(mask, [-1, 1])
assert inputs.get_shape().as_list()[-1] == self.model_dimension
tensor = self.self_attn(inputs, mask, inverse_normalizer, attn_mask)
inputs = tf.reshape(inputs, [-1, self.model_dimension])
tensor = tf.reshape(tensor, [-1, self.model_dimension])
tensor = self.prx(tensor, mask_rank2, inverse_normalizer)
if (self.parameters.mode == base_layers.TRAIN and
self.activation_dropout_rate > 0.0):
tensor = tf.nn.dropout(tensor, rate=self.activation_dropout_rate)
inputs_plus_selfattn = self.q1(self.ln1(inputs + tensor))
ffn_up = self.upprx(inputs_plus_selfattn, mask_rank2, inverse_normalizer)
ffn_down = self.downprx(ffn_up, mask_rank2, inverse_normalizer)
if (self.parameters.mode == base_layers.TRAIN and
self.activation_dropout_rate > 0.0):
ffn_down = tf.nn.dropout(ffn_down, rate=self.activation_dropout_rate)
inputs_plus_ffn = self.q2(self.ln2(inputs_plus_selfattn + ffn_down))
return tf.reshape(inputs_plus_ffn, [batch_size, -1, self.model_dimension])
class TransformerEncoderStack(base_layers.BaseLayer):
"""Transformer Encoder."""
def __init__(self, num_layers, max_time_step, vocabulary_size, embedding_size,
model_dimension, num_heads, intermediate_size, **kwargs):
self.max_time_step = max_time_step
self.vocabulary_size = vocabulary_size
self.embedding_size = embedding_size
activation_dropout_rate = kwargs.pop('activation_dropout_rate', 0.0)
attention_dropout_rate = kwargs.pop('attention_dropout_rate', 0.0)
self.layers = []
for _ in range(num_layers):
self.layers.append(
TransformerEncoder(
model_dimension=model_dimension,
num_heads=num_heads,
intermediate_size=intermediate_size,
activation_dropout_rate=activation_dropout_rate,
attention_dropout_rate=attention_dropout_rate,
**kwargs))
self.embedding = embedding_layers.EmbeddingLayer(
shape=[self.vocabulary_size, self.embedding_size], **kwargs)
self.positional_embedding = embedding_layers.EmbeddingLayer(
shape=[self.max_time_step, self.embedding_size], **kwargs)
self.ln = normalization_layers.LayerNormalization(**kwargs)
self.qact = quantization_layers.ActivationQuantization(**kwargs)
super(TransformerEncoderStack, self).__init__(**kwargs)
def call(self, input_indices, sequence_length):
mask_rank2 = tf.sequence_mask(
sequence_length, tf.shape(input_indices)[1], dtype=tf.float32)
mask_rank3 = tf.expand_dims(mask_rank2, axis=2)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask_rank3))
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
sequence_length = tf.reduce_sum(input_indices + 1 - input_indices)
pos_indices = tf.range(sequence_length, dtype=tf.int32)
pos_indices = tf.reshape(pos_indices, [1, -1])
else:
pos_indices = tf.cumsum(mask_rank2, axis=1, exclusive=True)
pos_indices = tf.cast(pos_indices, dtype=tf.int32)
input_values = self.embedding(input_indices)
pos_values = self.positional_embedding(pos_indices)
inputs = self.qact(self.ln(input_values + pos_values))
attn_mask = tf.matmul(mask_rank3, tf.transpose(mask_rank3, [0, 2, 1]))
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
inputs = inputs * mask_rank3
for layer in self.layers:
outputs = layer(inputs, mask_rank3, inverse_normalizer, attn_mask)
inputs = outputs
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
outputs = outputs * mask_rank3
return outputs
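# A minimal usage sketch for TransformerEncoderStack (illustrative only,
# untested); all sizes are assumptions. embedding_size must equal
# model_dimension because the summed token and positional embeddings are fed
# directly into the encoder layers.
def _transformer_stack_sketch():
  params = base_layers.Parameters(base_layers.TRAIN, quantize=False)
  encoder = TransformerEncoderStack(
      num_layers=2, max_time_step=16, vocabulary_size=1000, embedding_size=32,
      model_dimension=32, num_heads=4, intermediate_size=64, parameters=params)
  token_ids = tf.constant([[5, 9, 23, 7, 0, 0]], dtype=tf.int32)
  sequence_length = tf.constant([4])
  return encoder(token_ids, sequence_length)  # [batch, time, model_dimension]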
class TransformerEncoderStackWithInputEmbedding(TransformerEncoderStack):
"""Transformer Encoder."""
def call(self, inputs, sequence_length):
mask_rank2 = tf.sequence_mask(
sequence_length, tf.shape(inputs)[1], dtype=tf.float32)
mask_rank3 = tf.expand_dims(mask_rank2, axis=2)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask_rank3))
attn_mask = tf.matmul(mask_rank3, tf.transpose(mask_rank3, [0, 2, 1]))
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
inputs = inputs * mask_rank3
for layer in self.layers:
outputs = layer(inputs, mask_rank3, inverse_normalizer, attn_mask)
inputs = outputs
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
outputs = outputs * mask_rank3
return outputs
class FunnelAttention(base_layers.BaseLayer):
"""Self attention encoder (not suitable for causal attention)."""
def __init__(self,
model_dimension,
num_heads,
attention_dropout_rate=0.0,
**kwargs):
self.model_dimension = model_dimension
self.num_heads = num_heads
self.filters = model_dimension // num_heads
self.q_dense_layer = dense_layers.BaseQDenseVarLen(
units=model_dimension, activation=None, **kwargs)
self.kv_dense_layer = dense_layers.BaseQDenseVarLen(
units=model_dimension * 2, activation=None, **kwargs)
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
self.attention_dropout_rate = attention_dropout_rate
self.qconcat = quantization_layers.ConcatQuantization(axis=1, **kwargs)
super(FunnelAttention, self).__init__(**kwargs)
def call(self, inputs, mask, inverse_normalizer, memory, memory_mask,
memory_inverse_normalizer, attn_mask):
bsz = self.get_batch_dimension(inputs)
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
self._assert_rank_and_type(memory, 3)
self._assert_rank_and_type(memory_mask, 3)
assert memory.get_shape().as_list()[-1] == self.model_dimension
inputs_rank2 = tf.reshape(inputs, [-1, self.model_dimension])
mask_rank2 = tf.reshape(mask, [-1, 1])
q_tensor = self.q_dense_layer(inputs_rank2, mask_rank2, inverse_normalizer)
memory_rank2 = tf.reshape(memory, [-1, self.model_dimension])
memory_mask_rank2 = tf.reshape(memory_mask, [-1, 1])
kv_tensors = self.kv_dense_layer(memory_rank2, memory_mask_rank2,
inverse_normalizer)
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
q_tensor = tf.reshape(q_tensor, [bsz, -1, self.num_heads, self.filters])
kv_tensors = tf.reshape(kv_tensors,
[bsz, -1, 2, self.num_heads, self.filters])
kv_tensors = tf.unstack(kv_tensors, axis=2)
else:
q_tensor = tf.split(q_tensor, self.num_heads, axis=1)
kv_tensors = tf.split(kv_tensors, self.num_heads * 2, axis=1)
attn_mask = tf.expand_dims(attn_mask, axis=1)
invalid_mask = (1 - attn_mask) * self.parameters.invalid_logit
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
queries = tf.transpose(q_tensor, [0, 2, 1, 3])
keys = tf.transpose(kv_tensors[0], [0, 2, 1, 3])
values = tf.transpose(kv_tensors[1], [0, 2, 1, 3])
attn_logits = self.qactivation(tf.matmul(queries, keys, transpose_b=True))
attn_logits_masked = attn_logits * attn_mask + invalid_mask
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
result = tf.matmul(attention, values)
result = tf.transpose(result, [0, 2, 1, 3])
result = tf.reshape(result, [bsz, -1, self.model_dimension])
return self.qconcat([result])
else:
context = []
for idx in range(self.num_heads):
queries = q_tensor[idx]
keys = kv_tensors[idx]
values = kv_tensors[idx + self.num_heads]
# Attention is not scaled dot product, batch normalization compensates
# for it.
attn_logits_masked = self.qactivation(
tf.matmul(queries, keys, transpose_b=True))
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
context.append(tf.matmul(attention, values))
result = self.qconcat(context)
return tf.reshape(result, [1, -1, self.model_dimension])
class FunnelTransformerEncoder(base_layers.BaseLayer):
"""Transformer Encoder."""
def __init__(self,
model_dimension,
num_heads,
intermediate_size,
initializer_stddev=0.02,
activation_dropout_rate=0.0,
attention_dropout_rate=0.0,
**kwargs):
super(FunnelTransformerEncoder, self).__init__(**kwargs)
self.model_dimension = model_dimension
self.parameters.initializer = tf.keras.initializers.TruncatedNormal(
stddev=initializer_stddev)
self.self_attn = FunnelAttention(
model_dimension,
num_heads,
attention_dropout_rate=attention_dropout_rate,
parameters=self.parameters)
self.prx = dense_layers.BaseQDenseVarLen(
model_dimension, activation=None, parameters=self.parameters)
self.upprx = dense_layers.BaseQDenseVarLen(
intermediate_size, parameters=self.parameters)
self.downprx = dense_layers.BaseQDenseVarLen(
model_dimension, activation=None, parameters=self.parameters)
self.activation_dropout_rate = activation_dropout_rate
self.ln1 = normalization_layers.LayerNormalization(**kwargs)
self.ln2 = normalization_layers.LayerNormalization(**kwargs)
self.q1 = quantization_layers.ActivationQuantization(**kwargs)
self.q2 = quantization_layers.ActivationQuantization(**kwargs)
def call(self, inputs, mask, inverse_normalizer, memory, memory_mask,
memory_inverse_normalizer, attn_mask):
batch_size = self.get_batch_dimension(inputs)
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
mask_rank2 = tf.reshape(mask, [-1, 1])
assert inputs.get_shape().as_list()[-1] == self.model_dimension
tensor = self.self_attn(inputs, mask, inverse_normalizer, memory,
memory_mask, memory_inverse_normalizer, attn_mask)
inputs = tf.reshape(inputs, [-1, self.model_dimension])
tensor = tf.reshape(tensor, [-1, self.model_dimension])
tensor = self.prx(tensor, mask_rank2, inverse_normalizer)
if (self.parameters.mode == base_layers.TRAIN and
self.activation_dropout_rate > 0.0):
tensor = tf.nn.dropout(tensor, rate=self.activation_dropout_rate)
inputs_plus_selfattn = self.q1(self.ln1(inputs + tensor))
ffn_up = self.upprx(inputs_plus_selfattn, mask_rank2, inverse_normalizer)
ffn_down = self.downprx(ffn_up, mask_rank2, inverse_normalizer)
if (self.parameters.mode == base_layers.TRAIN and
self.activation_dropout_rate > 0.0):
ffn_down = tf.nn.dropout(ffn_down, rate=self.activation_dropout_rate)
inputs_plus_ffn = self.q2(self.ln2(inputs_plus_selfattn + ffn_down))
return tf.reshape(inputs_plus_ffn, [batch_size, -1, self.model_dimension])
class FunnelTransformerEncoderStack(base_layers.BaseLayer):
"""Transformer Encoder."""
def __init__(self, num_layers, max_time_step, vocabulary_size, embedding_size,
model_dimension, num_heads, intermediate_size, **kwargs):
self.max_time_step = max_time_step
self.pool_windows = kwargs.pop('pool_windows', [])
assert len(self.pool_windows) == num_layers
self.vocabulary_size = vocabulary_size
activation_dropout_rate = kwargs.pop('activation_dropout_rate', 0.0)
attention_dropout_rate = kwargs.pop('attention_dropout_rate', 0.0)
self.layers = []
for _ in range(num_layers):
self.layers.append(
FunnelTransformerEncoder(
model_dimension=model_dimension,
num_heads=num_heads,
intermediate_size=intermediate_size,
activation_dropout_rate=activation_dropout_rate,
attention_dropout_rate=attention_dropout_rate,
**kwargs))
super(FunnelTransformerEncoderStack, self).__init__(**kwargs)
def call(self, inputs, sequence_length):
mask_rank2 = tf.sequence_mask(
sequence_length, tf.shape(inputs)[1], dtype=tf.float32)
mask_rank3 = tf.expand_dims(mask_rank2, axis=2)
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
inputs = inputs * mask_rank3
pooled_inputs = inputs
pooled_mask = mask_rank3
pooled_inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(pooled_mask))
memory = pooled_inputs
memory_mask = pooled_mask
memory_inverse_normalizer = pooled_inverse_normalizer
for i, layer in enumerate(self.layers):
if self.pool_windows[i] > 1:
pooled_inputs = tf.nn.avg_pool(
pooled_inputs, [self.pool_windows[i]],
strides=[self.pool_windows[i]],
padding='SAME')
pooled_mask = pooled_mask[:, ::self.pool_windows[i], :]
pooled_inverse_normalizer = tf.math.reciprocal(
tf.reduce_sum(pooled_mask))
attn_mask = tf.matmul(pooled_mask, memory_mask, transpose_b=True)
pooled_outputs = layer(pooled_inputs, pooled_mask,
pooled_inverse_normalizer, memory, memory_mask,
memory_inverse_normalizer, attn_mask)
pooled_inputs = pooled_outputs
pooled_inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(pooled_mask))
memory = pooled_inputs
memory_mask = pooled_mask
memory_inverse_normalizer = pooled_inverse_normalizer
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
pooled_outputs = pooled_outputs * pooled_mask
return pooled_outputs, pooled_mask
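# A small worked illustration of the pooling schedule above (hypothetical
# settings, not used by the code): with pool_windows=[1, 2, 2] and an input of
# length 12, the query sequence seen by the three layers has length 12, 6 and
# 3, while the memory (keys/values) for each layer is the previous layer's
# output of length 12, 12 and 6 respectively.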
class DecoderMultiheadAttention(base_layers.BaseLayer):
"""Multihead attention for decoder."""
def __init__(self,
model_dimension,
num_heads,
attention_dropout_rate=0.0,
cached_kv=False,
**kwargs):
self.model_dimension = model_dimension
self.num_heads = num_heads
self.filters = model_dimension // num_heads
self.cached_kv = cached_kv
self.q_dense_layers = dense_layers.BaseQDense(
units=model_dimension,
activation=None,
normalize=False,
bias=False,
**kwargs)
self.kv_dense_layers = dense_layers.BaseQDenseVarLen(
units=model_dimension * 2, activation=None, **kwargs)
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
self.attention_dropout_rate = attention_dropout_rate
self.qconcat = quantization_layers.ConcatQuantization(axis=1, **kwargs)
super(DecoderMultiheadAttention, self).__init__(**kwargs)
def call(self,
inputs,
input_mask,
input_inverse_normalizer,
memory=None,
memory_mask=None,
memory_inverse_normalizer=None,
attn_mask=None):
bsz = self.get_batch_dimension(inputs)
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(input_mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
inputs_rank2 = tf.reshape(inputs, [-1, self.model_dimension])
q_tensor = self.q_dense_layers(inputs_rank2)
if memory is not None:
self._assert_rank_and_type(memory, 2)
self._assert_rank_and_type(memory_mask, 2)
if self.cached_kv:
# Keys and Values are cached and reused at each layer.
assert memory.get_shape().as_list()[1] == 2 * self.model_dimension
kv_tensors = memory
else:
kv_tensors = self.kv_dense_layers(memory, memory_mask,
memory_inverse_normalizer)
else:
kv_tensors = self.kv_dense_layers(inputs_rank2)
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
q_tensor = tf.reshape(q_tensor, [bsz, -1, self.num_heads, self.filters])
kv_tensors = tf.reshape(kv_tensors,
[bsz, -1, 2, self.num_heads, self.filters])
kv_tensors = tf.unstack(kv_tensors, axis=2)
else:
q_tensor = tf.split(q_tensor, self.num_heads, axis=1)
kv_tensors = tf.split(kv_tensors, self.num_heads * 2, axis=1)
if self.parameters.mode in [base_layers.TRAIN, base_layers.EVAL]:
assert attn_mask is not None
if (self.attention_dropout_rate > 0.0 and
self.parameters.mode == base_layers.TRAIN):
attn_mask *= self.random_drop_to_zero(attn_mask,
self.attention_dropout_rate)
attn_mask = tf.expand_dims(attn_mask, 1)
invalid_mask = (1 - attn_mask) * self.parameters.invalid_logit
queries = tf.transpose(q_tensor, [0, 2, 1, 3])
keys = tf.transpose(kv_tensors[0], [0, 2, 1, 3])
values = tf.transpose(kv_tensors[1], [0, 2, 1, 3])
attn_logits = self.qactivation(tf.matmul(queries, keys, transpose_b=True))
attn_logits_masked = attn_logits * attn_mask + invalid_mask
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
result = tf.matmul(attention, values)
result = tf.transpose(result, [0, 2, 1, 3])
result = tf.reshape(result, [bsz, -1, self.model_dimension])
return self.qconcat([result])
else:
# We need to invoke the keras layer before calling APIs that it provides
# such as quantize_using_range.
self.qconcat(None)
context = []
for head in range(self.num_heads):
queries = q_tensor[head]
if self.parameters.mode == base_layers.PREDICT:
# PREDICT mode assumes callers tile and merge beam size with batch
# size. Hence extracting the first entry in the tile to compute
# attention.
keys = tf.split(kv_tensors[head], bsz, axis=0)
keys = keys[0]
values = tf.split(kv_tensors[head + self.num_heads], bsz, axis=0)
values = values[0]
else:
keys = kv_tensors[head]
values = kv_tensors[head + self.num_heads]
attn_logits_masked = self.qactivation(
tf.matmul(queries, keys, transpose_b=True))
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
context.append(
self.qconcat.quantize_using_range(tf.matmul(attention, values)))
# Concatenating heads along axis 1.
result = self.qconcat.quantize_using_range(tf.concat(context, axis=1))
return tf.reshape(result, [-1, 1, self.model_dimension])
class DecoderUniformAttention(base_layers.BaseLayer):
"""Decoder uniform attention."""
def __init__(self,
model_dimension,
max_time_step,
attention_dropout_rate=0.0,
beam_size=1,
**kwargs):
self.model_dimension = model_dimension
self.max_time_step = max_time_step
self.beam_size = beam_size
self.causal_mask = tf.expand_dims(
tf.linalg.band_part(tf.ones([max_time_step, max_time_step]), -1, 0), 0)
self.dense_layers = dense_layers.BaseQDenseVarLen(
units=model_dimension,
activation=None,
normalize=False,
bias=False,
rank=3,
**kwargs)
self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
super(DecoderUniformAttention, self).__init__(**kwargs)
def get_uniform_attention(self, attn_mask=None):
"""Generates uniform attention matrix using `causal_mask`."""
mask = tf.math.divide_no_nan(
self.causal_mask,
tf.reduce_sum(self.causal_mask, axis=-1, keepdims=True))
if attn_mask is not None:
self._assert_rank_and_type(attn_mask, 3)
mask = mask * attn_mask
return mask
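  # A worked illustration (values are not used by the code): for
  # max_time_step=3 the row-normalized causal mask above is
  #   [[1.0, 0.0, 0.0],
  #    [0.5, 0.5, 0.0],
  #    [1/3, 1/3, 1/3]]
  # i.e. each decoding step uniformly averages the current and all past steps.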
def call(self,
inputs,
mask,
inverse_normalizer,
step=None,
beam_indices=None,
cache=None,
attn_mask=None):
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
layer_out = self.dense_layers(inputs, mask, inverse_normalizer)
# TFLite mode is handled with a custom op.
if self.parameters.mode == base_layers.TFLITE:
assert beam_indices is not None
assert step is not None
layer_out = tf_custom_ops_py.uniform_causal_attn(
layer_out, step, beam_indices, self.model_dimension, self.beam_size)
else:
# Cache is used for TF Predict and Eval modes.
if cache is None:
attention_matrix = self.get_uniform_attention(attn_mask)
layer_out = tf.matmul(attention_matrix, layer_out)
else:
assert self.parameters.mode in [base_layers.PREDICT, base_layers.EVAL]
assert step is not None
cache['uniform_avg'] = layer_out + cache['uniform_avg']
layer_out = cache['uniform_avg'] / tf.cast(step, dtype=tf.float32)
return self.qoutput(layer_out)
| 30,161 | 43.817236 | 80 | py |
models | models-master/research/seq_flow_lite/layers/conv_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base layer for convolution."""
import copy
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class EncoderQConvolution(base_layers.BaseLayer):
"""Quantized encoder convolution layers."""
def __init__(self,
filters,
ksize,
stride=1,
padding="SAME",
dilations=None,
activation=tf.keras.layers.ReLU(),
bias=True,
rank=4,
normalization_fn=None,
**kwargs):
self.out_filters = filters
assert rank >= 3 and rank <= 4
self.rank = rank
self.ksize = self._unpack(ksize)
self.strides = self._unpack(stride)
self.dilations = [1] + self._unpack(dilations) + [1] if dilations else None
self.activation = activation
self.bias = bias
self.padding = padding
self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
self._create_normalizer(normalization_fn=normalization_fn, **kwargs)
super(EncoderQConvolution, self).__init__(**kwargs)
def _unpack(self, value):
if not isinstance(value, list):
assert isinstance(value, int)
return [1 if self.rank == 3 else value, value]
else:
assert len(value) == 2 and self.rank == 4
assert isinstance(value[0], int) and isinstance(value[1], int)
return value
def build(self, input_shapes):
assert len(input_shapes) == self.rank
self.in_filters = input_shapes[-1]
shape = self.ksize + [self.in_filters, self.out_filters]
self.filters = self.add_weight_wrapper(shape=shape)
if self.bias:
self.b = self.add_bias(shape=[self.out_filters])
def _create_normalizer(self, normalization_fn, **kwargs):
if normalization_fn is None:
self.normalization = normalization_layers.BatchNormalization(**kwargs)
else:
self.normalization = copy.deepcopy(normalization_fn)
def _conv_r4(self, inputs, normalize_method):
outputs = tf.nn.conv2d(
inputs,
self.quantize_parameter(self.filters),
strides=self.strides,
padding=self.padding,
dilations=self.dilations)
if self.bias:
outputs = tf.nn.bias_add(outputs, self.b)
outputs = normalize_method(outputs)
if self.activation:
outputs = self.activation(outputs)
return self.qoutput(outputs)
def _conv_r3(self, inputs, normalize_method):
bsz = self.get_batch_dimension(inputs)
inputs_r4 = tf.reshape(inputs, [bsz, 1, -1, self.in_filters])
outputs = self._conv_r4(inputs_r4, normalize_method)
return tf.reshape(outputs, [bsz, -1, self.out_filters])
def call(self, inputs):
def normalize_method(tensor):
return self.normalization(tensor)
return self._do_call(inputs, normalize_method)
def _do_call(self, inputs, normalize_method):
if self.rank == 3:
return self._conv_r3(inputs, normalize_method)
return self._conv_r4(inputs, normalize_method)
def quantize_using_output_range(self, tensor):
return self.qoutput.quantize_using_range(tensor)
class EncoderQConvolutionVarLen(EncoderQConvolution):
"""Convolution on variable length sequence."""
def _create_normalizer(self, normalization_fn, **kwargs):
if normalization_fn is None:
self.normalization = normalization_layers.VarLenBatchNormalization(
rank=4, **kwargs)
else:
self.normalization = copy.deepcopy(normalization_fn)
def call(self, inputs, mask, inverse_normalizer):
def normalize_method(tensor):
return self.normalization(tensor, mask, inverse_normalizer)
return self._do_call(inputs, normalize_method)
| 4,468 | 34.468254 | 80 | py |
models | models-master/research/seq_flow_lite/layers/quantization_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for quantization."""
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
class ActivationQuantization(base_layers.BaseLayer):
"""A class that applies quantization to a activation tensor."""
def __init__(self, ema_decay=0.99, num_bits=8, **kwargs):
self.ema_decay = ema_decay
self.num_bits = num_bits
super(ActivationQuantization, self).__init__(**kwargs)
def build(self, input_shapes):
if self.parameters.quantize:
self.min_var = self.add_weight(
"min", initializer=tf.keras.initializers.Zeros(), trainable=False)
self.max_var = self.add_weight(
"max", initializer=tf.keras.initializers.Ones(), trainable=False)
def call(self, inputs):
if self.parameters.quantize:
if self.parameters.mode == base_layers.TRAIN:
# Toco expects 0.0 to be part of the quantization range.
batch_min = tf.minimum(tf.reduce_min(inputs), 0.0)
min_var = self.assign_moving_average(self.min_var, batch_min,
self.ema_decay)
batch_max = tf.maximum(tf.reduce_max(inputs), 0.0)
max_var = self.assign_moving_average(self.max_var, batch_max,
self.ema_decay)
with tf.control_dependencies([min_var, max_var]):
return tf.quantization.fake_quant_with_min_max_vars(
inputs, batch_min, batch_max, num_bits=self.num_bits)
else:
return tf.quantization.fake_quant_with_min_max_vars(
inputs, self.min_var, self.max_var, num_bits=self.num_bits)
return inputs
def quantize_using_range(self, inputs):
# This method can only be called after a call to "call" method in this class
if self.parameters.quantize:
return tf.quantization.fake_quant_with_min_max_vars(
inputs, self.min_var, self.max_var, num_bits=self.num_bits)
return inputs
class ConcatQuantization(ActivationQuantization):
"""A class that applies quantization to a activation tensor."""
def __init__(self, axis=2, **kwargs):
self.axis = axis
super(ConcatQuantization, self).__init__(**kwargs)
def _reduce_list(self, tensor_list, functor):
reduce_result = [functor(tensor) for tensor in tensor_list]
# Toco expects 0.0 to be part of the quantization range.
reduce_result.append(tf.constant(0.0))
return functor(tf.stack(reduce_result))
def call(self, tensors):
# Ignore empty invocations done to build the keras layer.
if tensors is None:
return
if self.parameters.quantize:
if self.parameters.mode == base_layers.TRAIN:
# Toco expects 0.0 to be part of the quantization range.
batch_min = self._reduce_list(tensors, tf.reduce_min)
min_var = self.assign_moving_average(self.min_var, batch_min,
self.ema_decay)
batch_max = self._reduce_list(tensors, tf.reduce_max)
max_var = self.assign_moving_average(self.max_var, batch_max,
self.ema_decay)
else:
min_var, max_var = self.min_var, self.max_var
tensors = [
tf.quantization.fake_quant_with_min_max_vars(
tensor, min_var, max_var, num_bits=self.num_bits)
for tensor in tensors
]
tensor = tf.concat(tensors, axis=self.axis)
return tf.quantization.fake_quant_with_min_max_vars(
tensor, min_var, max_var, num_bits=self.num_bits)
return tf.concat(tensors, axis=self.axis)
| 4,237 | 40.145631 | 80 | py |
models | models-master/research/seq_flow_lite/demo/colab/setup.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from distutils import spawn
from distutils.command import build
import os
import subprocess
import setuptools
class _BuildCommand(build.build):
sub_commands = [
('bazel_build', lambda self: True),
] + build.build.sub_commands
class _BazelBuildCommand(setuptools.Command):
def initialize_options(self):
pass
def finalize_options(self):
self._bazel_cmd = spawn.find_executable('bazel')
def run(self):
subprocess.check_call(
[self._bazel_cmd, 'run', '-c', 'opt', '//demo/colab:move_ops'],
cwd=os.path.dirname(os.path.realpath(__file__)))
setuptools.setup(
name='seq_flow_lite',
version='0.1',
packages=['tf_ops', 'tflite_ops'],
package_data={'': ['*.so']},
cmdclass={
'build': _BuildCommand,
'bazel_build': _BazelBuildCommand,
},
description='Test')
| 1,534 | 27.425926 | 80 | py |
models | models-master/research/seq_flow_lite/tf_ops/denylist_op_test.py | # Copyright 2022 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test denylist op and show example usage from python wrapper."""
import tensorflow as tf
from tf_ops import denylist_op # import seq_flow_lite module
class SkipgramDenylistTest(tf.test.TestCase):
def test_correct(self):
result = denylist_op.skipgram_denylist(
input=["q a q b q c q", "q a b q q c"],
max_skip_size=1,
denylist=["a b c"],
denylist_category=[1],
categories=2,
negative_categories=1)
self.assertAllEqual(result, [[0.0, 1.0], [1.0, 0.0]])
class SubsequenceDenylistTest(tf.test.TestCase):
def test_correct(self):
result = denylist_op.subsequence_denylist(
input=["qaqbqcq", "qabqqc"],
max_skip_size=1,
denylist=["a b c"],
denylist_category=[1],
categories=2,
negative_categories=1)
self.assertAllEqual(result, [[0.0, 1.0], [1.0, 0.0]])
class TokenizedDenylistTest(tf.test.TestCase):
def test_correct(self):
result = denylist_op.tokenized_denylist(
input=[["q", "a", "q", "b", "q", "c", "q"],
["q", "a", "b", "q", "q", "c", ""]],
token_count=[7, 6],
max_skip_size=1,
denylist=["a b c"],
denylist_category=[1],
categories=2,
negative_categories=1)
self.assertAllEqual(result, [[0.0, 1.0], [1.0, 0.0]])
if __name__ == "__main__":
tf.test.main()
| 2,054 | 31.109375 | 80 | py |
models | models-master/research/seq_flow_lite/utils/misc_utils.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for miscelaneous utils."""
import tensorflow as tf
def random_substr(str_tensor, max_words):
"""Select random substring if the input has more than max_words."""
word_batch_r = tf.strings.split(str_tensor)
row_splits = word_batch_r.row_splits
words = word_batch_r.values
start_idx = row_splits[:-1]
end_idx = row_splits[1:]
words_per_example = end_idx - start_idx
ones = tf.ones_like(end_idx)
max_val = tf.maximum(ones, words_per_example - max_words)
max_words_batch = tf.reduce_max(words_per_example)
rnd = tf.random.uniform(
tf.shape(start_idx), minval=0, maxval=max_words_batch, dtype=tf.int64)
off_start_idx = tf.math.floormod(rnd, max_val)
new_words_per_example = tf.where(
tf.equal(max_val, 1), words_per_example, ones * max_words)
new_start_idx = start_idx + off_start_idx
new_end_idx = new_start_idx + new_words_per_example
indices = tf.expand_dims(tf.range(tf.size(words), dtype=tf.int64), axis=0)
within_limit = tf.logical_and(
tf.greater_equal(indices, tf.expand_dims(new_start_idx, axis=1)),
tf.less(indices, tf.expand_dims(new_end_idx, axis=1)))
keep_indices = tf.reduce_any(within_limit, axis=0)
keep_indices = tf.cast(keep_indices, dtype=tf.int32)
_, selected_words = tf.dynamic_partition(words, keep_indices, 2)
row_splits = tf.math.cumsum(new_words_per_example)
row_splits = tf.concat([[0], row_splits], axis=0)
new_tensor = tf.RaggedTensor.from_row_splits(
values=selected_words, row_splits=row_splits)
return tf.strings.reduce_join(new_tensor, axis=1, separator=" ")
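# A minimal usage sketch (illustrative values only, not used elsewhere):
# examples longer than `max_words` are cropped to a random contiguous window
# of `max_words` words, shorter examples pass through unchanged.
def _example_random_substr():
  batch = tf.constant(["one two three four five", "short sentence"])
  # Returns a batch of strings with at most 3 words per example.
  return random_substr(batch, max_words=3)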
| 2,265 | 45.244898 | 80 | py |
models | models-master/research/seq_flow_lite/utils/tflite_utils.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils to convert to a TFLite model."""
import tensorflow.compat.v1 as tf
def _dump_graph_in_text_format(filename, graph_def):
"""Dump a tensorflow graph in readable text format."""
  with open(filename, 'w') as f:
    for node in graph_def.node:
      f.write('Node: %s (%s)\n' % (node.name, node.op))
      for input_name in node.input:
        f.write('\tInput: %s\n' % input_name)
def get_mean_stddev_values(min_value_of_features, max_value_of_features):
"""Gets Mean and Stddev values for given min/max float values."""
quant_min = 0
quant_max = 255
min_global = min_value_of_features
max_global = max_value_of_features
quant_min_float = float(quant_min)
quant_max_float = float(quant_max)
nudged_scale = (max_global - min_global) / (quant_max_float - quant_min_float)
zero_point_from_min = quant_min_float - min_global / nudged_scale
if zero_point_from_min < quant_min_float:
nudged_zero_point = int(quant_min)
elif zero_point_from_min > quant_max_float:
nudged_zero_point = int(quant_max)
else:
nudged_zero_point = int(round(zero_point_from_min))
nudged_min = (quant_min_float - nudged_zero_point) * (nudged_scale)
nudged_max = (quant_max_float - nudged_zero_point) * (nudged_scale)
zero_point = (quant_min - min_global) / (max_global - min_global) * quant_max
scale = (nudged_max - nudged_min) / 255.0
mean_value = zero_point
stddev_value = 1 / scale
return mean_value, stddev_value
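# A minimal worked example (values chosen for illustration): for features in
# the range [-1.0, 1.0] the helper above yields mean ~= 127.5 and
# stddev ~= 127.5, which matches the `quantized_input_stats` defaults used by
# `generate_tflite` below.
def _example_mean_stddev_for_unit_range():
  mean_value, stddev_value = get_mean_stddev_values(-1.0, 1.0)
  assert abs(mean_value - 127.5) < 1e-3
  assert abs(stddev_value - 127.5) < 1e-3
  return mean_value, stddev_value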
class InterpreterWithCustomOps(tf.lite.Interpreter):
"""Extended tf.lite.Interpreter."""
def __init__(self,
model_content,
custom_op_registerers=None,
experimental_preserve_all_tensors=False):
self._custom_op_registerers = custom_op_registerers or []
super(InterpreterWithCustomOps, self).__init__(
model_content=model_content,
experimental_preserve_all_tensors=experimental_preserve_all_tensors)
def op_details(self):
op_details = {}
try:
op_details = self._get_ops_details() # Accessing experimental method.
except AttributeError:
print('Unable to access op details')
return op_details
def op_histogram(self):
op_hist = {}
op_list = self.op_details()
for op in op_list:
if op['op_name'] in op_hist:
op_hist[op['op_name']] += 1
else:
op_hist[op['op_name']] = 1
return op_hist
def check_op_histogram(self, expected):
passed = True
for k, v in self.op_histogram().items():
if k not in expected:
print('Unexpected key {} found {} times.'.format(k, v))
passed = False
continue
elif expected[k] != v:
print('Expected {} counts of key {} found {}.'.format(
expected[k], k, v))
passed = False
del expected[k]
for k, v in expected.items():
print('Missing expected key {} value {}.'.format(k, v))
passed = False
return passed
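# A minimal usage sketch (the op names and counts below are hypothetical; the
# expected histogram depends entirely on the converted model):
def _example_verify_op_histogram(tflite_model_bytes):
  interpreter = InterpreterWithCustomOps(model_content=tflite_model_bytes)
  expected = {'FULLY_CONNECTED': 4, 'SOFTMAX': 1}
  # Prints a diagnostic for every mismatch and returns False if any is found.
  return interpreter.check_op_histogram(expected)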
def set_output_quantized_for_custom_ops(graph_def, use_mlir=True):
"""Set output types/quantized flag for custom/unsupported ops."""
quantized_custom_ops = {
'SequenceStringProjection': [tf.float32.as_datatype_enum],
'SequenceStringProjectionV2': [tf.float32.as_datatype_enum],
'PoolingOp': [tf.float32.as_datatype_enum],
'ExpectedValueOp': [tf.float32.as_datatype_enum],
'LayerNorm': [tf.float32.as_datatype_enum],
'UniformCausalAttn': [tf.float32.as_datatype_enum],
'DynamicUniformCausalAttn': [tf.float32.as_datatype_enum],
'RnnDecoderReadState': [tf.float32.as_datatype_enum],
'RnnDecoderWriteState': [tf.float32.as_datatype_enum],
}
custom_op_renames = {
'SequenceStringProjection': 'SEQUENCE_STRING_PROJECTION',
'SequenceStringProjectionV2': 'SEQUENCE_STRING_PROJECTION_V2',
}
for node in graph_def.node:
if node.op in quantized_custom_ops:
if use_mlir:
node.attr['_tfl_quant_trait'].s = str.encode('fully_quantizable')
else:
node.attr['_output_quantized'].b = True
node.attr['_output_types'].list.type[:] = quantized_custom_ops[node.op]
if not use_mlir and node.op in custom_op_renames:
node.op = custom_op_renames[node.op]
def generate_tflite(session,
graph,
input_tensors,
output_tensors,
use_mlir=True):
"""Generate TFLite model from a session, graph and input/output tensors."""
output_nodes = [tensor.name.split(':')[0] for tensor in output_tensors]
graph_def = tf.graph_util.convert_variables_to_constants(
session, graph.as_graph_def(), output_nodes)
set_output_quantized_for_custom_ops(graph_def, use_mlir)
converter = tf.lite.TFLiteConverter(graph_def, input_tensors, output_tensors)
converter.inference_type = tf.uint8
converter.default_ranges_stats = (127.5, 127.5)
converter.quantized_input_stats = {
tensor.op.name: (127.5, 127.5) for tensor in input_tensors
}
converter.allow_custom_ops = True
converter.experimental_new_converter = use_mlir
return converter.convert()
| 5,759 | 34.555556 | 80 | py |
models | models-master/research/attention_ocr/python/inception_preprocessing.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
# TODO(gorban): add as a dependency, when slim or tensorflow/models are pipfied
# Source:
# https://raw.githubusercontent.com/tensorflow/models/a9d0e6e8923a4/slim/preprocessing/inception_preprocessing.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random.uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)
])[0]
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.compat.v1.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0)
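# A minimal usage sketch (not part of the original pipeline): combining
# `apply_with_random_selector` with `distort_color` picks one of the four
# color orderings dynamically per call, mirroring the use in
# `preprocess_for_train` below.
def _example_random_color_distortion(image):
  return apply_with_random_selector(
      image,
      lambda x, ordering: distort_color(x, ordering, fast_mode=False),
      num_cases=4)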
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the
whole image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
      must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.compat.v1.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
image_size=tf.shape(input=image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox
def preprocess_for_train(image,
height,
width,
bbox,
fast_mode=True,
scope=None):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
Additionally it would create image_summaries to display the different
transformations applied to the image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would be converted to tf.float32 assuming that the
      range is [0, MAX], where MAX is largest positive representable number for
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations (i.e.
bi-cubic resizing, random_hue or random_contrast).
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of distorted image used for training with range [-1, 1].
"""
with tf.compat.v1.name_scope(scope, 'distort_image', [image, height, width, bbox]):
if bbox is None:
bbox = tf.constant(
[0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
image_with_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), bbox)
tf.compat.v1.summary.image('image_with_bounding_boxes', image_with_box)
distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([None, None, 3])
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distorted_bbox)
tf.compat.v1.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method in a round robin
# fashion based on the thread number.
# Note that ResizeMethod contains 4 enumerated resizing methods.
# We select only 1 case for fast_mode bilinear.
num_resize_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize(x, [height, width], method=method),
num_cases=num_resize_cases)
tf.compat.v1.summary.image('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors. There are 4 ways to do it.
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode),
num_cases=4)
tf.compat.v1.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
distorted_image = tf.subtract(distorted_image, 0.5)
distorted_image = tf.multiply(distorted_image, 2.0)
return distorted_image
def preprocess_for_eval(image,
height,
width,
central_fraction=0.875,
scope=None):
"""Prepare one image for evaluation.
If height and width are specified it would output an image with that size by
applying resize_bilinear.
  If central_fraction is specified it would crop the central fraction of the
input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would be converted to tf.float32 assuming that the
      range is [0, MAX], where MAX is largest positive representable number for
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details)
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.compat.v1.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
if central_fraction:
image = tf.image.central_crop(image, central_fraction=central_fraction)
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize(
image, [height, width], method=tf.image.ResizeMethod.BILINEAR)
image = tf.squeeze(image, [0])
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def preprocess_image(image,
height,
width,
is_training=False,
bbox=None,
fast_mode=True):
"""Pre-process one image for training or evaluation.
Args:
image: 3-D Tensor [height, width, channels] with the image.
height: integer, image expected height.
width: integer, image expected width.
    is_training: Boolean. If true it would transform an image for training,
      otherwise it would transform it for evaluation.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations.
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if is_training:
return preprocess_for_train(image, height, width, bbox, fast_mode)
else:
return preprocess_for_eval(image, height, width)
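# A minimal usage sketch (299x299 is a hypothetical Inception-style input
# size): decode a JPEG and prepare it for evaluation; output values are
# scaled to [-1, 1].
def _example_eval_preprocessing(encoded_jpeg):
  image = tf.image.decode_jpeg(encoded_jpeg, channels=3)
  return preprocess_image(image, height=299, width=299, is_training=False)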
| 13,787 | 42.632911 | 113 | py |
models | models-master/research/attention_ocr/python/data_provider_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_provider."""
import numpy as np
import tensorflow as tf
from tensorflow.contrib.slim import queues
import datasets
import data_provider
class DataProviderTest(tf.test.TestCase):
def setUp(self):
tf.test.TestCase.setUp(self)
def test_preprocessed_image_values_are_in_range(self):
image_shape = (5, 4, 3)
fake_image = np.random.randint(low=0, high=255, size=image_shape)
image_tf = data_provider.preprocess_image(fake_image)
with self.test_session() as sess:
image_np = sess.run(image_tf)
self.assertEqual(image_np.shape, image_shape)
min_value, max_value = np.min(image_np), np.max(image_np)
self.assertTrue((-1.28 < min_value) and (min_value < 1.27))
self.assertTrue((-1.28 < max_value) and (max_value < 1.27))
def test_provided_data_has_correct_shape(self):
batch_size = 4
data = data_provider.get_data(
dataset=datasets.fsns_test.get_test_split(),
batch_size=batch_size,
augment=True,
central_crop_size=None)
with self.test_session() as sess, queues.QueueRunners(sess):
images_np, labels_np = sess.run([data.images, data.labels_one_hot])
self.assertEqual(images_np.shape, (batch_size, 150, 600, 3))
self.assertEqual(labels_np.shape, (batch_size, 37, 134))
def test_optionally_applies_central_crop(self):
batch_size = 4
data = data_provider.get_data(
dataset=datasets.fsns_test.get_test_split(),
batch_size=batch_size,
augment=True,
central_crop_size=(500, 100))
with self.test_session() as sess, queues.QueueRunners(sess):
images_np = sess.run(data.images)
self.assertEqual(images_np.shape, (batch_size, 100, 500, 3))
if __name__ == '__main__':
tf.test.main()
| 2,448 | 32.547945 | 80 | py |
models | models-master/research/attention_ocr/python/demo_inference_test.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import demo_inference
import tensorflow as tf
from tensorflow.python.training import monitored_session
from tensorflow.compat.v1 import flags
_CHECKPOINT = 'model.ckpt-399731'
_CHECKPOINT_URL = 'http://download.tensorflow.org/models/attention_ocr_2017_08_09.tar.gz'
class DemoInferenceTest(tf.test.TestCase):
def setUp(self):
super(DemoInferenceTest, self).setUp()
for suffix in ['.meta', '.index', '.data-00000-of-00001']:
filename = _CHECKPOINT + suffix
self.assertTrue(tf.io.gfile.exists(filename),
msg='Missing checkpoint file %s. '
'Please download and extract it from %s' %
(filename, _CHECKPOINT_URL))
self._batch_size = 32
flags.FLAGS.dataset_dir = os.path.join(
os.path.dirname(__file__), 'datasets/testdata/fsns')
def test_moving_variables_properly_loaded_from_a_checkpoint(self):
batch_size = 32
dataset_name = 'fsns'
images_placeholder, endpoints = demo_inference.create_model(batch_size,
dataset_name)
image_path_pattern = 'testdata/fsns_train_%02d.png'
images_data = demo_inference.load_images(image_path_pattern, batch_size,
dataset_name)
tensor_name = 'AttentionOcr_v1/conv_tower_fn/INCE/InceptionV3/Conv2d_2a_3x3/BatchNorm/moving_mean'
moving_mean_tf = tf.compat.v1.get_default_graph().get_tensor_by_name(
tensor_name + ':0')
reader = tf.compat.v1.train.NewCheckpointReader(_CHECKPOINT)
moving_mean_expected = reader.get_tensor(tensor_name)
session_creator = monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=_CHECKPOINT)
with monitored_session.MonitoredSession(
session_creator=session_creator) as sess:
moving_mean_np = sess.run(moving_mean_tf,
feed_dict={images_placeholder: images_data})
self.assertAllEqual(moving_mean_expected, moving_mean_np)
def test_correct_results_on_test_data(self):
image_path_pattern = 'testdata/fsns_train_%02d.png'
predictions = demo_inference.run(_CHECKPOINT, self._batch_size,
'fsns',
image_path_pattern)
self.assertEqual([
u'Boulevard de Lunel░░░░░░░░░░░░░░░░░░░',
'Rue de Provence░░░░░░░░░░░░░░░░░░░░░░',
'Rue de Port Maria░░░░░░░░░░░░░░░░░░░░',
'Avenue Charles Gounod░░░░░░░░░░░░░░░░',
'Rue de l‘Aurore░░░░░░░░░░░░░░░░░░░░░░',
'Rue de Beuzeville░░░░░░░░░░░░░░░░░░░░',
'Rue d‘Orbey░░░░░░░░░░░░░░░░░░░░░░░░░░',
'Rue Victor Schoulcher░░░░░░░░░░░░░░░░',
'Rue de la Gare░░░░░░░░░░░░░░░░░░░░░░░',
'Rue des Tulipes░░░░░░░░░░░░░░░░░░░░░░',
'Rue André Maginot░░░░░░░░░░░░░░░░░░░░',
'Route de Pringy░░░░░░░░░░░░░░░░░░░░░░',
'Rue des Landelles░░░░░░░░░░░░░░░░░░░░',
'Rue des Ilettes░░░░░░░░░░░░░░░░░░░░░░',
'Avenue de Maurin░░░░░░░░░░░░░░░░░░░░░',
'Rue Théresa░░░░░░░░░░░░░░░░░░░░░░░░░░', # GT='Rue Thérésa'
'Route de la Balme░░░░░░░░░░░░░░░░░░░░',
'Rue Hélène Roederer░░░░░░░░░░░░░░░░░░',
'Rue Emile Bernard░░░░░░░░░░░░░░░░░░░░',
'Place de la Mairie░░░░░░░░░░░░░░░░░░░',
'Rue des Perrots░░░░░░░░░░░░░░░░░░░░░░',
'Rue de la Libération░░░░░░░░░░░░░░░░░',
'Impasse du Capcir░░░░░░░░░░░░░░░░░░░░',
'Avenue de la Grand Mare░░░░░░░░░░░░░░',
'Rue Pierre Brossolette░░░░░░░░░░░░░░░',
'Rue de Provence░░░░░░░░░░░░░░░░░░░░░░',
'Rue du Docteur Mourre░░░░░░░░░░░░░░░░',
'Rue d‘Ortheuil░░░░░░░░░░░░░░░░░░░░░░░',
'Rue des Sarments░░░░░░░░░░░░░░░░░░░░░',
'Rue du Centre░░░░░░░░░░░░░░░░░░░░░░░░',
'Impasse Pierre Mourgues░░░░░░░░░░░░░░',
'Rue Marcel Dassault░░░░░░░░░░░░░░░░░░'
], predictions)
if __name__ == '__main__':
tf.test.main()
| 4,032 | 42.836957 | 102 | py |
models | models-master/research/attention_ocr/python/model_export_lib.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for exporting Attention OCR model."""
import tensorflow as tf
# Function borrowed from research/object_detection/core/preprocessor.py
def normalize_image(image, original_minval, original_maxval, target_minval,
target_maxval):
"""Normalizes pixel values in the image.
Moves the pixel values from the current [original_minval, original_maxval]
  range to the [target_minval, target_maxval] range.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width,
channels].
original_minval: current image minimum value.
original_maxval: current image maximum value.
target_minval: target image minimum value.
target_maxval: target image maximum value.
Returns:
image: image which is the same shape as input image.
"""
with tf.compat.v1.name_scope('NormalizeImage', values=[image]):
original_minval = float(original_minval)
original_maxval = float(original_maxval)
target_minval = float(target_minval)
target_maxval = float(target_maxval)
image = tf.cast(image, dtype=tf.float32)
image = tf.subtract(image, original_minval)
image = tf.multiply(image, (target_maxval - target_minval) /
(original_maxval - original_minval))
image = tf.add(image, target_minval)
return image
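# A minimal usage sketch (assumes pixel values in the usual [0, 255] range):
# linearly rescales an image to [0, 1], i.e.
# image' = (image - 0) * (1 - 0) / (255 - 0) + 0, exactly the call made inside
# `generate_tfexample_image` below.
def _example_normalize_to_unit_range(image):
  return normalize_image(
      image,
      original_minval=0.0,
      original_maxval=255.0,
      target_minval=0.0,
      target_maxval=1.0)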
def generate_tfexample_image(input_example_strings,
image_height,
image_width,
image_channels,
name=None):
"""Parses a 1D tensor of serialized tf.Example protos and returns image batch.
Args:
input_example_strings: A 1-Dimensional tensor of size [batch_size] and type
tf.string containing a serialized Example proto per image.
image_height: First image dimension.
image_width: Second image dimension.
image_channels: Third image dimension.
name: optional tensor name.
Returns:
A tensor with shape [batch_size, height, width, channels] of type float32
with values in the range [0..1]
"""
batch_size = tf.shape(input=input_example_strings)[0]
images_shape = tf.stack(
[batch_size, image_height, image_width, image_channels])
tf_example_image_key = 'image/encoded'
feature_configs = {
tf_example_image_key:
tf.io.FixedLenFeature(
image_height * image_width * image_channels, dtype=tf.float32)
}
feature_tensors = tf.io.parse_example(
serialized=input_example_strings, features=feature_configs)
float_images = tf.reshape(
normalize_image(
feature_tensors[tf_example_image_key],
original_minval=0.0,
original_maxval=255.0,
target_minval=0.0,
target_maxval=1.0),
images_shape,
name=name)
return float_images
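# A minimal usage sketch (150x600x3 matches the FSNS images used by the tests
# in this project; other datasets need their own dimensions):
def _example_image_batch_from_examples(serialized_examples):
  return generate_tfexample_image(
      serialized_examples,
      image_height=150,
      image_width=600,
      image_channels=3,
      name='images')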
def attention_ocr_attention_masks(num_characters):
# TODO(gorban): use tensors directly after replacing LSTM unroll methods.
prefix = ('AttentionOcr_v1/'
'sequence_logit_fn/SQLR/LSTM/attention_decoder/Attention_0')
names = ['%s/Softmax:0' % (prefix)]
for i in range(1, num_characters):
names += ['%s_%d/Softmax:0' % (prefix, i)]
return [tf.compat.v1.get_default_graph().get_tensor_by_name(n) for n in names]
def build_tensor_info(tensor_dict):
return {
k: tf.compat.v1.saved_model.utils.build_tensor_info(t)
for k, t in tensor_dict.items()
}
| 4,112 | 36.733945 | 80 | py |
models | models-master/research/attention_ocr/python/model_export.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts existing checkpoint into a SavedModel.
Usage example:
python model_export.py \
--logtostderr --checkpoint=model.ckpt-399731 \
--export_dir=/tmp/attention_ocr_export
"""
import os
import tensorflow as tf
from tensorflow import app
from tensorflow.contrib import slim
from tensorflow.compat.v1 import flags
import common_flags
import model_export_lib
FLAGS = flags.FLAGS
common_flags.define()
flags.DEFINE_string('export_dir', None, 'Directory to export model files to.')
flags.DEFINE_integer(
'image_width', None,
'Image width used during training (or crop width if used)'
' If not set, the dataset default is used instead.')
flags.DEFINE_integer(
'image_height', None,
'Image height used during training(or crop height if used)'
' If not set, the dataset default is used instead.')
flags.DEFINE_string('work_dir', '/tmp',
'A directory to store temporary files.')
flags.DEFINE_integer('version_number', 1, 'Version number of the model')
flags.DEFINE_bool(
'export_for_serving', True,
'Whether the exported model accepts serialized tf.Example '
'protos as input')
def get_checkpoint_path():
"""Returns a path to a checkpoint based on specified commandline flags.
In order to specify a full path to a checkpoint use --checkpoint flag.
Alternatively, if --train_log_dir was specified it will return a path to the
most recent checkpoint.
Raises:
ValueError: in case it can't find a checkpoint.
Returns:
A string.
"""
if FLAGS.checkpoint:
return FLAGS.checkpoint
else:
model_save_path = tf.train.latest_checkpoint(FLAGS.train_log_dir)
if not model_save_path:
raise ValueError('Can\'t find a checkpoint in: %s' % FLAGS.train_log_dir)
return model_save_path
def export_model(export_dir,
export_for_serving,
batch_size=None,
crop_image_width=None,
crop_image_height=None):
"""Exports a model to the named directory.
  Note that --dataset_name and --checkpoint are required and parsed by the
underlying module common_flags.
Args:
export_dir: The output dir where model is exported to.
    export_for_serving: If True, expects a serialized image as input and
      attaches image normalization as part of the exported graph.
batch_size: For non-serving export, the input batch_size needs to be
specified.
crop_image_width: Width of the input image. Uses the dataset default if
None.
crop_image_height: Height of the input image. Uses the dataset default if
None.
Returns:
Returns the model signature_def.
"""
# Dataset object used only to get all parameters for the model.
dataset = common_flags.create_dataset(split_name='test')
model = common_flags.create_model(
dataset.num_char_classes,
dataset.max_sequence_length,
dataset.num_of_views,
dataset.null_code,
charset=dataset.charset)
dataset_image_height, dataset_image_width, image_depth = dataset.image_shape
# Add check for charmap file
if not os.path.exists(dataset.charset_file):
raise ValueError('No charset defined at {}: export will fail'.format(
        dataset.charset_file))
# Default to dataset dimensions, otherwise use provided dimensions.
image_width = crop_image_width or dataset_image_width
image_height = crop_image_height or dataset_image_height
if export_for_serving:
images_orig = tf.compat.v1.placeholder(
tf.string, shape=[batch_size], name='tf_example')
images_orig_float = model_export_lib.generate_tfexample_image(
images_orig,
image_height,
image_width,
image_depth,
name='float_images')
else:
images_shape = (batch_size, image_height, image_width, image_depth)
images_orig = tf.compat.v1.placeholder(
tf.uint8, shape=images_shape, name='original_image')
images_orig_float = tf.image.convert_image_dtype(
images_orig, dtype=tf.float32, name='float_images')
endpoints = model.create_base(images_orig_float, labels_one_hot=None)
sess = tf.compat.v1.Session()
saver = tf.compat.v1.train.Saver(
slim.get_variables_to_restore(), sharded=True)
saver.restore(sess, get_checkpoint_path())
tf.compat.v1.logging.info('Model restored successfully.')
# Create model signature.
if export_for_serving:
input_tensors = {
tf.saved_model.CLASSIFY_INPUTS: images_orig
}
else:
input_tensors = {'images': images_orig}
signature_inputs = model_export_lib.build_tensor_info(input_tensors)
  # NOTE: Tensors 'images_float' and 'chars_logit' are used by the inference
  # code or to compute saliency maps.
output_tensors = {
'images_float': images_orig_float,
'predictions': endpoints.predicted_chars,
'scores': endpoints.predicted_scores,
'chars_logit': endpoints.chars_logit,
'predicted_length': endpoints.predicted_length,
'predicted_text': endpoints.predicted_text,
'predicted_conf': endpoints.predicted_conf,
'normalized_seq_conf': endpoints.normalized_seq_conf
}
for i, t in enumerate(
model_export_lib.attention_ocr_attention_masks(
dataset.max_sequence_length)):
output_tensors['attention_mask_%d' % i] = t
signature_outputs = model_export_lib.build_tensor_info(output_tensors)
signature_def = tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
signature_inputs, signature_outputs,
tf.saved_model.CLASSIFY_METHOD_NAME)
# Save model.
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.SERVING],
signature_def_map={
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature_def
},
main_op=tf.compat.v1.tables_initializer(),
strip_default_attrs=True)
builder.save()
tf.compat.v1.logging.info('Model has been exported to %s' % export_dir)
return signature_def
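# A minimal, hypothetical sketch (not part of this module) of how the exported
# SavedModel could be loaded back with the TF1 loader API; the tensor names
# assume the non-serving export path and the signature built above.
#
#   with tf.compat.v1.Session(graph=tf.Graph()) as sess:
#     tf.compat.v1.saved_model.loader.load(
#         sess, [tf.saved_model.SERVING], export_dir)
#     # Feed a uint8 batch into 'original_image:0' and fetch, for example,
#     # 'AttentionOcr_v1/predicted_text:0'.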
def main(unused_argv):
if os.path.exists(FLAGS.export_dir):
raise ValueError('export_dir already exists: exporting will fail')
export_model(FLAGS.export_dir, FLAGS.export_for_serving, FLAGS.batch_size,
FLAGS.image_width, FLAGS.image_height)
if __name__ == '__main__':
flags.mark_flag_as_required('dataset_name')
flags.mark_flag_as_required('export_dir')
app.run(main)
| 7,100 | 34.683417 | 83 | py |
models | models-master/research/attention_ocr/python/model_export_test.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_export."""
import os
import numpy as np
from absl.testing import flagsaver
import tensorflow as tf
from tensorflow.compat.v1 import flags
import common_flags
import model_export
_CHECKPOINT = 'model.ckpt-399731'
_CHECKPOINT_URL = (
'http://download.tensorflow.org/models/attention_ocr_2017_08_09.tar.gz')
def _clean_up():
tf.io.gfile.rmtree(tf.compat.v1.test.get_temp_dir())
def _create_tf_example_string(image):
"""Create a serialized tf.Example proto for feeding the model."""
example = tf.train.Example()
example.features.feature['image/encoded'].float_list.value.extend(
list(np.reshape(image, (-1))))
return example.SerializeToString()
class AttentionOcrExportTest(tf.test.TestCase):
"""Tests for model_export.export_model."""
def setUp(self):
for suffix in ['.meta', '.index', '.data-00000-of-00001']:
filename = _CHECKPOINT + suffix
self.assertTrue(
tf.io.gfile.exists(filename),
msg='Missing checkpoint file %s. '
'Please download and extract it from %s' %
(filename, _CHECKPOINT_URL))
flags.FLAGS.dataset_name = 'fsns'
flags.FLAGS.checkpoint = _CHECKPOINT
flags.FLAGS.dataset_dir = os.path.join(
os.path.dirname(__file__), 'datasets/testdata/fsns')
tf.test.TestCase.setUp(self)
_clean_up()
self.export_dir = os.path.join(
tf.compat.v1.test.get_temp_dir(), 'exported_model')
self.minimal_output_signature = {
'predictions': 'AttentionOcr_v1/predicted_chars:0',
'scores': 'AttentionOcr_v1/predicted_scores:0',
'predicted_length': 'AttentionOcr_v1/predicted_length:0',
'predicted_text': 'AttentionOcr_v1/predicted_text:0',
'predicted_conf': 'AttentionOcr_v1/predicted_conf:0',
'normalized_seq_conf': 'AttentionOcr_v1/normalized_seq_conf:0'
}
def create_input_feed(self, graph_def, serving):
"""Returns the input feed for the model.
    Creates random images according to the size specified by dataset_name,
    formats them correctly depending on whether the model was exported
    for serving, and returns the correctly keyed feed_dict for inference.
Args:
graph_def: Graph definition of the loaded model.
serving: Whether the model was exported for Serving.
Returns:
The feed_dict suitable for model inference.
"""
# Creates a dataset based on FLAGS.dataset_name.
self.dataset = common_flags.create_dataset('test')
# Create some random images to test inference for any dataset.
self.images = {
'img1':
np.random.uniform(low=64, high=192,
size=self.dataset.image_shape).astype('uint8'),
'img2':
np.random.uniform(low=32, high=224,
size=self.dataset.image_shape).astype('uint8'),
}
signature_def = graph_def.signature_def[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
if serving:
input_name = signature_def.inputs[
tf.saved_model.CLASSIFY_INPUTS].name
# Model for serving takes input: inputs['inputs'] = 'tf_example:0'
feed_dict = {
input_name: [
_create_tf_example_string(self.images['img1']),
_create_tf_example_string(self.images['img2'])
]
}
else:
input_name = signature_def.inputs['images'].name
# Model for direct use takes input: inputs['images'] = 'original_image:0'
feed_dict = {
input_name: np.stack([self.images['img1'], self.images['img2']])
}
return feed_dict
def verify_export_load_and_inference(self, export_for_serving=False):
"""Verify exported model can be loaded and inference can run successfully.
This function will load the exported model in self.export_dir, then create
some fake images according to the specification of FLAGS.dataset_name.
    It then feeds the input through the model, and verifies that the minimal
    set of output signatures is present.
Note: Model and dataset creation in the underlying library depends on the
following commandline flags:
FLAGS.dataset_name
Args:
export_for_serving: True if the model was exported for Serving. This
affects how input is fed into the model.
"""
tf.compat.v1.reset_default_graph()
sess = tf.compat.v1.Session()
graph_def = tf.compat.v1.saved_model.loader.load(
sess=sess,
tags=[tf.saved_model.SERVING],
export_dir=self.export_dir)
feed_dict = self.create_input_feed(graph_def, export_for_serving)
results = sess.run(self.minimal_output_signature, feed_dict=feed_dict)
out_shape = (2,)
self.assertEqual(np.shape(results['predicted_conf']), out_shape)
self.assertEqual(np.shape(results['predicted_text']), out_shape)
self.assertEqual(np.shape(results['predicted_length']), out_shape)
self.assertEqual(np.shape(results['normalized_seq_conf']), out_shape)
out_shape = (2, self.dataset.max_sequence_length)
self.assertEqual(np.shape(results['scores']), out_shape)
self.assertEqual(np.shape(results['predictions']), out_shape)
@flagsaver.flagsaver
def test_fsns_export_for_serving_and_load_inference(self):
model_export.export_model(self.export_dir, True)
self.verify_export_load_and_inference(True)
@flagsaver.flagsaver
def test_fsns_export_and_load_inference(self):
model_export.export_model(self.export_dir, False, batch_size=2)
self.verify_export_load_and_inference(False)
if __name__ == '__main__':
tf.test.main()
| 6,286 | 37.808642 | 80 | py |
models | models-master/research/attention_ocr/python/utils.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to support building models for StreetView text transcription."""
import tensorflow as tf
from tensorflow.contrib import slim
def logits_to_log_prob(logits):
"""Computes log probabilities using numerically stable trick.
This uses two numerical stability tricks:
1) softmax(x) = softmax(x - c) where c is a constant applied to all
arguments. If we set c = max(x) then the softmax is more numerically
stable.
2) log softmax(x) is not numerically stable, but we can stabilize it
by using the identity log softmax(x) = x - log sum exp(x)
Args:
logits: Tensor of arbitrary shape whose last dimension contains logits.
Returns:
A tensor of the same shape as the input, but with corresponding log
probabilities.
"""
with tf.compat.v1.variable_scope('log_probabilities'):
reduction_indices = len(logits.shape.as_list()) - 1
max_logits = tf.reduce_max(
input_tensor=logits, axis=reduction_indices, keepdims=True)
safe_logits = tf.subtract(logits, max_logits)
sum_exp = tf.reduce_sum(
input_tensor=tf.exp(safe_logits),
axis=reduction_indices,
keepdims=True)
log_probs = tf.subtract(safe_logits, tf.math.log(sum_exp))
return log_probs
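# A quick numeric illustration of the identity above (values chosen purely for
# illustration): for logits x = [1.0, 2.0], max(x) = 2.0, so the shifted logits
# are [-1.0, 0.0], log(sum(exp(...))) ~= 0.3133, and the log probabilities are
# [-1.3133, -0.3133] -- the same as log(softmax(x)) computed directly, but
# without the risk of overflow inside exp().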
def variables_to_restore(scope=None, strip_scope=False):
"""Returns a list of variables to restore for the specified list of methods.
It is supposed that variable name starts with the method's scope (a prefix
returned by _method_scope function).
Args:
methods_names: a list of names of configurable methods.
strip_scope: if True will return variable names without method's scope.
If methods_names is None will return names unchanged.
model_scope: a scope for a whole model.
Returns:
a dictionary mapping variable names to variables for restore.
"""
if scope:
variable_map = {}
method_variables = slim.get_variables_to_restore(include=[scope])
for var in method_variables:
if strip_scope:
var_name = var.op.name[len(scope) + 1:]
else:
var_name = var.op.name
variable_map[var_name] = var
return variable_map
else:
return {v.op.name: v for v in slim.get_variables_to_restore()}
def ConvertAllInputsToTensors(func):
"""A decorator to convert all function's inputs into tensors.
Args:
func: a function to decorate.
Returns:
A decorated function.
"""
def FuncWrapper(*args):
tensors = [tf.convert_to_tensor(value=a) for a in args]
return func(*tensors)
return FuncWrapper
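# A hypothetical usage sketch for the decorator above (names invented for
# illustration):
#
#   @ConvertAllInputsToTensors
#   def elementwise_sum(a, b):
#     return a + b
#
#   # Callers may now pass plain Python lists or numpy arrays; both arguments
#   # are converted to tensors before elementwise_sum runs.
#   total = elementwise_sum([1.0, 2.0], [3.0, 4.0])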
| 3,217 | 31.836735 | 80 | py |
models | models-master/research/attention_ocr/python/model.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to build the Attention OCR model.
Usage example:
ocr_model = model.Model(num_char_classes, seq_length, num_of_views)
data = ... # create namedtuple InputEndpoints
endpoints = model.create_base(data.images, data.labels_one_hot)
# endpoints.predicted_chars is a tensor with predicted character codes.
total_loss = model.create_loss(data, endpoints)
"""
import sys
import collections
import logging
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim.nets import inception
import metrics
import sequence_layers
import utils
OutputEndpoints = collections.namedtuple('OutputEndpoints', [
'chars_logit', 'chars_log_prob', 'predicted_chars', 'predicted_scores',
'predicted_text', 'predicted_length', 'predicted_conf',
'normalized_seq_conf'
])
# TODO(gorban): replace with tf.HParams when it is released.
ModelParams = collections.namedtuple(
'ModelParams', ['num_char_classes', 'seq_length', 'num_views', 'null_code'])
ConvTowerParams = collections.namedtuple('ConvTowerParams', ['final_endpoint'])
SequenceLogitsParams = collections.namedtuple('SequenceLogitsParams', [
'use_attention', 'use_autoregression', 'num_lstm_units', 'weight_decay',
'lstm_state_clip_value'
])
SequenceLossParams = collections.namedtuple(
'SequenceLossParams',
['label_smoothing', 'ignore_nulls', 'average_across_timesteps'])
EncodeCoordinatesParams = collections.namedtuple('EncodeCoordinatesParams',
['enabled'])
def _dict_to_array(id_to_char, default_character):
num_char_classes = max(id_to_char.keys()) + 1
array = [default_character] * num_char_classes
for k, v in id_to_char.items():
array[k] = v
return array
class CharsetMapper(object):
"""A simple class to map tensor ids into strings.
  It works only when the character set is a 1:1 mapping between individual
  characters and individual ids.
Make sure you call tf.tables_initializer().run() as part of the init op.
"""
def __init__(self, charset, default_character='?'):
"""Creates a lookup table.
Args:
charset: a dictionary with id-to-character mapping.
"""
mapping_strings = tf.constant(_dict_to_array(charset, default_character))
self.table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping=mapping_strings, default_value=default_character)
def get_text(self, ids):
"""Returns a string corresponding to a sequence of character ids.
Args:
ids: a tensor with shape [batch_size, max_sequence_length]
"""
return tf.strings.reduce_join(
inputs=self.table.lookup(tf.cast(ids, dtype=tf.int64)), axis=1)
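# A hypothetical CharsetMapper usage sketch (charset values invented for
# illustration):
#
#   mapper = CharsetMapper({0: 'a', 1: 'b', 2: '<null>'})
#   text = mapper.get_text(tf.constant([[0, 1, 2]], dtype=tf.int64))
#   # After tf.compat.v1.tables_initializer().run(), evaluating `text`
#   # yields [b'ab<null>'].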
def get_softmax_loss_fn(label_smoothing):
"""Returns sparse or dense loss function depending on the label_smoothing.
Args:
label_smoothing: weight for label smoothing
Returns:
a function which takes labels and predictions as arguments and returns
a softmax loss for the selected type of labels (sparse or dense).
"""
if label_smoothing > 0:
def loss_fn(labels, logits):
return (tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=tf.stop_gradient(labels)))
else:
def loss_fn(labels, logits):
return tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
return loss_fn
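# Note on the two branches above: with label_smoothing > 0 the labels fed to
# the returned function are dense [batch, num_classes] distributions, otherwise
# they are sparse integer ids of shape [batch]. A hypothetical call:
#
#   loss = get_softmax_loss_fn(0.1)(labels=smoothed_one_hot, logits=logits)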
def get_tensor_dimensions(tensor):
"""Returns the shape components of a 4D tensor with variable batch size.
Args:
tensor : A 4D tensor, whose last 3 dimensions are known at graph
construction time.
Returns:
batch_size : The first dimension as a tensor object.
height : The second dimension as a scalar value.
width : The third dimension as a scalar value.
    num_features : The fourth dimension as a scalar value.
Raises:
ValueError: if input tensor does not have 4 dimensions.
"""
if len(tensor.get_shape().dims) != 4:
raise ValueError(
'Incompatible shape: len(tensor.get_shape().dims) != 4 (%d != 4)' %
len(tensor.get_shape().dims))
batch_size = tf.shape(input=tensor)[0]
height = tensor.get_shape().dims[1].value
width = tensor.get_shape().dims[2].value
num_features = tensor.get_shape().dims[3].value
return batch_size, height, width, num_features
def lookup_indexed_value(indices, row_vecs):
"""Lookup values in each row of 'row_vecs' indexed by 'indices'.
For each sample in the batch, look up the element for the corresponding
index.
Args:
indices : A tensor of shape (batch, )
row_vecs : A tensor of shape [batch, depth]
Returns:
A tensor of shape (batch, ) formed by row_vecs[i, indices[i]].
"""
gather_indices = tf.stack((tf.range(
tf.shape(input=row_vecs)[0], dtype=tf.int32), tf.cast(indices, tf.int32)),
axis=1)
return tf.gather_nd(row_vecs, gather_indices)
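# Worked example for lookup_indexed_value (values chosen for illustration):
# with row_vecs = [[10., 11., 12.], [20., 21., 22.]] and indices = [2, 0], the
# gather indices become [[0, 2], [1, 0]] and the result is [12., 20.] -- one
# element picked per row of the batch.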
@utils.ConvertAllInputsToTensors
def max_char_logprob_cumsum(char_log_prob):
"""Computes the cumulative sum of character logprob for all sequence lengths.
Args:
char_log_prob: A tensor of shape [batch x seq_length x num_char_classes]
with log probabilities of a character.
Returns:
A tensor of shape [batch x (seq_length+1)] where each element x[_, j] is
    the sum of the max char logprob for all positions up to j.
    Note this duplicates the final column and produces (seq_length+1) columns
    so the same function can be used regardless of whether
    use_length_predictions is true or false.
"""
max_char_log_prob = tf.reduce_max(input_tensor=char_log_prob, axis=2)
# For an input array [a, b, c]) tf.cumsum returns [a, a + b, a + b + c] if
# exclusive set to False (default).
return tf.cumsum(max_char_log_prob, axis=1, exclusive=False)
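# Worked example for max_char_logprob_cumsum (values chosen for illustration):
# for one batch element with per-position max char log probs
# [-0.1, -0.2, -0.3], the inclusive cumulative sum is [-0.1, -0.3, -0.6], i.e.
# element j holds the total score of the first j+1 positions.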
def find_length_by_null(predicted_chars, null_code):
"""Determine sequence length by finding null_code among predicted char IDs.
Given the char class ID for each position, compute the sequence length.
  Note that this function computes the length from the count of non-null
  characters, not from the position of the first null_code.
Args:
predicted_chars: A tensor of [batch x seq_length] where each element stores
the char class ID with max probability;
null_code: an int32, character id for the NULL.
Returns:
A [batch, ] tensor which stores the sequence length for each sample.
"""
return tf.reduce_sum(
input_tensor=tf.cast(tf.not_equal(null_code, predicted_chars), tf.int32), axis=1)
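# Worked example for find_length_by_null (values chosen for illustration): with
# null_code = 2 and predicted_chars = [[0, 1, 2, 2], [1, 1, 1, 2]], the
# per-sample counts of non-null characters are [2, 3], which are returned as
# the sequence lengths.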
def axis_pad(tensor, axis, before=0, after=0, constant_values=0.0):
"""Pad a tensor with the specified values along a single axis.
Args:
tensor: a Tensor;
    axis: the dimension along which to pad;
before: number of values to add before the contents of tensor in the
selected dimension;
after: number of values to add after the contents of tensor in the selected
dimension;
constant_values: the scalar pad value to use. Must be same type as tensor.
Returns:
A Tensor. Has the same type as the input tensor, but with a changed shape
along the specified dimension.
"""
if before == 0 and after == 0:
return tensor
ndims = tensor.shape.ndims
padding_size = np.zeros((ndims, 2), dtype='int32')
padding_size[axis] = before, after
return tf.pad(
tensor=tensor,
paddings=tf.constant(padding_size),
constant_values=constant_values)
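# Worked example for axis_pad (shapes chosen for illustration): for a tensor of
# shape [batch, 4, num_classes], axis_pad(t, axis=1, after=1) returns a tensor
# of shape [batch, 5, num_classes] whose extra column is filled with
# constant_values (0.0 by default).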
def null_based_length_prediction(chars_log_prob, null_code):
"""Computes length and confidence of prediction based on positions of NULLs.
Args:
chars_log_prob: A tensor of shape [batch x seq_length x num_char_classes]
with log probabilities of a character;
null_code: an int32, character id for the NULL.
Returns:
A tuple (text_log_prob, predicted_length), where
    text_log_prob - a tensor of shape [batch x (seq_length+1)].
    Element #0 of the output corresponds to the log probability of the empty
    string, element #seq_length - to the log probability of length=seq_length.
predicted_length is a tensor with shape [batch].
"""
predicted_chars = tf.cast(
tf.argmax(input=chars_log_prob, axis=2), dtype=tf.int32)
# We do right pad to support sequences with seq_length elements.
text_log_prob = max_char_logprob_cumsum(
axis_pad(chars_log_prob, axis=1, after=1))
predicted_length = find_length_by_null(predicted_chars, null_code)
return text_log_prob, predicted_length
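# Putting the helpers above together (shapes assumed for illustration): for
# seq_length = 3, the padded cumulative sums give text_log_prob of shape
# [batch, 4], where entry k scores the k-character prefix; predicted_length
# then selects which of those entries becomes the sequence confidence inside
# Model.create_base below.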
class Model(object):
"""Class to create the Attention OCR Model."""
def __init__(self,
num_char_classes,
seq_length,
num_views,
null_code,
mparams=None,
charset=None):
"""Initialized model parameters.
Args:
num_char_classes: size of character set.
seq_length: number of characters in a sequence.
num_views: Number of views (conv towers) to use.
null_code: A character code corresponding to a character which indicates
end of a sequence.
mparams: a dictionary with hyper parameters for methods, keys - function
names, values - corresponding namedtuples.
charset: an optional dictionary with a mapping between character ids and
        utf8 strings. If specified, the OutputEndpoints.predicted_text will
        contain utf8 encoded strings corresponding to the character ids
        returned by OutputEndpoints.predicted_chars (by default
        predicted_text contains an empty vector).
NOTE: Make sure you call tf.tables_initializer().run() if the charset
specified.
"""
super(Model, self).__init__()
self._params = ModelParams(
num_char_classes=num_char_classes,
seq_length=seq_length,
num_views=num_views,
null_code=null_code)
self._mparams = self.default_mparams()
if mparams:
self._mparams.update(mparams)
self._charset = charset
def default_mparams(self):
return {
'conv_tower_fn':
ConvTowerParams(final_endpoint='Mixed_5d'),
'sequence_logit_fn':
SequenceLogitsParams(
use_attention=True,
use_autoregression=True,
num_lstm_units=256,
weight_decay=0.00004,
lstm_state_clip_value=10.0),
'sequence_loss_fn':
SequenceLossParams(
label_smoothing=0.1,
ignore_nulls=True,
average_across_timesteps=False),
'encode_coordinates_fn':
EncodeCoordinatesParams(enabled=False)
}
def set_mparam(self, function, **kwargs):
self._mparams[function] = self._mparams[function]._replace(**kwargs)
def conv_tower_fn(self, images, is_training=True, reuse=None):
"""Computes convolutional features using the InceptionV3 model.
Args:
images: A tensor of shape [batch_size, height, width, channels].
is_training: whether is training or not.
reuse: whether or not the network and its variables should be reused. To
be able to reuse 'scope' must be given.
Returns:
      A tensor of shape [batch_size, OH, OW, N], where OHxOW is the resolution
      of the output feature map and N is the number of output features
      (depends on the network architecture).
"""
mparams = self._mparams['conv_tower_fn']
logging.debug('Using final_endpoint=%s', mparams.final_endpoint)
with tf.compat.v1.variable_scope('conv_tower_fn/INCE'):
if reuse:
tf.compat.v1.get_variable_scope().reuse_variables()
with slim.arg_scope(inception.inception_v3_arg_scope()):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, _ = inception.inception_v3_base(
images, final_endpoint=mparams.final_endpoint)
return net
def _create_lstm_inputs(self, net):
"""Splits an input tensor into a list of tensors (features).
Args:
net: A feature map of shape [batch_size, num_features, feature_size].
Raises:
AssertionError: if num_features is less than seq_length.
Returns:
A list with seq_length tensors of shape [batch_size, feature_size]
"""
num_features = net.get_shape().dims[1].value
if num_features < self._params.seq_length:
raise AssertionError(
'Incorrect dimension #1 of input tensor'
' %d should be bigger than %d (shape=%s)' %
(num_features, self._params.seq_length, net.get_shape()))
elif num_features > self._params.seq_length:
logging.warning('Ignoring some features: use %d of %d (shape=%s)',
self._params.seq_length, num_features, net.get_shape())
net = tf.slice(net, [0, 0, 0], [-1, self._params.seq_length, -1])
return tf.unstack(net, axis=1)
def sequence_logit_fn(self, net, labels_one_hot):
mparams = self._mparams['sequence_logit_fn']
# TODO(gorban): remove /alias suffixes from the scopes.
with tf.compat.v1.variable_scope('sequence_logit_fn/SQLR'):
layer_class = sequence_layers.get_layer_class(mparams.use_attention,
mparams.use_autoregression)
layer = layer_class(net, labels_one_hot, self._params, mparams)
return layer.create_logits()
def max_pool_views(self, nets_list):
"""Max pool across all nets in spatial dimensions.
Args:
nets_list: A list of 4D tensors with identical size.
Returns:
A tensor with the same size as any input tensors.
"""
batch_size, height, width, num_features = [
d.value for d in nets_list[0].get_shape().dims
]
xy_flat_shape = (batch_size, 1, height * width, num_features)
nets_for_merge = []
with tf.compat.v1.variable_scope('max_pool_views', values=nets_list):
for net in nets_list:
nets_for_merge.append(tf.reshape(net, xy_flat_shape))
merged_net = tf.concat(nets_for_merge, 1)
net = slim.max_pool2d(
merged_net, kernel_size=[len(nets_list), 1], stride=1)
net = tf.reshape(net, (batch_size, height, width, num_features))
return net
def pool_views_fn(self, nets):
"""Combines output of multiple convolutional towers into a single tensor.
    It stacks towers one on top of another (in the height dim) in a 4x1 grid.
    The order is an arbitrary design choice and shouldn't matter much.
Args:
nets: list of tensors of shape=[batch_size, height, width, num_features].
Returns:
A tensor of shape [batch_size, seq_length, features_size].
"""
with tf.compat.v1.variable_scope('pool_views_fn/STCK'):
net = tf.concat(nets, 1)
batch_size = tf.shape(input=net)[0]
image_size = net.get_shape().dims[1].value * \
net.get_shape().dims[2].value
feature_size = net.get_shape().dims[3].value
return tf.reshape(net, tf.stack([batch_size, image_size, feature_size]))
def char_predictions(self, chars_logit):
"""Returns confidence scores (softmax values) for predicted characters.
Args:
chars_logit: chars logits, a tensor with shape [batch_size x seq_length x
num_char_classes]
Returns:
A tuple (ids, log_prob, scores), where:
ids - predicted characters, a int32 tensor with shape
[batch_size x seq_length];
log_prob - a log probability of all characters, a float tensor with
shape [batch_size, seq_length, num_char_classes];
scores - corresponding confidence scores for characters, a float
tensor
with shape [batch_size x seq_length].
"""
log_prob = utils.logits_to_log_prob(chars_logit)
ids = tf.cast(tf.argmax(input=log_prob, axis=2),
name='predicted_chars', dtype=tf.int32)
mask = tf.cast(
slim.one_hot_encoding(ids, self._params.num_char_classes), tf.bool)
all_scores = tf.nn.softmax(chars_logit)
selected_scores = tf.boolean_mask(
tensor=all_scores, mask=mask, name='char_scores')
scores = tf.reshape(
selected_scores,
shape=(-1, self._params.seq_length),
name='predicted_scores')
return ids, log_prob, scores
def encode_coordinates_fn(self, net):
"""Adds one-hot encoding of coordinates to different views in the networks.
For each "pixel" of a feature map it adds a onehot encoded x and y
coordinates.
Args:
net: a tensor of shape=[batch_size, height, width, num_features]
Returns:
a tensor with the same height and width, but altered feature_size.
"""
mparams = self._mparams['encode_coordinates_fn']
if mparams.enabled:
batch_size, h, w, _ = get_tensor_dimensions(net)
x, y = tf.meshgrid(tf.range(w), tf.range(h))
w_loc = slim.one_hot_encoding(x, num_classes=w)
h_loc = slim.one_hot_encoding(y, num_classes=h)
loc = tf.concat([h_loc, w_loc], 2)
loc = tf.tile(tf.expand_dims(loc, 0), tf.stack([batch_size, 1, 1, 1]))
return tf.concat([net, loc], 3)
else:
return net
def create_base(self,
images,
labels_one_hot,
scope='AttentionOcr_v1',
reuse=None):
"""Creates a base part of the Model (no gradients, losses or summaries).
Args:
images: A tensor of shape [batch_size, height, width, channels] with pixel
values in the range [0.0, 1.0].
labels_one_hot: Optional (can be None) one-hot encoding for ground truth
labels. If provided the function will create a model for training.
scope: Optional variable_scope.
reuse: whether or not the network and its variables should be reused. To
be able to reuse 'scope' must be given.
Returns:
A named tuple OutputEndpoints.
"""
logging.debug('images: %s', images)
is_training = labels_one_hot is not None
# Normalize image pixel values to have a symmetrical range around zero.
images = tf.subtract(images, 0.5)
images = tf.multiply(images, 2.5)
with tf.compat.v1.variable_scope(scope, reuse=reuse):
views = tf.split(
value=images, num_or_size_splits=self._params.num_views, axis=2)
logging.debug('Views=%d single view: %s', len(views), views[0])
nets = [
self.conv_tower_fn(v, is_training, reuse=(i != 0))
for i, v in enumerate(views)
]
logging.debug('Conv tower: %s', nets[0])
nets = [self.encode_coordinates_fn(net) for net in nets]
logging.debug('Conv tower w/ encoded coordinates: %s', nets[0])
net = self.pool_views_fn(nets)
logging.debug('Pooled views: %s', net)
chars_logit = self.sequence_logit_fn(net, labels_one_hot)
logging.debug('chars_logit: %s', chars_logit)
predicted_chars, chars_log_prob, predicted_scores = (
self.char_predictions(chars_logit))
if self._charset:
character_mapper = CharsetMapper(self._charset)
predicted_text = character_mapper.get_text(predicted_chars)
else:
predicted_text = tf.constant([])
text_log_prob, predicted_length = null_based_length_prediction(
chars_log_prob, self._params.null_code)
predicted_conf = lookup_indexed_value(predicted_length, text_log_prob)
# Convert predicted confidence from sum of logs to geometric mean
normalized_seq_conf = tf.exp(
tf.divide(predicted_conf,
tf.cast(predicted_length + 1, predicted_conf.dtype)),
name='normalized_seq_conf')
predicted_conf = tf.identity(predicted_conf, name='predicted_conf')
predicted_text = tf.identity(predicted_text, name='predicted_text')
predicted_length = tf.identity(predicted_length, name='predicted_length')
return OutputEndpoints(
chars_logit=chars_logit,
chars_log_prob=chars_log_prob,
predicted_chars=predicted_chars,
predicted_scores=predicted_scores,
predicted_length=predicted_length,
predicted_text=predicted_text,
predicted_conf=predicted_conf,
normalized_seq_conf=normalized_seq_conf)
def create_loss(self, data, endpoints):
"""Creates all losses required to train the model.
Args:
data: InputEndpoints namedtuple.
endpoints: Model namedtuple.
Returns:
Total loss.
"""
# NOTE: the return value of ModelLoss is not used directly for the
# gradient computation because under the hood it calls slim.losses.AddLoss,
# which registers the loss in an internal collection and later returns it
# as part of GetTotalLoss. We need to use total loss because model may have
# multiple losses including regularization losses.
self.sequence_loss_fn(endpoints.chars_logit, data.labels)
total_loss = slim.losses.get_total_loss()
tf.compat.v1.summary.scalar('TotalLoss', total_loss)
return total_loss
def label_smoothing_regularization(self, chars_labels, weight=0.1):
"""Applies a label smoothing regularization.
Uses the same method as in https://arxiv.org/abs/1512.00567.
Args:
      chars_labels: ground truth ids of characters, shape=[batch_size,
seq_length];
weight: label-smoothing regularization weight.
Returns:
      A tensor with the same shape as the input.
"""
one_hot_labels = tf.one_hot(
chars_labels, depth=self._params.num_char_classes, axis=-1)
pos_weight = 1.0 - weight
neg_weight = weight / self._params.num_char_classes
return one_hot_labels * pos_weight + neg_weight
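  # Worked numbers for the smoothing above (weight=0.1 and 4 classes assumed
  # for illustration): the correct class gets 1.0 * 0.9 + 0.1 / 4 = 0.925 and
  # every other class gets 0.1 / 4 = 0.025, so each row still sums to 1.0.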
def sequence_loss_fn(self, chars_logits, chars_labels):
"""Loss function for char sequence.
Depending on values of hyper parameters it applies label smoothing and can
also ignore all null chars after the first one.
Args:
chars_logits: logits for predicted characters, shape=[batch_size,
seq_length, num_char_classes];
chars_labels: ground truth ids of characters, shape=[batch_size,
seq_length];
Returns:
A Tensor with shape [batch_size] - the log-perplexity for each sequence.
"""
mparams = self._mparams['sequence_loss_fn']
with tf.compat.v1.variable_scope('sequence_loss_fn/SLF'):
if mparams.label_smoothing > 0:
smoothed_one_hot_labels = self.label_smoothing_regularization(
chars_labels, mparams.label_smoothing)
labels_list = tf.unstack(smoothed_one_hot_labels, axis=1)
else:
# NOTE: in case of sparse softmax we are not using one-hot
# encoding.
labels_list = tf.unstack(chars_labels, axis=1)
batch_size, seq_length, _ = chars_logits.shape.as_list()
if mparams.ignore_nulls:
weights = tf.ones((batch_size, seq_length), dtype=tf.float32)
else:
        # Assume the reject character is the last one in the charset.
reject_char = tf.constant(
self._params.num_char_classes - 1,
shape=(batch_size, seq_length),
dtype=tf.int64)
known_char = tf.not_equal(chars_labels, reject_char)
weights = tf.cast(known_char, dtype=tf.float32)
logits_list = tf.unstack(chars_logits, axis=1)
weights_list = tf.unstack(weights, axis=1)
loss = tf.contrib.legacy_seq2seq.sequence_loss(
logits_list,
labels_list,
weights_list,
softmax_loss_function=get_softmax_loss_fn(mparams.label_smoothing),
average_across_timesteps=mparams.average_across_timesteps)
tf.compat.v1.losses.add_loss(loss)
return loss
def create_summaries(self, data, endpoints, charset, is_training):
"""Creates all summaries for the model.
Args:
data: InputEndpoints namedtuple.
endpoints: OutputEndpoints namedtuple.
charset: A dictionary with mapping between character codes and unicode
characters. Use the one provided by a dataset.charset.
is_training: If True will create summary prefixes for training job,
otherwise - for evaluation.
Returns:
A list of evaluation ops
"""
def sname(label):
prefix = 'train' if is_training else 'eval'
return '%s/%s' % (prefix, label)
max_outputs = 4
# TODO(gorban): uncomment, when tf.summary.text released.
# charset_mapper = CharsetMapper(charset)
# pr_text = charset_mapper.get_text(
# endpoints.predicted_chars[:max_outputs,:])
# tf.summary.text(sname('text/pr'), pr_text)
# gt_text = charset_mapper.get_text(data.labels[:max_outputs,:])
# tf.summary.text(sname('text/gt'), gt_text)
tf.compat.v1.summary.image(
sname('image'), data.images, max_outputs=max_outputs)
if is_training:
tf.compat.v1.summary.image(
sname('image/orig'), data.images_orig, max_outputs=max_outputs)
for var in tf.compat.v1.trainable_variables():
tf.compat.v1.summary.histogram(var.op.name, var)
return None
else:
names_to_values = {}
names_to_updates = {}
def use_metric(name, value_update_tuple):
names_to_values[name] = value_update_tuple[0]
names_to_updates[name] = value_update_tuple[1]
use_metric(
'CharacterAccuracy',
metrics.char_accuracy(
endpoints.predicted_chars,
data.labels,
streaming=True,
rej_char=self._params.null_code))
# Sequence accuracy computed by cutting sequence at the first null char
use_metric(
'SequenceAccuracy',
metrics.sequence_accuracy(
endpoints.predicted_chars,
data.labels,
streaming=True,
rej_char=self._params.null_code))
for name, value in names_to_values.items():
summary_name = 'eval/' + name
tf.compat.v1.summary.scalar(
summary_name, tf.compat.v1.Print(value, [value], summary_name))
return list(names_to_updates.values())
def create_init_fn_to_restore(self,
master_checkpoint,
inception_checkpoint=None):
"""Creates an init operations to restore weights from various checkpoints.
Args:
master_checkpoint: path to a checkpoint which contains all weights for the
whole model.
inception_checkpoint: path to a checkpoint which contains weights for the
inception part only.
Returns:
a function to run initialization ops.
"""
all_assign_ops = []
all_feed_dict = {}
def assign_from_checkpoint(variables, checkpoint):
      logging.info('Request to restore %d weights from %s', len(variables),
checkpoint)
if not variables:
logging.error('Can\'t find any variables to restore.')
sys.exit(1)
assign_op, feed_dict = slim.assign_from_checkpoint(checkpoint, variables)
all_assign_ops.append(assign_op)
all_feed_dict.update(feed_dict)
logging.info('variables_to_restore:\n%s',
utils.variables_to_restore().keys())
logging.info('moving_average_variables:\n%s',
[v.op.name for v in tf.compat.v1.moving_average_variables()])
logging.info('trainable_variables:\n%s',
[v.op.name for v in tf.compat.v1.trainable_variables()])
if master_checkpoint:
assign_from_checkpoint(utils.variables_to_restore(), master_checkpoint)
if inception_checkpoint:
variables = utils.variables_to_restore(
'AttentionOcr_v1/conv_tower_fn/INCE', strip_scope=True)
assign_from_checkpoint(variables, inception_checkpoint)
def init_assign_fn(sess):
logging.info('Restoring checkpoint(s)')
sess.run(all_assign_ops, all_feed_dict)
return init_assign_fn
| 28,367 | 36.424802 | 87 | py |
models | models-master/research/attention_ocr/python/metrics_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the metrics module."""
import contextlib
import numpy as np
import tensorflow as tf
import metrics
class AccuracyTest(tf.test.TestCase):
def setUp(self):
tf.test.TestCase.setUp(self)
self.rng = np.random.RandomState([11, 23, 50])
self.num_char_classes = 3
self.batch_size = 4
self.seq_length = 5
self.rej_char = 42
@contextlib.contextmanager
def initialized_session(self):
"""Wrapper for test session context manager with required initialization.
Yields:
A session object that should be used as a context manager.
"""
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.compat.v1.local_variables_initializer())
yield sess
def _fake_labels(self):
return self.rng.randint(
low=0,
high=self.num_char_classes,
size=(self.batch_size, self.seq_length),
dtype='int32')
def _incorrect_copy(self, values, bad_indexes):
incorrect = np.copy(values)
incorrect[bad_indexes] = values[bad_indexes] + 1
return incorrect
def test_sequence_accuracy_identical_samples(self):
labels_tf = tf.convert_to_tensor(value=self._fake_labels())
accuracy_tf = metrics.sequence_accuracy(labels_tf, labels_tf,
self.rej_char)
with self.initialized_session() as sess:
accuracy_np = sess.run(accuracy_tf)
self.assertAlmostEqual(accuracy_np, 1.0)
def test_sequence_accuracy_one_char_difference(self):
ground_truth_np = self._fake_labels()
ground_truth_tf = tf.convert_to_tensor(value=ground_truth_np)
prediction_tf = tf.convert_to_tensor(
value=self._incorrect_copy(ground_truth_np, bad_indexes=((0, 0))))
accuracy_tf = metrics.sequence_accuracy(prediction_tf, ground_truth_tf,
self.rej_char)
with self.initialized_session() as sess:
accuracy_np = sess.run(accuracy_tf)
# 1 of 4 sequences is incorrect.
self.assertAlmostEqual(accuracy_np, 1.0 - 1.0 / self.batch_size)
def test_char_accuracy_one_char_difference_with_padding(self):
ground_truth_np = self._fake_labels()
ground_truth_tf = tf.convert_to_tensor(value=ground_truth_np)
prediction_tf = tf.convert_to_tensor(
value=self._incorrect_copy(ground_truth_np, bad_indexes=((0, 0))))
accuracy_tf = metrics.char_accuracy(prediction_tf, ground_truth_tf,
self.rej_char)
with self.initialized_session() as sess:
accuracy_np = sess.run(accuracy_tf)
chars_count = self.seq_length * self.batch_size
self.assertAlmostEqual(accuracy_np, 1.0 - 1.0 / chars_count)
if __name__ == '__main__':
tf.test.main()
| 3,445 | 34.163265 | 80 | py |
models | models-master/research/attention_ocr/python/demo_inference.py | """A script to run inference on a set of image files.
NOTE #1: The Attention OCR model was trained only using FSNS train dataset and
it will work only for images which look more or less similar to french street
names. In order to apply it to images from a different distribution you need
to retrain (or at least fine-tune) it using images from that distribution.
NOTE #2: This script exists for demo purposes only. It is highly recommended
to use tools and mechanisms provided by the TensorFlow Serving system to run
inference on TensorFlow models in production:
https://www.tensorflow.org/serving/serving_basic
Usage:
python demo_inference.py --batch_size=32 \
--checkpoint=model.ckpt-399731\
--image_path_pattern=./datasets/data/fsns/temp/fsns_train_%02d.png
"""
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.compat.v1 import flags
from tensorflow.python.training import monitored_session
import common_flags
import datasets
import data_provider
FLAGS = flags.FLAGS
common_flags.define()
# e.g. ./datasets/data/fsns/temp/fsns_train_%02d.png
flags.DEFINE_string('image_path_pattern', '',
'A file pattern with a placeholder for the image index.')
def get_dataset_image_size(dataset_name):
# Ideally this info should be exposed through the dataset interface itself.
# But currently it is not available by other means.
ds_module = getattr(datasets, dataset_name)
height, width, _ = ds_module.DEFAULT_CONFIG['image_shape']
return width, height
def load_images(file_pattern, batch_size, dataset_name):
width, height = get_dataset_image_size(dataset_name)
images_actual_data = np.ndarray(shape=(batch_size, height, width, 3),
dtype='uint8')
for i in range(batch_size):
path = file_pattern % i
print("Reading %s" % path)
pil_image = PIL.Image.open(tf.io.gfile.GFile(path, 'rb'))
images_actual_data[i, ...] = np.asarray(pil_image)
return images_actual_data
def create_model(batch_size, dataset_name):
width, height = get_dataset_image_size(dataset_name)
dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
model = common_flags.create_model(
num_char_classes=dataset.num_char_classes,
seq_length=dataset.max_sequence_length,
num_views=dataset.num_of_views,
null_code=dataset.null_code,
charset=dataset.charset)
raw_images = tf.compat.v1.placeholder(
tf.uint8, shape=[batch_size, height, width, 3])
images = tf.map_fn(data_provider.preprocess_image, raw_images,
dtype=tf.float32)
endpoints = model.create_base(images, labels_one_hot=None)
return raw_images, endpoints
def run(checkpoint, batch_size, dataset_name, image_path_pattern):
images_placeholder, endpoints = create_model(batch_size,
dataset_name)
images_data = load_images(image_path_pattern, batch_size,
dataset_name)
session_creator = monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint)
with monitored_session.MonitoredSession(
session_creator=session_creator) as sess:
predictions = sess.run(endpoints.predicted_text,
feed_dict={images_placeholder: images_data})
return [pr_bytes.decode('utf-8') for pr_bytes in predictions.tolist()]
def main(_):
print("Predicted strings:")
predictions = run(FLAGS.checkpoint, FLAGS.batch_size, FLAGS.dataset_name,
FLAGS.image_path_pattern)
for line in predictions:
print(line)
if __name__ == '__main__':
tf.compat.v1.app.run()
| 3,635 | 36.102041 | 78 | py |
models | models-master/research/attention_ocr/python/sequence_layers.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various implementations of sequence layers for character prediction.
A 'sequence layer' is a part of a computation graph which is responsible for
producing a sequence of characters using extracted image features. There are
many reasonable ways to implement such layers. All of them use RNNs.
This module provides implementations which use an 'attention' mechanism to
spatially 'pool' image features and can also use a previously predicted
character to predict the next one (aka auto regression).
Usage:
Select one of available classes, e.g. Attention or use a wrapper function to
pick one based on your requirements:
layer_class = sequence_layers.get_layer_class(use_attention=True,
use_autoregression=True)
layer = layer_class(net, labels_one_hot, model_params, method_params)
char_logits = layer.create_logits()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import abc
import logging
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
def orthogonal_initializer(shape, dtype=tf.float32, *args, **kwargs):
"""Generates orthonormal matrices with random values.
Orthonormal initialization is important for RNNs:
http://arxiv.org/abs/1312.6120
http://smerity.com/articles/2016/orthogonal_init.html
For non-square shapes the returned matrix will be semi-orthonormal: if the
number of columns exceeds the number of rows, then the rows are orthonormal
vectors; but if the number of rows exceeds the number of columns, then the
columns are orthonormal vectors.
We use SVD decomposition to generate an orthonormal matrix with random
values. The same way as it is done in the Lasagne library for Theano. Note
that both u and v returned by the svd are orthogonal and random. We just need
to pick one with the right shape.
Args:
shape: a shape of the tensor matrix to initialize.
dtype: a dtype of the initialized tensor.
*args: not used.
**kwargs: not used.
Returns:
An initialized tensor.
"""
del args
del kwargs
flat_shape = (shape[0], np.prod(shape[1:]))
w = np.random.randn(*flat_shape)
u, _, v = np.linalg.svd(w, full_matrices=False)
w = u if u.shape == flat_shape else v
return tf.constant(w.reshape(shape), dtype=dtype)
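# A minimal sanity-check sketch for the initializer above (shape chosen for
# illustration only): the rows of a wide matrix should be orthonormal.
#
#   w = orthogonal_initializer((4, 16))
#   # tf.matmul(w, w, transpose_b=True) evaluates to (approximately) the
#   # 4x4 identity matrix.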
SequenceLayerParams = collections.namedtuple('SequenceLogitsParams', [
'num_lstm_units', 'weight_decay', 'lstm_state_clip_value'
])
class SequenceLayerBase(object):
"""A base abstruct class for all sequence layers.
A child class has to define following methods:
get_train_input
get_eval_input
unroll_cell
"""
__metaclass__ = abc.ABCMeta
def __init__(self, net, labels_one_hot, model_params, method_params):
"""Stores argument in member variable for further use.
Args:
net: A tensor with shape [batch_size, num_features, feature_size] which
contains some extracted image features.
labels_one_hot: An optional (can be None) ground truth labels for the
input features. Is a tensor with shape
[batch_size, seq_length, num_char_classes]
model_params: A namedtuple with model parameters (model.ModelParams).
method_params: A SequenceLayerParams instance.
"""
self._params = model_params
self._mparams = method_params
self._net = net
self._labels_one_hot = labels_one_hot
self._batch_size = tf.shape(input=net)[0]
# Initialize parameters for char logits which will be computed on the fly
# inside an LSTM decoder.
self._char_logits = {}
regularizer = tf.keras.regularizers.l2(0.5 * (self._mparams.weight_decay))
self._softmax_w = slim.model_variable(
'softmax_w',
[self._mparams.num_lstm_units, self._params.num_char_classes],
initializer=orthogonal_initializer,
regularizer=regularizer)
self._softmax_b = slim.model_variable(
'softmax_b', [self._params.num_char_classes],
initializer=tf.compat.v1.zeros_initializer(),
regularizer=regularizer)
@abc.abstractmethod
def get_train_input(self, prev, i):
"""Returns a sample to be used to predict a character during training.
This function is used as a loop_function for an RNN decoder.
Args:
prev: output tensor from previous step of the RNN. A tensor with shape:
[batch_size, num_char_classes].
i: index of a character in the output sequence.
Returns:
A tensor with shape [batch_size, ?] - depth depends on implementation
details.
"""
pass
@abc.abstractmethod
def get_eval_input(self, prev, i):
"""Returns a sample to be used to predict a character during inference.
This function is used as a loop_function for an RNN decoder.
Args:
prev: output tensor from previous step of the RNN. A tensor with shape:
[batch_size, num_char_classes].
i: index of a character in the output sequence.
Returns:
A tensor with shape [batch_size, ?] - depth depends on implementation
details.
"""
raise AssertionError('Not implemented')
@abc.abstractmethod
def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell):
"""Unrolls an RNN cell for all inputs.
This is a placeholder to call some RNN decoder. It has a similar to
tf.seq2seq.rnn_decode interface.
Args:
      decoder_inputs: A list of 2D Tensors [batch_size x input_size]. In fact,
        most existing decoders, in the presence of a loop_function, use only
        the first element to determine batch_size and the length of the list
        to determine the number of steps.
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
loop_function: function will be applied to the i-th output in order to
generate the i+1-st input (see self.get_input).
cell: rnn_cell.RNNCell defining the cell function and size.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of character logits of the same length as
decoder_inputs of 2D Tensors with shape [batch_size x num_characters].
state: The state of each cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
pass
def is_training(self):
"""Returns True if the layer is created for training stage."""
return self._labels_one_hot is not None
def char_logit(self, inputs, char_index):
"""Creates logits for a character if required.
Args:
inputs: A tensor with shape [batch_size, ?] (depth is implementation
dependent).
char_index: A integer index of a character in the output sequence.
Returns:
A tensor with shape [batch_size, num_char_classes]
"""
if char_index not in self._char_logits:
self._char_logits[char_index] = tf.compat.v1.nn.xw_plus_b(inputs, self._softmax_w,
self._softmax_b)
return self._char_logits[char_index]
def char_one_hot(self, logit):
"""Creates one hot encoding for a logit of a character.
Args:
logit: A tensor with shape [batch_size, num_char_classes].
Returns:
A tensor with shape [batch_size, num_char_classes]
"""
prediction = tf.argmax(input=logit, axis=1)
return slim.one_hot_encoding(prediction, self._params.num_char_classes)
def get_input(self, prev, i):
"""A wrapper for get_train_input and get_eval_input.
Args:
prev: output tensor from previous step of the RNN. A tensor with shape:
[batch_size, num_char_classes].
i: index of a character in the output sequence.
Returns:
A tensor with shape [batch_size, ?] - depth depends on implementation
details.
"""
if self.is_training():
return self.get_train_input(prev, i)
else:
return self.get_eval_input(prev, i)
def create_logits(self):
"""Creates character sequence logits for a net specified in the constructor.
A "main" method for the sequence layer which glues together all pieces.
Returns:
A tensor with shape [batch_size, seq_length, num_char_classes].
"""
with tf.compat.v1.variable_scope('LSTM'):
first_label = self.get_input(prev=None, i=0)
decoder_inputs = [first_label] + [None] * (self._params.seq_length - 1)
lstm_cell = tf.compat.v1.nn.rnn_cell.LSTMCell(
self._mparams.num_lstm_units,
use_peepholes=False,
cell_clip=self._mparams.lstm_state_clip_value,
state_is_tuple=True,
initializer=orthogonal_initializer)
lstm_outputs, _ = self.unroll_cell(
decoder_inputs=decoder_inputs,
initial_state=lstm_cell.zero_state(self._batch_size, tf.float32),
loop_function=self.get_input,
cell=lstm_cell)
with tf.compat.v1.variable_scope('logits'):
logits_list = [
tf.expand_dims(self.char_logit(logit, i), axis=1)
for i, logit in enumerate(lstm_outputs)
]
return tf.concat(logits_list, 1)
class NetSlice(SequenceLayerBase):
"""A layer which uses a subset of image features to predict each character.
"""
def __init__(self, *args, **kwargs):
super(NetSlice, self).__init__(*args, **kwargs)
self._zero_label = tf.zeros(
tf.stack([self._batch_size, self._params.num_char_classes]))
def get_image_feature(self, char_index):
"""Returns a subset of image features for a character.
Args:
char_index: an index of a character.
Returns:
A tensor with shape [batch_size, ?]. The output depth depends on the
depth of input net.
"""
batch_size, features_num, _ = [d.value for d in self._net.get_shape()]
slice_len = int(features_num / self._params.seq_length)
    # When features_num != seq_length, we just pick a subset of image
    # features; this choice is arbitrary and there is no intuitive geometrical
    # interpretation. If features_num is not divisible by seq_length there
    # will be unused image features.
net_slice = self._net[:, char_index:char_index + slice_len, :]
feature = tf.reshape(net_slice, [batch_size, -1])
logging.debug('Image feature: %s', feature)
return feature
def get_eval_input(self, prev, i):
"""See SequenceLayerBase.get_eval_input for details."""
del prev
return self.get_image_feature(i)
def get_train_input(self, prev, i):
"""See SequenceLayerBase.get_train_input for details."""
return self.get_eval_input(prev, i)
def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell):
"""See SequenceLayerBase.unroll_cell for details."""
return tf.contrib.legacy_seq2seq.rnn_decoder(
decoder_inputs=decoder_inputs,
initial_state=initial_state,
cell=cell,
loop_function=self.get_input)
class NetSliceWithAutoregression(NetSlice):
"""A layer similar to NetSlice, but it also uses auto regression.
The "auto regression" means that we use network output for previous character
as a part of input for the current character.
"""
def __init__(self, *args, **kwargs):
super(NetSliceWithAutoregression, self).__init__(*args, **kwargs)
def get_eval_input(self, prev, i):
"""See SequenceLayerBase.get_eval_input for details."""
if i == 0:
prev = self._zero_label
else:
logit = self.char_logit(prev, char_index=i - 1)
prev = self.char_one_hot(logit)
image_feature = self.get_image_feature(char_index=i)
return tf.concat([image_feature, prev], 1)
def get_train_input(self, prev, i):
"""See SequenceLayerBase.get_train_input for details."""
if i == 0:
prev = self._zero_label
else:
prev = self._labels_one_hot[:, i - 1, :]
image_feature = self.get_image_feature(i)
return tf.concat([image_feature, prev], 1)
class Attention(SequenceLayerBase):
"""A layer which uses attention mechanism to select image features."""
def __init__(self, *args, **kwargs):
super(Attention, self).__init__(*args, **kwargs)
self._zero_label = tf.zeros(
tf.stack([self._batch_size, self._params.num_char_classes]))
def get_eval_input(self, prev, i):
"""See SequenceLayerBase.get_eval_input for details."""
del prev, i
# The attention_decoder will fetch image features from the net, no need for
# extra inputs.
return self._zero_label
def get_train_input(self, prev, i):
"""See SequenceLayerBase.get_train_input for details."""
return self.get_eval_input(prev, i)
def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell):
return tf.contrib.legacy_seq2seq.attention_decoder(
decoder_inputs=decoder_inputs,
initial_state=initial_state,
attention_states=self._net,
cell=cell,
loop_function=self.get_input)
class AttentionWithAutoregression(Attention):
"""A layer which uses both attention and auto regression."""
def __init__(self, *args, **kwargs):
super(AttentionWithAutoregression, self).__init__(*args, **kwargs)
def get_train_input(self, prev, i):
"""See SequenceLayerBase.get_train_input for details."""
if i == 0:
return self._zero_label
else:
# TODO(gorban): update to gradually introduce gt labels.
return self._labels_one_hot[:, i - 1, :]
def get_eval_input(self, prev, i):
"""See SequenceLayerBase.get_eval_input for details."""
if i == 0:
return self._zero_label
else:
logit = self.char_logit(prev, char_index=i - 1)
return self.char_one_hot(logit)
def get_layer_class(use_attention, use_autoregression):
"""A convenience function to get a layer class based on requirements.
Args:
use_attention: if True a returned class will use attention.
use_autoregression: if True a returned class will use auto regression.
Returns:
One of available sequence layers (child classes for SequenceLayerBase).
"""
if use_attention and use_autoregression:
layer_class = AttentionWithAutoregression
elif use_attention and not use_autoregression:
layer_class = Attention
elif not use_attention and not use_autoregression:
layer_class = NetSlice
elif not use_attention and use_autoregression:
layer_class = NetSliceWithAutoregression
else:
raise AssertionError('Unsupported sequence layer class')
logging.debug('Use %s as a layer class', layer_class.__name__)
return layer_class
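# A hypothetical end-to-end usage of this module (tensors assumed to already
# exist, mirroring the module docstring):
#
#   layer_class = get_layer_class(use_attention=True, use_autoregression=True)
#   layer = layer_class(net, labels_one_hot, model_params, method_params)
#   char_logits = layer.create_logits()
#   # char_logits has shape [batch_size, seq_length, num_char_classes].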
| 15,179 | 34.886525 | 88 | py |
models | models-master/research/attention_ocr/python/common_flags.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define flags are common for both train.py and eval.py scripts."""
import logging
import sys
from tensorflow.compat.v1 import flags
import datasets
import model
FLAGS = flags.FLAGS
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='%(levelname)s '
'%(asctime)s.%(msecs)06d: '
'%(filename)s: '
'%(lineno)d '
'%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
_common_flags_defined = False
def define():
"""Define common flags."""
# yapf: disable
# common_flags.define() may be called multiple times in unit tests.
global _common_flags_defined
if _common_flags_defined:
return
_common_flags_defined = True
flags.DEFINE_integer('batch_size', 32,
'Batch size.')
flags.DEFINE_integer('crop_width', None,
'Width of the central crop for images.')
flags.DEFINE_integer('crop_height', None,
'Height of the central crop for images.')
flags.DEFINE_string('train_log_dir', '/tmp/attention_ocr/train',
'Directory where to write event logs.')
flags.DEFINE_string('dataset_name', 'fsns',
'Name of the dataset. Supported: fsns')
flags.DEFINE_string('split_name', 'train',
'Dataset split name to run evaluation for: test,train.')
flags.DEFINE_string('dataset_dir', None,
'Dataset root folder.')
flags.DEFINE_string('checkpoint', '',
'Path for checkpoint to restore weights from.')
flags.DEFINE_string('master',
'',
'BNS name of the TensorFlow master to use.')
# Model hyper parameters
flags.DEFINE_float('learning_rate', 0.004,
'learning rate')
flags.DEFINE_string('optimizer', 'momentum',
'the optimizer to use')
flags.DEFINE_float('momentum', 0.9,
'momentum value for the momentum optimizer if used')
flags.DEFINE_bool('use_augment_input', True,
'If True will use image augmentation')
# Method hyper parameters
# conv_tower_fn
flags.DEFINE_string('final_endpoint', 'Mixed_5d',
'Endpoint to cut inception tower')
# sequence_logit_fn
flags.DEFINE_bool('use_attention', True,
'If True will use the attention mechanism')
flags.DEFINE_bool('use_autoregression', True,
'If True will use autoregression (a feedback link)')
flags.DEFINE_integer('num_lstm_units', 256,
'number of LSTM units for sequence LSTM')
flags.DEFINE_float('weight_decay', 0.00004,
'weight decay for char prediction FC layers')
flags.DEFINE_float('lstm_state_clip_value', 10.0,
'cell state is clipped by this value prior to the cell'
' output activation')
# 'sequence_loss_fn'
flags.DEFINE_float('label_smoothing', 0.1,
'weight for label smoothing')
flags.DEFINE_bool('ignore_nulls', True,
'ignore null characters for computing the loss')
flags.DEFINE_bool('average_across_timesteps', False,
'divide the returned cost by the total label weight')
# yapf: enable
def get_crop_size():
if FLAGS.crop_width and FLAGS.crop_height:
return (FLAGS.crop_width, FLAGS.crop_height)
else:
return None
def create_dataset(split_name):
ds_module = getattr(datasets, FLAGS.dataset_name)
return ds_module.get_split(split_name, dataset_dir=FLAGS.dataset_dir)
def create_mparams():
return {
'conv_tower_fn':
model.ConvTowerParams(final_endpoint=FLAGS.final_endpoint),
'sequence_logit_fn':
model.SequenceLogitsParams(
use_attention=FLAGS.use_attention,
use_autoregression=FLAGS.use_autoregression,
num_lstm_units=FLAGS.num_lstm_units,
weight_decay=FLAGS.weight_decay,
lstm_state_clip_value=FLAGS.lstm_state_clip_value),
'sequence_loss_fn':
model.SequenceLossParams(
label_smoothing=FLAGS.label_smoothing,
ignore_nulls=FLAGS.ignore_nulls,
average_across_timesteps=FLAGS.average_across_timesteps)
}
def create_model(*args, **kwargs):
ocr_model = model.Model(mparams=create_mparams(), *args, **kwargs)
return ocr_model
| 5,043 | 30.924051 | 80 | py |
models | models-master/research/attention_ocr/python/eval.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to evaluate a trained Attention OCR model.
A simple usage example:
python eval.py
"""
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow import app
from tensorflow.compat.v1 import flags
import data_provider
import common_flags
FLAGS = flags.FLAGS
common_flags.define()
# yapf: disable
flags.DEFINE_integer('num_batches', 100,
'Number of batches to run eval for.')
flags.DEFINE_string('eval_log_dir', '/tmp/attention_ocr/eval',
'Directory where the evaluation results are saved to.')
flags.DEFINE_integer('eval_interval_secs', 60,
'Frequency in seconds to run evaluations.')
flags.DEFINE_integer('number_of_steps', None,
'Number of times to run evaluation.')
# yapf: enable
def main(_):
if not tf.io.gfile.exists(FLAGS.eval_log_dir):
tf.io.gfile.makedirs(FLAGS.eval_log_dir)
dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
model = common_flags.create_model(dataset.num_char_classes,
dataset.max_sequence_length,
dataset.num_of_views, dataset.null_code)
data = data_provider.get_data(
dataset,
FLAGS.batch_size,
augment=False,
central_crop_size=common_flags.get_crop_size())
endpoints = model.create_base(data.images, labels_one_hot=None)
model.create_loss(data, endpoints)
eval_ops = model.create_summaries(
data, endpoints, dataset.charset, is_training=False)
slim.get_or_create_global_step()
session_config = tf.compat.v1.ConfigProto(device_count={"GPU": 0})
slim.evaluation.evaluation_loop(
master=FLAGS.master,
checkpoint_dir=FLAGS.train_log_dir,
logdir=FLAGS.eval_log_dir,
eval_op=eval_ops,
num_evals=FLAGS.num_batches,
eval_interval_secs=FLAGS.eval_interval_secs,
max_number_of_evaluations=FLAGS.number_of_steps,
session_config=session_config)
if __name__ == '__main__':
app.run()
| 2,701 | 33.202532 | 80 | py |
models | models-master/research/attention_ocr/python/metrics.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quality metrics for the model."""
import tensorflow as tf
def char_accuracy(predictions, targets, rej_char, streaming=False):
"""Computes character level accuracy.
Both predictions and targets should have the same shape
[batch_size x seq_length].
Args:
predictions: predicted characters ids.
targets: ground truth character ids.
rej_char: the character id used to mark an empty element (end of sequence).
streaming: if True, uses the streaming mean from the slim.metric module.
Returns:
    an update op for execution and a value tensor whose value on evaluation
    returns the total character accuracy.
"""
with tf.compat.v1.variable_scope('CharAccuracy'):
predictions.get_shape().assert_is_compatible_with(targets.get_shape())
targets = tf.cast(targets, dtype=tf.int32)
const_rej_char = tf.constant(rej_char, shape=targets.get_shape())
weights = tf.cast(tf.not_equal(targets, const_rej_char), dtype=tf.float32)
correct_chars = tf.cast(tf.equal(predictions, targets), dtype=tf.float32)
accuracy_per_example = tf.compat.v1.div(
tf.reduce_sum(input_tensor=tf.multiply(
correct_chars, weights), axis=1),
tf.reduce_sum(input_tensor=weights, axis=1))
if streaming:
return tf.metrics.mean(accuracy_per_example)
else:
return tf.reduce_mean(input_tensor=accuracy_per_example)
def sequence_accuracy(predictions, targets, rej_char, streaming=False):
"""Computes sequence level accuracy.
Both input tensors should have the same shape: [batch_size x seq_length].
Args:
predictions: predicted character classes.
targets: ground truth character classes.
rej_char: the character id used to mark empty element (end of sequence).
streaming: if True, uses the streaming mean from the slim.metric module.
Returns:
    an update op for execution and a value tensor whose value on evaluation
    returns the total sequence accuracy.
"""
with tf.compat.v1.variable_scope('SequenceAccuracy'):
predictions.get_shape().assert_is_compatible_with(targets.get_shape())
targets = tf.cast(targets, dtype=tf.int32)
const_rej_char = tf.constant(
rej_char, shape=targets.get_shape(), dtype=tf.int32)
include_mask = tf.not_equal(targets, const_rej_char)
include_predictions = tf.cast(
tf.compat.v1.where(include_mask, predictions,
tf.zeros_like(predictions) + rej_char), dtype=tf.int32)
correct_chars = tf.cast(
tf.equal(include_predictions, targets), dtype=tf.float32)
correct_chars_counts = tf.cast(
tf.reduce_sum(input_tensor=correct_chars, axis=[1]), dtype=tf.int32)
target_length = targets.get_shape().dims[1].value
target_chars_counts = tf.constant(
target_length, shape=correct_chars_counts.get_shape())
accuracy_per_example = tf.cast(
tf.equal(correct_chars_counts, target_chars_counts), dtype=tf.float32)
if streaming:
return tf.metrics.mean(accuracy_per_example)
else:
return tf.reduce_mean(input_tensor=accuracy_per_example)
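def _example_accuracy_metrics():
  # Illustrative usage sketch: both metrics on a tiny toy batch, evaluated in
  # graph mode as used throughout this codebase. rej_char=3 marks the padded
  # positions at the end of each sequence; the values below are made up.
  predictions = tf.constant([[0, 1, 3], [2, 2, 3]], dtype=tf.int32)
  targets = tf.constant([[0, 2, 3], [2, 2, 3]], dtype=tf.int32)
  char_acc = char_accuracy(predictions, targets, rej_char=3)
  seq_acc = sequence_accuracy(predictions, targets, rej_char=3)
  with tf.compat.v1.Session() as sess:
    # char_acc -> mean of [0.5, 1.0] = 0.75; seq_acc -> mean of [0.0, 1.0] = 0.5
    return sess.run([char_acc, seq_acc])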
| 3,771 | 39.55914 | 82 | py |
models | models-master/research/attention_ocr/python/sequence_layers_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sequence_layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
import model
import sequence_layers
def fake_net(batch_size, num_features, feature_size):
return tf.convert_to_tensor(
value=np.random.uniform(size=(batch_size, num_features, feature_size)),
dtype=tf.float32)
def fake_labels(batch_size, seq_length, num_char_classes):
labels_np = tf.convert_to_tensor(
value=np.random.randint(
low=0, high=num_char_classes, size=(batch_size, seq_length)))
return slim.one_hot_encoding(labels_np, num_classes=num_char_classes)
def create_layer(layer_class, batch_size, seq_length, num_char_classes):
model_params = model.ModelParams(
num_char_classes=num_char_classes,
seq_length=seq_length,
num_views=1,
null_code=num_char_classes)
net = fake_net(
batch_size=batch_size, num_features=seq_length * 5, feature_size=6)
labels_one_hot = fake_labels(batch_size, seq_length, num_char_classes)
layer_params = sequence_layers.SequenceLayerParams(
num_lstm_units=10, weight_decay=0.00004, lstm_state_clip_value=10.0)
return layer_class(net, labels_one_hot, model_params, layer_params)
class SequenceLayersTest(tf.test.TestCase):
def test_net_slice_char_logits_with_correct_shape(self):
batch_size = 2
seq_length = 4
num_char_classes = 3
layer = create_layer(sequence_layers.NetSlice, batch_size, seq_length,
num_char_classes)
char_logits = layer.create_logits()
self.assertEqual(
tf.TensorShape([batch_size, seq_length, num_char_classes]),
char_logits.get_shape())
def test_net_slice_with_autoregression_char_logits_with_correct_shape(self):
batch_size = 2
seq_length = 4
num_char_classes = 3
layer = create_layer(sequence_layers.NetSliceWithAutoregression,
batch_size, seq_length, num_char_classes)
char_logits = layer.create_logits()
self.assertEqual(
tf.TensorShape([batch_size, seq_length, num_char_classes]),
char_logits.get_shape())
def test_attention_char_logits_with_correct_shape(self):
batch_size = 2
seq_length = 4
num_char_classes = 3
layer = create_layer(sequence_layers.Attention, batch_size, seq_length,
num_char_classes)
char_logits = layer.create_logits()
self.assertEqual(
tf.TensorShape([batch_size, seq_length, num_char_classes]),
char_logits.get_shape())
def test_attention_with_autoregression_char_logits_with_correct_shape(self):
batch_size = 2
seq_length = 4
num_char_classes = 3
layer = create_layer(sequence_layers.AttentionWithAutoregression,
batch_size, seq_length, num_char_classes)
char_logits = layer.create_logits()
self.assertEqual(
tf.TensorShape([batch_size, seq_length, num_char_classes]),
char_logits.get_shape())
if __name__ == '__main__':
tf.test.main()
| 3,801 | 32.646018 | 80 | py |
models | models-master/research/attention_ocr/python/data_provider.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to read, decode and pre-process input data for the Model.
"""
import collections
import functools
import tensorflow as tf
from tensorflow.contrib import slim
import inception_preprocessing
# Tuple to store input data endpoints for the Model.
# It has following fields (tensors):
# images: input images,
# shape [batch_size x H x W x 3];
# labels: ground truth label ids,
# shape=[batch_size x seq_length];
# labels_one_hot: labels in one-hot encoding,
# shape [batch_size x seq_length x num_char_classes];
InputEndpoints = collections.namedtuple(
'InputEndpoints', ['images', 'images_orig', 'labels', 'labels_one_hot'])
# A namedtuple to define a configuration for shuffled batch fetching.
# num_batching_threads: A number of parallel threads to fetch data.
# queue_capacity: a max number of elements in the batch shuffling queue.
# min_after_dequeue: a min number elements in the queue after a dequeue, used
# to ensure a level of mixing of elements.
ShuffleBatchConfig = collections.namedtuple('ShuffleBatchConfig', [
'num_batching_threads', 'queue_capacity', 'min_after_dequeue'
])
DEFAULT_SHUFFLE_CONFIG = ShuffleBatchConfig(
num_batching_threads=8, queue_capacity=3000, min_after_dequeue=1000)
def augment_image(image):
"""Augmentation the image with a random modification.
Args:
image: input Tensor image of rank 3, with the last dimension
of size 3.
Returns:
Distorted Tensor image of the same shape.
"""
with tf.compat.v1.variable_scope('AugmentImage'):
height = image.get_shape().dims[0].value
width = image.get_shape().dims[1].value
# Random crop cut from the street sign image, resized to the same size.
    # Ensures that the crop covers at least 0.8 of the area of the input image.
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
image_size=tf.shape(input=image),
bounding_boxes=tf.zeros([0, 0, 4]),
min_object_covered=0.8,
aspect_ratio_range=[0.8, 1.2],
area_range=[0.8, 1.0],
use_image_if_no_bounding_boxes=True)
distorted_image = tf.slice(image, bbox_begin, bbox_size)
# Randomly chooses one of the 4 interpolation methods
distorted_image = inception_preprocessing.apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize(x, [height, width], method),
num_cases=4)
distorted_image.set_shape([height, width, 3])
# Color distortion
distorted_image = inception_preprocessing.apply_with_random_selector(
distorted_image,
functools.partial(
inception_preprocessing.distort_color, fast_mode=False),
num_cases=4)
distorted_image = tf.clip_by_value(distorted_image, -1.5, 1.5)
return distorted_image
def central_crop(image, crop_size):
"""Returns a central crop for the specified size of an image.
Args:
image: A tensor with shape [height, width, channels]
crop_size: A tuple (crop_width, crop_height)
Returns:
A tensor of shape [crop_height, crop_width, channels].
"""
with tf.compat.v1.variable_scope('CentralCrop'):
target_width, target_height = crop_size
image_height, image_width = tf.shape(
input=image)[0], tf.shape(input=image)[1]
assert_op1 = tf.Assert(
tf.greater_equal(image_height, target_height),
['image_height < target_height', image_height, target_height])
assert_op2 = tf.Assert(
tf.greater_equal(image_width, target_width),
['image_width < target_width', image_width, target_width])
with tf.control_dependencies([assert_op1, assert_op2]):
offset_width = tf.cast((image_width - target_width) / 2, tf.int32)
offset_height = tf.cast((image_height - target_height) / 2, tf.int32)
return tf.image.crop_to_bounding_box(image, offset_height, offset_width,
target_height, target_width)
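def _example_central_crop():
  # Illustrative usage sketch: crop a 150x600 FSNS-style image down to 150x512.
  # Note that crop_size is ordered as (crop_width, crop_height).
  image = tf.zeros([150, 600, 3])
  return central_crop(image, crop_size=(512, 150))  # -> a [150, 512, 3] tensor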
def preprocess_image(image, augment=False, central_crop_size=None,
num_towers=4):
"""Normalizes image to have values in a narrow range around zero.
Args:
image: a [H x W x 3] uint8 tensor.
augment: optional, if True do random image distortion.
central_crop_size: A tuple (crop_width, crop_height).
num_towers: optional, number of shots of the same image in the input image.
Returns:
A float32 tensor of shape [H x W x 3] with RGB values in the required
range.
"""
with tf.compat.v1.variable_scope('PreprocessImage'):
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if augment or central_crop_size:
if num_towers == 1:
images = [image]
else:
images = tf.split(value=image, num_or_size_splits=num_towers, axis=1)
if central_crop_size:
view_crop_size = (int(central_crop_size[0] / num_towers),
central_crop_size[1])
images = [central_crop(img, view_crop_size) for img in images]
if augment:
images = [augment_image(img) for img in images]
image = tf.concat(images, 1)
return image
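def _example_preprocess_image():
  # Illustrative usage sketch: normalize a fake 4-view FSNS image and take a
  # 128-pixel-wide central crop of each of the 4 horizontally stacked views,
  # producing a [150, 512, 3] float tensor.
  fake_image = tf.zeros([150, 600, 3], dtype=tf.uint8)
  return preprocess_image(
      fake_image, augment=False, central_crop_size=(512, 150), num_towers=4)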
def get_data(dataset,
batch_size,
augment=False,
central_crop_size=None,
shuffle_config=None,
shuffle=True):
"""Wraps calls to DatasetDataProviders and shuffle_batch.
For more details about supported Dataset objects refer to datasets/fsns.py.
Args:
dataset: a slim.data.dataset.Dataset object.
batch_size: number of samples per batch.
augment: optional, if True does random image distortion.
    central_crop_size: A tuple (crop_width, crop_height).
shuffle_config: A namedtuple ShuffleBatchConfig.
shuffle: if True use data shuffling.
  Returns:
    An InputEndpoints namedtuple with batched images, original images, labels
    and one-hot encoded labels.
  """
if not shuffle_config:
shuffle_config = DEFAULT_SHUFFLE_CONFIG
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
shuffle=shuffle,
common_queue_capacity=2 * batch_size,
common_queue_min=batch_size)
image_orig, label = provider.get(['image', 'label'])
image = preprocess_image(
image_orig, augment, central_crop_size, num_towers=dataset.num_of_views)
label_one_hot = slim.one_hot_encoding(label, dataset.num_char_classes)
images, images_orig, labels, labels_one_hot = (tf.compat.v1.train.shuffle_batch(
[image, image_orig, label, label_one_hot],
batch_size=batch_size,
num_threads=shuffle_config.num_batching_threads,
capacity=shuffle_config.queue_capacity,
min_after_dequeue=shuffle_config.min_after_dequeue))
return InputEndpoints(
images=images,
images_orig=images_orig,
labels=labels,
labels_one_hot=labels_one_hot)
| 7,389 | 36.323232 | 82 | py |
models | models-master/research/attention_ocr/python/train.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to train the Attention OCR model.
A simple usage example:
python train.py
"""
import collections
import logging
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow import app
from tensorflow.compat.v1 import flags
from tensorflow.contrib.tfprof import model_analyzer
import data_provider
import common_flags
FLAGS = flags.FLAGS
common_flags.define()
# yapf: disable
flags.DEFINE_integer('task', 0,
'The Task ID. This value is used when training with '
'multiple workers to identify each worker.')
flags.DEFINE_integer('ps_tasks', 0,
'The number of parameter servers. If the value is 0, then'
' the parameters are handled locally by the worker.')
flags.DEFINE_integer('save_summaries_secs', 60,
'The frequency with which summaries are saved, in '
'seconds.')
flags.DEFINE_integer('save_interval_secs', 600,
'Frequency in seconds of saving the model.')
flags.DEFINE_integer('max_number_of_steps', int(1e10),
'The maximum number of gradient steps.')
flags.DEFINE_string('checkpoint_inception', '',
'Checkpoint to recover inception weights from.')
flags.DEFINE_float('clip_gradient_norm', 2.0,
'If greater than 0 then the gradients would be clipped by '
'it.')
flags.DEFINE_bool('sync_replicas', False,
'If True will synchronize replicas during training.')
flags.DEFINE_integer('replicas_to_aggregate', 1,
'The number of gradients updates before updating params.')
flags.DEFINE_integer('total_num_replicas', 1,
'Total number of worker replicas.')
flags.DEFINE_integer('startup_delay_steps', 15,
'Number of training steps between replicas startup.')
flags.DEFINE_boolean('reset_train_dir', False,
'If true will delete all files in the train_log_dir')
flags.DEFINE_boolean('show_graph_stats', False,
'Output model size stats to stderr.')
# yapf: enable
TrainingHParams = collections.namedtuple('TrainingHParams', [
'learning_rate',
'optimizer',
'momentum',
'use_augment_input',
])
def get_training_hparams():
return TrainingHParams(
learning_rate=FLAGS.learning_rate,
optimizer=FLAGS.optimizer,
momentum=FLAGS.momentum,
use_augment_input=FLAGS.use_augment_input)
def create_optimizer(hparams):
"""Creates optimized based on the specified flags."""
if hparams.optimizer == 'momentum':
optimizer = tf.compat.v1.train.MomentumOptimizer(
hparams.learning_rate, momentum=hparams.momentum)
elif hparams.optimizer == 'adam':
optimizer = tf.compat.v1.train.AdamOptimizer(hparams.learning_rate)
elif hparams.optimizer == 'adadelta':
optimizer = tf.compat.v1.train.AdadeltaOptimizer(hparams.learning_rate)
elif hparams.optimizer == 'adagrad':
optimizer = tf.compat.v1.train.AdagradOptimizer(hparams.learning_rate)
elif hparams.optimizer == 'rmsprop':
optimizer = tf.compat.v1.train.RMSPropOptimizer(
hparams.learning_rate, momentum=hparams.momentum)
  else:
    raise ValueError('Unsupported optimizer: %s' % hparams.optimizer)
  return optimizer
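def _example_create_optimizer():
  # Illustrative usage sketch: build a momentum optimizer from a hand-crafted
  # TrainingHParams tuple instead of reading the command line flags.
  hparams = TrainingHParams(
      learning_rate=0.004, optimizer='momentum', momentum=0.9,
      use_augment_input=True)
  return create_optimizer(hparams)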
def train(loss, init_fn, hparams):
"""Wraps slim.learning.train to run a training loop.
Args:
loss: a loss tensor
init_fn: A callable to be executed after all other initialization is done.
hparams: a model hyper parameters
"""
optimizer = create_optimizer(hparams)
  if FLAGS.sync_replicas:
    # Note: tf.LegacySyncReplicasOptimizer is not part of the public TF API;
    # SyncReplicasOptimizer determines the replica identity from the task at
    # runtime, so no explicit replica_id tensor is needed.
    optimizer = tf.compat.v1.train.SyncReplicasOptimizer(
        opt=optimizer,
        replicas_to_aggregate=FLAGS.replicas_to_aggregate,
        total_num_replicas=FLAGS.total_num_replicas)
sync_optimizer = optimizer
startup_delay_steps = 0
else:
startup_delay_steps = 0
sync_optimizer = None
train_op = slim.learning.create_train_op(
loss,
optimizer,
summarize_gradients=True,
clip_gradient_norm=FLAGS.clip_gradient_norm)
slim.learning.train(
train_op=train_op,
logdir=FLAGS.train_log_dir,
graph=loss.graph,
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
number_of_steps=FLAGS.max_number_of_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
startup_delay_steps=startup_delay_steps,
sync_optimizer=sync_optimizer,
init_fn=init_fn)
def prepare_training_dir():
if not tf.io.gfile.exists(FLAGS.train_log_dir):
logging.info('Create a new training directory %s', FLAGS.train_log_dir)
tf.io.gfile.makedirs(FLAGS.train_log_dir)
else:
if FLAGS.reset_train_dir:
logging.info('Reset the training directory %s', FLAGS.train_log_dir)
tf.io.gfile.rmtree(FLAGS.train_log_dir)
tf.io.gfile.makedirs(FLAGS.train_log_dir)
else:
logging.info('Use already existing training directory %s',
FLAGS.train_log_dir)
def calculate_graph_metrics():
param_stats = model_analyzer.print_model_analysis(
tf.compat.v1.get_default_graph(),
tfprof_options=model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
return param_stats.total_parameters
def main(_):
prepare_training_dir()
dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
model = common_flags.create_model(dataset.num_char_classes,
dataset.max_sequence_length,
dataset.num_of_views, dataset.null_code)
hparams = get_training_hparams()
# If ps_tasks is zero, the local device is used. When using multiple
# (non-local) replicas, the ReplicaDeviceSetter distributes the variables
# across the different devices.
device_setter = tf.compat.v1.train.replica_device_setter(
FLAGS.ps_tasks, merge_devices=True)
with tf.device(device_setter):
data = data_provider.get_data(
dataset,
FLAGS.batch_size,
augment=hparams.use_augment_input,
central_crop_size=common_flags.get_crop_size())
endpoints = model.create_base(data.images, data.labels_one_hot)
total_loss = model.create_loss(data, endpoints)
model.create_summaries(data, endpoints, dataset.charset, is_training=True)
init_fn = model.create_init_fn_to_restore(FLAGS.checkpoint,
FLAGS.checkpoint_inception)
if FLAGS.show_graph_stats:
logging.info('Total number of weights in the graph: %s',
calculate_graph_metrics())
train(total_loss, init_fn, hparams)
if __name__ == '__main__':
app.run()
| 7,393 | 34.209524 | 80 | py |
models | models-master/research/attention_ocr/python/model_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the model."""
import string
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
import model
import data_provider
def create_fake_charset(num_char_classes):
charset = {}
for i in range(num_char_classes):
charset[i] = string.printable[i % len(string.printable)]
return charset
class ModelTest(tf.test.TestCase):
def setUp(self):
tf.test.TestCase.setUp(self)
self.rng = np.random.RandomState([11, 23, 50])
self.batch_size = 4
self.image_width = 600
self.image_height = 30
self.seq_length = 40
self.num_char_classes = 72
self.null_code = 62
self.num_views = 4
feature_size = 288
self.conv_tower_shape = (self.batch_size, 1, 72, feature_size)
self.features_shape = (self.batch_size, self.seq_length, feature_size)
self.chars_logit_shape = (self.batch_size, self.seq_length,
self.num_char_classes)
self.length_logit_shape = (self.batch_size, self.seq_length + 1)
# Placeholder knows image dimensions, but not batch size.
self.input_images = tf.compat.v1.placeholder(
tf.float32,
shape=(None, self.image_height, self.image_width, 3),
name='input_node')
self.initialize_fakes()
def initialize_fakes(self):
self.images_shape = (self.batch_size, self.image_height, self.image_width,
3)
self.fake_images = self.rng.randint(
low=0, high=255, size=self.images_shape).astype('float32')
self.fake_conv_tower_np = self.rng.randn(*self.conv_tower_shape).astype(
'float32')
self.fake_conv_tower = tf.constant(self.fake_conv_tower_np)
self.fake_logits = tf.constant(
self.rng.randn(*self.chars_logit_shape).astype('float32'))
self.fake_labels = tf.constant(
self.rng.randint(
low=0,
high=self.num_char_classes,
size=(self.batch_size, self.seq_length)).astype('int64'))
def create_model(self, charset=None):
return model.Model(
self.num_char_classes,
self.seq_length,
num_views=4,
null_code=62,
charset=charset)
def test_char_related_shapes(self):
charset = create_fake_charset(self.num_char_classes)
ocr_model = self.create_model(charset=charset)
with self.test_session() as sess:
endpoints_tf = ocr_model.create_base(
images=self.input_images, labels_one_hot=None)
sess.run(tf.compat.v1.global_variables_initializer())
tf.compat.v1.tables_initializer().run()
endpoints = sess.run(
endpoints_tf, feed_dict={self.input_images: self.fake_images})
self.assertEqual(
(self.batch_size, self.seq_length, self.num_char_classes),
endpoints.chars_logit.shape)
self.assertEqual(
(self.batch_size, self.seq_length, self.num_char_classes),
endpoints.chars_log_prob.shape)
self.assertEqual((self.batch_size, self.seq_length),
endpoints.predicted_chars.shape)
self.assertEqual((self.batch_size, self.seq_length),
endpoints.predicted_scores.shape)
self.assertEqual((self.batch_size,), endpoints.predicted_text.shape)
self.assertEqual((self.batch_size,), endpoints.predicted_conf.shape)
self.assertEqual((self.batch_size,), endpoints.normalized_seq_conf.shape)
def test_predicted_scores_are_within_range(self):
ocr_model = self.create_model()
_, _, scores = ocr_model.char_predictions(self.fake_logits)
with self.test_session() as sess:
scores_np = sess.run(
scores, feed_dict={self.input_images: self.fake_images})
values_in_range = (scores_np >= 0.0) & (scores_np <= 1.0)
self.assertTrue(
np.all(values_in_range),
msg=('Scores contains out of the range values %s' %
scores_np[np.logical_not(values_in_range)]))
def test_conv_tower_shape(self):
with self.test_session() as sess:
ocr_model = self.create_model()
conv_tower = ocr_model.conv_tower_fn(self.input_images)
sess.run(tf.compat.v1.global_variables_initializer())
conv_tower_np = sess.run(
conv_tower, feed_dict={self.input_images: self.fake_images})
self.assertEqual(self.conv_tower_shape, conv_tower_np.shape)
  def test_model_size_less_than_1_gb(self):
    # NOTE: The actual amount of memory occupied by TF during training will be
    # at least 4X bigger because of the space needed to store original weights,
    # updates, gradients and variances. It also depends on the type of
    # optimizer used.
ocr_model = self.create_model()
ocr_model.create_base(images=self.input_images, labels_one_hot=None)
with self.test_session() as sess:
tfprof_root = tf.compat.v1.profiler.profile(
sess.graph,
options=tf.compat.v1.profiler.ProfileOptionBuilder
.trainable_variables_parameter())
model_size_bytes = 4 * tfprof_root.total_parameters
self.assertLess(model_size_bytes, 1 * 2**30)
def test_create_summaries_is_runnable(self):
ocr_model = self.create_model()
data = data_provider.InputEndpoints(
images=self.fake_images,
images_orig=self.fake_images,
labels=self.fake_labels,
labels_one_hot=slim.one_hot_encoding(self.fake_labels,
self.num_char_classes))
endpoints = ocr_model.create_base(
images=self.fake_images, labels_one_hot=None)
charset = create_fake_charset(self.num_char_classes)
summaries = ocr_model.create_summaries(
data, endpoints, charset, is_training=False)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.compat.v1.local_variables_initializer())
tf.compat.v1.tables_initializer().run()
sess.run(summaries) # just check it is runnable
def test_sequence_loss_function_without_label_smoothing(self):
model = self.create_model()
model.set_mparam('sequence_loss_fn', label_smoothing=0)
loss = model.sequence_loss_fn(self.fake_logits, self.fake_labels)
with self.test_session() as sess:
loss_np = sess.run(loss, feed_dict={self.input_images: self.fake_images})
# This test checks that the loss function is 'runnable'.
self.assertEqual(loss_np.shape, tuple())
def encode_coordinates_alt(self, net):
"""An alternative implemenation for the encoding coordinates.
Args:
net: a tensor of shape=[batch_size, height, width, num_features]
Returns:
a list of tensors with encoded image coordinates in them.
"""
batch_size = tf.shape(input=net)[0]
_, h, w, _ = net.shape.as_list()
h_loc = [
tf.tile(
tf.reshape(
tf.contrib.layers.one_hot_encoding(
tf.constant([i]), num_classes=h), [h, 1]), [1, w])
for i in range(h)
]
h_loc = tf.concat([tf.expand_dims(t, 2) for t in h_loc], 2)
w_loc = [
tf.tile(
tf.contrib.layers.one_hot_encoding(
tf.constant([i]), num_classes=w),
[h, 1]) for i in range(w)
]
w_loc = tf.concat([tf.expand_dims(t, 2) for t in w_loc], 2)
loc = tf.concat([h_loc, w_loc], 2)
loc = tf.tile(tf.expand_dims(loc, 0), [batch_size, 1, 1, 1])
return tf.concat([net, loc], 3)
def test_encoded_coordinates_have_correct_shape(self):
model = self.create_model()
model.set_mparam('encode_coordinates_fn', enabled=True)
conv_w_coords_tf = model.encode_coordinates_fn(self.fake_conv_tower)
with self.test_session() as sess:
conv_w_coords = sess.run(
conv_w_coords_tf, feed_dict={self.input_images: self.fake_images})
batch_size, height, width, feature_size = self.conv_tower_shape
self.assertEqual(conv_w_coords.shape,
(batch_size, height, width, feature_size + height + width))
def test_disabled_coordinate_encoding_returns_features_unchanged(self):
model = self.create_model()
model.set_mparam('encode_coordinates_fn', enabled=False)
conv_w_coords_tf = model.encode_coordinates_fn(self.fake_conv_tower)
with self.test_session() as sess:
conv_w_coords = sess.run(
conv_w_coords_tf, feed_dict={self.input_images: self.fake_images})
self.assertAllEqual(conv_w_coords, self.fake_conv_tower_np)
def test_coordinate_encoding_is_correct_for_simple_example(self):
shape = (1, 2, 3, 4) # batch_size, height, width, feature_size
fake_conv_tower = tf.constant(2 * np.ones(shape), dtype=tf.float32)
model = self.create_model()
model.set_mparam('encode_coordinates_fn', enabled=True)
conv_w_coords_tf = model.encode_coordinates_fn(fake_conv_tower)
with self.test_session() as sess:
conv_w_coords = sess.run(
conv_w_coords_tf, feed_dict={self.input_images: self.fake_images})
# Original features
self.assertAllEqual(conv_w_coords[0, :, :, :4],
[[[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]],
[[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]]])
# Encoded coordinates
self.assertAllEqual(conv_w_coords[0, :, :, 4:],
[[[1, 0, 1, 0, 0], [1, 0, 0, 1, 0], [1, 0, 0, 0, 1]],
[[0, 1, 1, 0, 0], [0, 1, 0, 1, 0], [0, 1, 0, 0, 1]]])
def test_alt_implementation_of_coordinate_encoding_returns_same_values(self):
model = self.create_model()
model.set_mparam('encode_coordinates_fn', enabled=True)
conv_w_coords_tf = model.encode_coordinates_fn(self.fake_conv_tower)
conv_w_coords_alt_tf = self.encode_coordinates_alt(self.fake_conv_tower)
with self.test_session() as sess:
conv_w_coords_tf, conv_w_coords_alt_tf = sess.run(
[conv_w_coords_tf, conv_w_coords_alt_tf])
self.assertAllEqual(conv_w_coords_tf, conv_w_coords_alt_tf)
def test_predicted_text_has_correct_shape_w_charset(self):
charset = create_fake_charset(self.num_char_classes)
ocr_model = self.create_model(charset=charset)
with self.test_session() as sess:
endpoints_tf = ocr_model.create_base(
images=self.fake_images, labels_one_hot=None)
sess.run(tf.compat.v1.global_variables_initializer())
tf.compat.v1.tables_initializer().run()
endpoints = sess.run(endpoints_tf)
self.assertEqual(endpoints.predicted_text.shape, (self.batch_size,))
self.assertEqual(len(endpoints.predicted_text[0]), self.seq_length)
class CharsetMapperTest(tf.test.TestCase):
def test_text_corresponds_to_ids(self):
charset = create_fake_charset(36)
ids = tf.constant([[17, 14, 21, 21, 24], [32, 24, 27, 21, 13]],
dtype=tf.int64)
charset_mapper = model.CharsetMapper(charset)
with self.test_session() as sess:
tf.compat.v1.tables_initializer().run()
text = sess.run(charset_mapper.get_text(ids))
self.assertAllEqual(text, [b'hello', b'world'])
if __name__ == '__main__':
tf.test.main()
| 11,680 | 37.807309 | 80 | py |
models | models-master/research/attention_ocr/python/datasets/unittest_utils_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for unittest_utils."""
import numpy as np
import io
from PIL import Image as PILImage
import tensorflow as tf
from datasets import unittest_utils
class UnittestUtilsTest(tf.test.TestCase):
def test_creates_an_image_of_specified_shape(self):
image, _ = unittest_utils.create_random_image('PNG', (10, 20, 3))
self.assertEqual(image.shape, (10, 20, 3))
def test_encoded_image_corresponds_to_numpy_array(self):
image, encoded = unittest_utils.create_random_image('PNG', (20, 10, 3))
pil_image = PILImage.open(io.BytesIO(encoded))
self.assertAllEqual(image, np.array(pil_image))
def test_created_example_has_correct_values(self):
example_serialized = unittest_utils.create_serialized_example({
'labels': [1, 2, 3],
'data': [b'FAKE']
})
example = tf.train.Example()
example.ParseFromString(example_serialized)
self.assertProtoEquals("""
features {
feature {
key: "labels"
value { int64_list {
value: 1
value: 2
value: 3
}}
}
feature {
key: "data"
value { bytes_list {
value: "FAKE"
}}
}
}
""", example)
if __name__ == '__main__':
tf.test.main()
| 1,977 | 29.430769 | 80 | py |
models | models-master/research/attention_ocr/python/datasets/fsns_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for FSNS datasets module."""
import collections
import os
import tensorflow as tf
from tensorflow.contrib import slim
from datasets import fsns
from datasets import unittest_utils
from tensorflow.compat.v1 import flags
FLAGS = flags.FLAGS
def get_test_split():
config = fsns.DEFAULT_CONFIG.copy()
config['splits'] = {'test': {'size': 5, 'pattern': 'fsns-00000-of-00001'}}
return fsns.get_split('test', dataset_dir(), config)
def dataset_dir():
return os.path.join(os.path.dirname(__file__), 'testdata/fsns')
class FsnsTest(tf.test.TestCase):
def test_decodes_example_proto(self):
expected_label = range(37)
expected_image, encoded = unittest_utils.create_random_image(
'PNG', shape=(150, 600, 3))
serialized = unittest_utils.create_serialized_example({
'image/encoded': [encoded],
'image/format': [b'PNG'],
'image/class':
expected_label,
'image/unpadded_class':
range(10),
'image/text': [b'Raw text'],
'image/orig_width': [150],
'image/width': [600]
})
decoder = fsns.get_split('train', dataset_dir()).decoder
with self.test_session() as sess:
data_tuple = collections.namedtuple('DecodedData', decoder.list_items())
data = sess.run(data_tuple(*decoder.decode(serialized)))
self.assertAllEqual(expected_image, data.image)
self.assertAllEqual(expected_label, data.label)
self.assertEqual([b'Raw text'], data.text)
self.assertEqual([1], data.num_of_views)
def test_label_has_shape_defined(self):
serialized = 'fake'
decoder = fsns.get_split('train', dataset_dir()).decoder
[label_tf] = decoder.decode(serialized, ['label'])
self.assertEqual(label_tf.get_shape().dims[0], 37)
def test_dataset_tuple_has_all_extra_attributes(self):
dataset = fsns.get_split('train', dataset_dir())
self.assertTrue(dataset.charset)
self.assertTrue(dataset.num_char_classes)
self.assertTrue(dataset.num_of_views)
self.assertTrue(dataset.max_sequence_length)
self.assertTrue(dataset.null_code)
def test_can_use_the_test_data(self):
batch_size = 1
dataset = get_test_split()
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
shuffle=True,
common_queue_capacity=2 * batch_size,
common_queue_min=batch_size)
image_tf, label_tf = provider.get(['image', 'label'])
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
with slim.queues.QueueRunners(sess):
image_np, label_np = sess.run([image_tf, label_tf])
self.assertEqual((150, 600, 3), image_np.shape)
self.assertEqual((37, ), label_np.shape)
if __name__ == '__main__':
tf.test.main()
| 3,450 | 31.866667 | 80 | py |
models | models-master/research/attention_ocr/python/datasets/unittest_utils.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to make unit testing easier."""
import numpy as np
import io
from PIL import Image as PILImage
import tensorflow as tf
def create_random_image(image_format, shape):
"""Creates an image with random values.
Args:
image_format: An image format (PNG or JPEG).
shape: A tuple with image shape (including channels).
Returns:
A tuple (<numpy ndarray>, <a string with encoded image>)
"""
image = np.random.randint(low=0, high=255, size=shape, dtype='uint8')
fd = io.BytesIO()
image_pil = PILImage.fromarray(image)
image_pil.save(fd, image_format, subsampling=0, quality=100)
return image, fd.getvalue()
def create_serialized_example(name_to_values):
"""Creates a tf.Example proto using a dictionary.
  It automatically detects the type of the values and defines a corresponding
  feature.
  Args:
    name_to_values: A dictionary mapping feature names to lists of values.
  Returns:
    A serialized tf.Example proto.
"""
example = tf.train.Example()
for name, values in name_to_values.items():
feature = example.features.feature[name]
if isinstance(values[0], str) or isinstance(values[0], bytes):
add = feature.bytes_list.value.extend
elif isinstance(values[0], float):
      add = feature.float_list.value.extend
elif isinstance(values[0], int):
add = feature.int64_list.value.extend
else:
raise AssertionError('Unsupported type: %s' % type(values[0]))
add(values)
return example.SerializeToString()
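def _example_create_serialized_example():
  # Illustrative usage sketch: build a serialized example holding an encoded
  # random image and a list of integer labels, the way the dataset unit tests
  # combine these two helpers.
  _, encoded = create_random_image('PNG', shape=(16, 32, 3))
  return create_serialized_example({
      'image/encoded': [encoded],
      'image/class': list(range(5)),
  })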
| 2,125 | 32.21875 | 80 | py |
models | models-master/research/attention_ocr/python/datasets/__init__.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from datasets import fsns
from datasets import fsns_test
__all__ = ['fsns', 'fsns_test']
| 775 | 37.8 | 80 | py |
models | models-master/research/attention_ocr/python/datasets/fsns.py | # -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration to read FSNS dataset https://goo.gl/3Ldm8v."""
import os
import re
import tensorflow as tf
from tensorflow.contrib import slim
import logging
DEFAULT_DATASET_DIR = os.path.join(os.path.dirname(__file__), 'data', 'fsns')
# The dataset configuration, should be used only as a default value.
DEFAULT_CONFIG = {
'name': 'FSNS',
'splits': {
'train': {
'size': 1044868,
'pattern': 'train/train*'
},
'test': {
'size': 20404,
'pattern': 'test/test*'
},
'validation': {
'size': 16150,
'pattern': 'validation/validation*'
}
},
'charset_filename': 'charset_size=134.txt',
'image_shape': (150, 600, 3),
'num_of_views': 4,
'max_sequence_length': 37,
'null_code': 133,
'items_to_descriptions': {
'image': 'A [150 x 600 x 3] color image.',
'label': 'Characters codes.',
'text': 'A unicode string.',
'length': 'A length of the encoded text.',
'num_of_views': 'A number of different views stored within the image.'
}
}
def read_charset(filename, null_character=u'\u2591'):
"""Reads a charset definition from a tab separated text file.
  The charset file has to have a format compatible with the FSNS dataset.
Args:
filename: a path to the charset file.
    null_character: a unicode character used to replace the '<nul>' character.
      The default value is a light shade block '░'.
Returns:
a dictionary with keys equal to character codes and values - unicode
characters.
"""
pattern = re.compile(r'(\d+)\t(.+)')
charset = {}
with tf.io.gfile.GFile(filename) as f:
for i, line in enumerate(f):
m = pattern.match(line)
if m is None:
logging.warning('incorrect charset file. line #%d: %s', i, line)
continue
code = int(m.group(1))
char = m.group(2)
if char == '<nul>':
char = null_character
charset[code] = char
return charset
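def _example_read_charset():
  # Illustrative sketch of the expected file format: one "<code>\t<character>"
  # pair per line, e.g.
  #   0\ta
  #   1\tb
  #   133\t<nul>
  # (the codes above are made up, not the real FSNS charset). The result is a
  # dict mapping integer codes to unicode characters, with '<nul>' replaced by
  # the null_character. The path below is only a placeholder.
  return read_charset('/tmp/charset_size=134.txt')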
class _NumOfViewsHandler(slim.tfexample_decoder.ItemHandler):
"""Convenience handler to determine number of views stored in an image."""
def __init__(self, width_key, original_width_key, num_of_views):
super(_NumOfViewsHandler, self).__init__([width_key, original_width_key])
self._width_key = width_key
self._original_width_key = original_width_key
self._num_of_views = num_of_views
def tensors_to_item(self, keys_to_tensors):
return tf.cast(
self._num_of_views * keys_to_tensors[self._original_width_key] /
keys_to_tensors[self._width_key], dtype=tf.int64)
def get_split(split_name, dataset_dir=None, config=None):
"""Returns a dataset tuple for FSNS dataset.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources, by default it uses
a predefined CNS path (see DEFAULT_DATASET_DIR).
config: A dictionary with dataset configuration. If None - will use the
DEFAULT_CONFIG.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if not dataset_dir:
dataset_dir = DEFAULT_DATASET_DIR
if not config:
config = DEFAULT_CONFIG
if split_name not in config['splits']:
raise ValueError('split name %s was not recognized.' % split_name)
logging.info('Using %s dataset split_name=%s dataset_dir=%s', config['name'],
split_name, dataset_dir)
# Ignores the 'image/height' feature.
zero = tf.zeros([1], dtype=tf.int64)
keys_to_features = {
'image/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.io.FixedLenFeature((), tf.string, default_value='png'),
'image/width':
tf.io.FixedLenFeature([1], tf.int64, default_value=zero),
'image/orig_width':
tf.io.FixedLenFeature([1], tf.int64, default_value=zero),
'image/class':
tf.io.FixedLenFeature([config['max_sequence_length']], tf.int64),
'image/unpadded_class':
tf.io.VarLenFeature(tf.int64),
'image/text':
tf.io.FixedLenFeature([1], tf.string, default_value=''),
}
items_to_handlers = {
'image':
slim.tfexample_decoder.Image(
shape=config['image_shape'],
image_key='image/encoded',
format_key='image/format'),
'label':
slim.tfexample_decoder.Tensor(tensor_key='image/class'),
'text':
slim.tfexample_decoder.Tensor(tensor_key='image/text'),
'num_of_views':
_NumOfViewsHandler(
width_key='image/width',
original_width_key='image/orig_width',
num_of_views=config['num_of_views'])
}
decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
charset_file = os.path.join(dataset_dir, config['charset_filename'])
charset = read_charset(charset_file)
file_pattern = os.path.join(dataset_dir,
config['splits'][split_name]['pattern'])
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=tf.compat.v1.TFRecordReader,
decoder=decoder,
num_samples=config['splits'][split_name]['size'],
items_to_descriptions=config['items_to_descriptions'],
# additional parameters for convenience.
charset=charset,
charset_file=charset_file,
image_shape=config['image_shape'],
num_char_classes=len(charset),
num_of_views=config['num_of_views'],
max_sequence_length=config['max_sequence_length'],
null_code=config['null_code'])
| 6,340 | 33.091398 | 80 | py |
models | models-master/research/attention_ocr/python/datasets/testdata/fsns/download_data.py | import urllib.request
import tensorflow as tf
import itertools
URL = 'http://download.tensorflow.org/data/fsns-20160927/testdata/fsns-00000-of-00001'
DST_ORIG = 'fsns-00000-of-00001.orig'
DST = 'fsns-00000-of-00001'
KEEP_NUM_RECORDS = 5
print('Downloading %s ...' % URL)
urllib.request.urlretrieve(URL, DST_ORIG)
print('Writing %d records from %s to %s ...' %
(KEEP_NUM_RECORDS, DST_ORIG, DST))
with tf.io.TFRecordWriter(DST) as writer:
for raw_record in itertools.islice(tf.compat.v1.python_io.tf_record_iterator(DST_ORIG), KEEP_NUM_RECORDS):
writer.write(raw_record)
| 590 | 31.833333 | 110 | py |
models | models-master/research/autoaugment/custom_ops.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for typical Neural Network TensorFlow layers.
Ops that have different behavior during training or eval have an is_training
parameter.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
arg_scope = tf.contrib.framework.arg_scope
def variable(name, shape, dtype, initializer, trainable):
"""Returns a TF variable with the passed in specifications."""
var = tf.get_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=trainable)
return var
def global_avg_pool(x, scope=None):
"""Average pools away spatial height and width dimension of 4D tensor."""
assert x.get_shape().ndims == 4
with tf.name_scope(scope, 'global_avg_pool', [x]):
kernel_size = (1, int(x.shape[1]), int(x.shape[2]), 1)
squeeze_dims = (1, 2)
result = tf.nn.avg_pool(
x,
ksize=kernel_size,
strides=(1, 1, 1, 1),
padding='VALID',
data_format='NHWC')
return tf.squeeze(result, squeeze_dims)
def zero_pad(inputs, in_filter, out_filter):
"""Zero pads `input` tensor to have `out_filter` number of filters."""
outputs = tf.pad(inputs, [[0, 0], [0, 0], [0, 0],
[(out_filter - in_filter) // 2,
(out_filter - in_filter) // 2]])
return outputs
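def _example_zero_pad():
  # Illustrative usage sketch: pad the channel dimension of a residual shortcut
  # from 16 to 32 filters by adding 8 zero channels on each side.
  shortcut = tf.zeros([4, 8, 8, 16])
  return zero_pad(shortcut, in_filter=16, out_filter=32)  # -> [4, 8, 8, 32]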
@tf.contrib.framework.add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
is_training=True,
reuse=None,
scope=None):
"""Small wrapper around tf.contrib.layers.batch_norm."""
return tf.contrib.layers.batch_norm(
inputs,
decay=decay,
center=center,
scale=scale,
epsilon=epsilon,
activation_fn=None,
param_initializers=None,
updates_collections=tf.GraphKeys.UPDATE_OPS,
is_training=is_training,
reuse=reuse,
trainable=True,
fused=True,
data_format='NHWC',
zero_debias_moving_mean=False,
scope=scope)
def stride_arr(stride_h, stride_w):
return [1, stride_h, stride_w, 1]
@tf.contrib.framework.add_arg_scope
def conv2d(inputs,
num_filters_out,
kernel_size,
stride=1,
scope=None,
reuse=None):
"""Adds a 2D convolution.
conv2d creates a variable called 'weights', representing the convolutional
kernel, that is convolved with the input.
Args:
inputs: a 4D tensor in NHWC format.
num_filters_out: the number of output filters.
kernel_size: an int specifying the kernel height and width size.
stride: an int specifying the height and width stride.
scope: Optional scope for variable_scope.
reuse: whether or not the layer and its variables should be reused.
Returns:
a tensor that is the result of a convolution being applied to `inputs`.
"""
with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse):
num_filters_in = int(inputs.shape[3])
weights_shape = [kernel_size, kernel_size, num_filters_in, num_filters_out]
# Initialization
n = int(weights_shape[0] * weights_shape[1] * weights_shape[3])
weights_initializer = tf.random_normal_initializer(
stddev=np.sqrt(2.0 / n))
weights = variable(
name='weights',
shape=weights_shape,
dtype=tf.float32,
initializer=weights_initializer,
trainable=True)
strides = stride_arr(stride, stride)
outputs = tf.nn.conv2d(
inputs, weights, strides, padding='SAME', data_format='NHWC')
return outputs
@tf.contrib.framework.add_arg_scope
def fc(inputs,
num_units_out,
scope=None,
reuse=None):
"""Creates a fully connected layer applied to `inputs`.
Args:
inputs: a tensor that the fully connected layer will be applied to. It
will be reshaped if it is not 2D.
num_units_out: the number of output units in the layer.
scope: Optional scope for variable_scope.
reuse: whether or not the layer and its variables should be reused.
Returns:
a tensor that is the result of applying a linear matrix to `inputs`.
"""
if len(inputs.shape) > 2:
inputs = tf.reshape(inputs, [int(inputs.shape[0]), -1])
with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse):
num_units_in = inputs.shape[1]
weights_shape = [num_units_in, num_units_out]
unif_init_range = 1.0 / (num_units_out)**(0.5)
weights_initializer = tf.random_uniform_initializer(
-unif_init_range, unif_init_range)
weights = variable(
name='weights',
shape=weights_shape,
dtype=tf.float32,
initializer=weights_initializer,
trainable=True)
bias_initializer = tf.constant_initializer(0.0)
biases = variable(
name='biases',
shape=[num_units_out,],
dtype=tf.float32,
initializer=bias_initializer,
trainable=True)
outputs = tf.nn.xw_plus_b(inputs, weights, biases)
return outputs
@tf.contrib.framework.add_arg_scope
def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Wrapper around tf.nn.avg_pool."""
with tf.name_scope(scope, 'AvgPool', [inputs]):
kernel = stride_arr(kernel_size, kernel_size)
strides = stride_arr(stride, stride)
return tf.nn.avg_pool(
inputs,
ksize=kernel,
strides=strides,
padding=padding,
data_format='NHWC')
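def _example_small_tower(images):
  # Illustrative sketch: composing the wrappers above into a tiny
  # conv -> batch_norm -> relu -> global average pool -> fully connected head.
  # `images` is assumed to be a [batch, height, width, 3] float tensor and the
  # 16 filters / 10 classes below are arbitrary example values.
  net = conv2d(images, num_filters_out=16, kernel_size=3, scope='conv1')
  net = batch_norm(net, scope='bn1', is_training=True)
  net = tf.nn.relu(net)
  net = global_avg_pool(net)  # -> [batch, 16]
  logits = fc(net, num_units_out=10, scope='logits')
  return logits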
| 6,246 | 30.550505 | 80 | py |
models | models-master/research/autoaugment/augmentation_transforms.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transforms used in the Augmentation Policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
# pylint:disable=g-multiple-import
from PIL import ImageOps, ImageEnhance, ImageFilter, Image
# pylint:enable=g-multiple-import
IMAGE_SIZE = 32
# Dataset mean and std of the images in the training set.
MEANS = [0.49139968, 0.48215841, 0.44653091]
STDS = [0.24703223, 0.24348513, 0.26158784]
PARAMETER_MAX = 10  # The maximum 'level' a transform can be predicted to have.
def random_flip(x):
"""Flip the input x horizontally with 50% probability."""
if np.random.rand(1)[0] > 0.5:
return np.fliplr(x)
return x
def zero_pad_and_crop(img, amount=4):
"""Zero pad by `amount` zero pixels on each side then take a random crop.
Args:
img: numpy image that will be zero padded and cropped.
    amount: amount of zeros to pad `img` with horizontally and vertically.
Returns:
The cropped zero padded img. The returned numpy array will be of the same
shape as `img`.
"""
padded_img = np.zeros((img.shape[0] + amount * 2, img.shape[1] + amount * 2,
img.shape[2]))
padded_img[amount:img.shape[0] + amount, amount:
img.shape[1] + amount, :] = img
top = np.random.randint(low=0, high=2 * amount)
left = np.random.randint(low=0, high=2 * amount)
new_img = padded_img[top:top + img.shape[0], left:left + img.shape[1], :]
return new_img
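# Illustrative sketch, not part of the original module: typical use of
# `zero_pad_and_crop` on a single CIFAR-sized image. The output keeps the
# input shape because the random crop is taken from the padded image.
def _example_zero_pad_and_crop():
  img = np.random.rand(32, 32, 3)
  cropped = zero_pad_and_crop(img, amount=4)
  assert cropped.shape == img.shape
  return cropped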
def create_cutout_mask(img_height, img_width, num_channels, size):
"""Creates a zero mask used for cutout of shape `img_height` x `img_width`.
Args:
img_height: Height of image cutout mask will be applied to.
img_width: Width of image cutout mask will be applied to.
num_channels: Number of channels in the image.
size: Size of the zeros mask.
Returns:
A mask of shape `img_height` x `img_width` with all ones except for a
square of zeros of shape `size` x `size`. This mask is meant to be
elementwise multiplied with the original image. Additionally returns
the `upper_coord` and `lower_coord` which specify where the cutout mask
will be applied.
"""
assert img_height == img_width
# Sample center where cutout mask will be applied
height_loc = np.random.randint(low=0, high=img_height)
width_loc = np.random.randint(low=0, high=img_width)
  # Determine upper left and lower right corners of the patch
upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2))
lower_coord = (min(img_height, height_loc + size // 2),
min(img_width, width_loc + size // 2))
mask_height = lower_coord[0] - upper_coord[0]
mask_width = lower_coord[1] - upper_coord[1]
assert mask_height > 0
assert mask_width > 0
mask = np.ones((img_height, img_width, num_channels))
zeros = np.zeros((mask_height, mask_width, num_channels))
mask[upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1], :] = (
zeros)
return mask, upper_coord, lower_coord
def cutout_numpy(img, size=16):
"""Apply cutout with mask of shape `size` x `size` to `img`.
The cutout operation is from the paper https://arxiv.org/abs/1708.04552.
This operation applies a `size`x`size` mask of zeros to a random location
within `img`.
Args:
img: Numpy image that cutout will be applied to.
    size: Height/width of the cutout mask that will be applied to `img`.
Returns:
A numpy tensor that is the result of applying the cutout mask to `img`.
"""
img_height, img_width, num_channels = (img.shape[0], img.shape[1],
img.shape[2])
assert len(img.shape) == 3
mask, _, _ = create_cutout_mask(img_height, img_width, num_channels, size)
return img * mask
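# Illustrative sketch, not part of the original module: cutout zeroes out a
# random square patch of roughly `size` x `size` pixels (smaller when the
# sampled center lies near the image border).
def _example_cutout_numpy():
  img = np.random.rand(32, 32, 3)
  cut = cutout_numpy(img, size=16)
  assert cut.shape == img.shape
  return cut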
def float_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled
to level/PARAMETER_MAX.
Returns:
A float that results from scaling `maxval` according to `level`.
"""
return float(level) * maxval / PARAMETER_MAX
def int_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled
to level/PARAMETER_MAX.
Returns:
An int that results from scaling `maxval` according to `level`.
"""
return int(level * maxval / PARAMETER_MAX)
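# Illustrative examples, not part of the original module, of how a discrete
# level in [0, PARAMETER_MAX] maps onto operation-specific magnitudes:
#   float_parameter(5, 0.3) == 0.15   (e.g. the ShearX magnitude at level 5)
#   int_parameter(5, 30) == 15        (e.g. the Rotate degrees at level 5)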
def pil_wrap(img):
"""Convert the `img` numpy tensor to a PIL Image."""
return Image.fromarray(
np.uint8((img * STDS + MEANS) * 255.0)).convert('RGBA')
def pil_unwrap(pil_img):
"""Converts the PIL img to a numpy array."""
pic_array = (np.array(pil_img.getdata()).reshape((32, 32, 4)) / 255.0)
i1, i2 = np.where(pic_array[:, :, 3] == 0)
pic_array = (pic_array[:, :, :3] - MEANS) / STDS
pic_array[i1, i2] = [0, 0, 0]
return pic_array
def apply_policy(policy, img):
"""Apply the `policy` to the numpy `img`.
Args:
policy: A list of tuples with the form (name, probability, level) where
`name` is the name of the augmentation operation to apply, `probability`
is the probability of applying the operation and `level` is what strength
the operation to apply.
img: Numpy image that will have `policy` applied to it.
Returns:
The result of applying `policy` to `img`.
"""
pil_img = pil_wrap(img)
for xform in policy:
assert len(xform) == 3
name, probability, level = xform
xform_fn = NAME_TO_TRANSFORM[name].pil_transformer(probability, level)
pil_img = xform_fn(pil_img)
return pil_unwrap(pil_img)
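# Illustrative sketch, not part of the original module: applying a hand-written
# sub-policy (same (name, probability, level) format as policies.good_policies())
# to an image normalized with MEANS/STDS the same way the data loader does.
def _example_apply_policy():
  img = (np.random.rand(32, 32, 3) - MEANS) / STDS
  policy = [('Rotate', 0.7, 2), ('TranslateX', 0.3, 9)]
  return apply_policy(policy, img)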
class TransformFunction(object):
"""Wraps the Transform function for pretty printing options."""
def __init__(self, func, name):
self.f = func
self.name = name
def __repr__(self):
return '<' + self.name + '>'
def __call__(self, pil_img):
return self.f(pil_img)
class TransformT(object):
"""Each instance of this class represents a specific transform."""
def __init__(self, name, xform_fn):
self.name = name
self.xform = xform_fn
def pil_transformer(self, probability, level):
def return_function(im):
if random.random() < probability:
im = self.xform(im, level)
return im
name = self.name + '({:.1f},{})'.format(probability, level)
return TransformFunction(return_function, name)
def do_transform(self, image, level):
f = self.pil_transformer(PARAMETER_MAX, level)
return pil_unwrap(f(pil_wrap(image)))
################## Transform Functions ##################
identity = TransformT('identity', lambda pil_img, level: pil_img)
flip_lr = TransformT(
'FlipLR',
lambda pil_img, level: pil_img.transpose(Image.FLIP_LEFT_RIGHT))
flip_ud = TransformT(
'FlipUD',
lambda pil_img, level: pil_img.transpose(Image.FLIP_TOP_BOTTOM))
# pylint:disable=g-long-lambda
auto_contrast = TransformT(
'AutoContrast',
lambda pil_img, level: ImageOps.autocontrast(
pil_img.convert('RGB')).convert('RGBA'))
equalize = TransformT(
'Equalize',
lambda pil_img, level: ImageOps.equalize(
pil_img.convert('RGB')).convert('RGBA'))
invert = TransformT(
'Invert',
lambda pil_img, level: ImageOps.invert(
pil_img.convert('RGB')).convert('RGBA'))
# pylint:enable=g-long-lambda
blur = TransformT(
'Blur', lambda pil_img, level: pil_img.filter(ImageFilter.BLUR))
smooth = TransformT(
'Smooth',
lambda pil_img, level: pil_img.filter(ImageFilter.SMOOTH))
def _rotate_impl(pil_img, level):
"""Rotates `pil_img` from -30 to 30 degrees depending on `level`."""
degrees = int_parameter(level, 30)
if random.random() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees)
rotate = TransformT('Rotate', _rotate_impl)
def _posterize_impl(pil_img, level):
"""Applies PIL Posterize to `pil_img`."""
level = int_parameter(level, 4)
return ImageOps.posterize(pil_img.convert('RGB'), 4 - level).convert('RGBA')
posterize = TransformT('Posterize', _posterize_impl)
def _shear_x_impl(pil_img, level):
"""Applies PIL ShearX to `pil_img`.
The ShearX operation shears the image along the horizontal axis with `level`
magnitude.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
A PIL Image that has had ShearX applied to it.
"""
level = float_parameter(level, 0.3)
if random.random() > 0.5:
level = -level
return pil_img.transform((32, 32), Image.AFFINE, (1, level, 0, 0, 1, 0))
shear_x = TransformT('ShearX', _shear_x_impl)
def _shear_y_impl(pil_img, level):
"""Applies PIL ShearY to `pil_img`.
The ShearY operation shears the image along the vertical axis with `level`
magnitude.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
    A PIL Image that has had ShearY applied to it.
"""
level = float_parameter(level, 0.3)
if random.random() > 0.5:
level = -level
return pil_img.transform((32, 32), Image.AFFINE, (1, 0, 0, level, 1, 0))
shear_y = TransformT('ShearY', _shear_y_impl)
def _translate_x_impl(pil_img, level):
"""Applies PIL TranslateX to `pil_img`.
Translate the image in the horizontal direction by `level`
number of pixels.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
A PIL Image that has had TranslateX applied to it.
"""
level = int_parameter(level, 10)
if random.random() > 0.5:
level = -level
return pil_img.transform((32, 32), Image.AFFINE, (1, 0, level, 0, 1, 0))
translate_x = TransformT('TranslateX', _translate_x_impl)
def _translate_y_impl(pil_img, level):
"""Applies PIL TranslateY to `pil_img`.
Translate the image in the vertical direction by `level`
number of pixels.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
A PIL Image that has had TranslateY applied to it.
"""
level = int_parameter(level, 10)
if random.random() > 0.5:
level = -level
return pil_img.transform((32, 32), Image.AFFINE, (1, 0, 0, 0, 1, level))
translate_y = TransformT('TranslateY', _translate_y_impl)
def _crop_impl(pil_img, level, interpolation=Image.BILINEAR):
"""Applies a crop to `pil_img` with the size depending on the `level`."""
cropped = pil_img.crop((level, level, IMAGE_SIZE - level, IMAGE_SIZE - level))
resized = cropped.resize((IMAGE_SIZE, IMAGE_SIZE), interpolation)
return resized
crop_bilinear = TransformT('CropBilinear', _crop_impl)
def _solarize_impl(pil_img, level):
"""Applies PIL Solarize to `pil_img`.
  Inverts all pixel values above a threshold determined by `level`.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
A PIL Image that has had Solarize applied to it.
"""
level = int_parameter(level, 256)
return ImageOps.solarize(pil_img.convert('RGB'), 256 - level).convert('RGBA')
solarize = TransformT('Solarize', _solarize_impl)
def _cutout_pil_impl(pil_img, level):
"""Apply cutout to pil_img at the specified level."""
size = int_parameter(level, 20)
if size <= 0:
return pil_img
img_height, img_width, num_channels = (32, 32, 3)
_, upper_coord, lower_coord = (
create_cutout_mask(img_height, img_width, num_channels, size))
pixels = pil_img.load() # create the pixel map
  for i in range(upper_coord[0], lower_coord[0]):  # for every position in
    for j in range(upper_coord[1], lower_coord[1]):  # the cutout patch
      # Grey fill close to the dataset mean; the alpha of 0 marks the pixel so
      # that pil_unwrap will zero it out after unnormalizing.
      pixels[i, j] = (125, 122, 113, 0)
return pil_img
cutout = TransformT('Cutout', _cutout_pil_impl)
def _enhancer_impl(enhancer):
"""Sets level to be between 0.1 and 1.8 for ImageEnhance transforms of PIL."""
def impl(pil_img, level):
v = float_parameter(level, 1.8) + .1 # going to 0 just destroys it
return enhancer(pil_img).enhance(v)
return impl
color = TransformT('Color', _enhancer_impl(ImageEnhance.Color))
contrast = TransformT('Contrast', _enhancer_impl(ImageEnhance.Contrast))
brightness = TransformT('Brightness', _enhancer_impl(
ImageEnhance.Brightness))
sharpness = TransformT('Sharpness', _enhancer_impl(ImageEnhance.Sharpness))
ALL_TRANSFORMS = [
flip_lr,
flip_ud,
auto_contrast,
equalize,
invert,
rotate,
posterize,
crop_bilinear,
solarize,
color,
contrast,
brightness,
sharpness,
shear_x,
shear_y,
translate_x,
translate_y,
cutout,
blur,
smooth
]
NAME_TO_TRANSFORM = {t.name: t for t in ALL_TRANSFORMS}
TRANSFORM_NAMES = NAME_TO_TRANSFORM.keys()
| 13,751 | 29.424779 | 80 | py |
models | models-master/research/autoaugment/shake_drop.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the Shake-Shake Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import custom_ops as ops
import tensorflow as tf
def round_int(x):
"""Rounds `x` and then converts to an int."""
return int(math.floor(x + 0.5))
def shortcut(x, output_filters, stride):
"""Applies strided avg pool or zero padding to make output_filters match x."""
num_filters = int(x.shape[3])
if stride == 2:
x = ops.avg_pool(x, 2, stride=stride, padding='SAME')
if num_filters != output_filters:
diff = output_filters - num_filters
assert diff > 0
    # Zero pad with `diff` channels of zeros
padding = [[0, 0], [0, 0], [0, 0], [0, diff]]
x = tf.pad(x, padding)
return x
def calc_prob(curr_layer, total_layers, p_l):
"""Calculates drop prob depending on the current layer."""
return 1 - (float(curr_layer) / total_layers) * p_l
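# Illustrative note, not part of the original module: with p_l = 0.5 the
# keep-probability decays linearly with depth. For the 90 bottleneck layers
# built below, calc_prob(1, 90, 0.5) ~= 0.994 for the first layer and
# calc_prob(90, 90, 0.5) == 0.5 for the last one.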
def bottleneck_layer(x, n, stride, prob, is_training, alpha, beta):
"""Bottleneck layer for shake drop model."""
assert alpha[1] > alpha[0]
assert beta[1] > beta[0]
with tf.variable_scope('bottleneck_{}'.format(prob)):
input_layer = x
x = ops.batch_norm(x, scope='bn_1_pre')
x = ops.conv2d(x, n, 1, scope='1x1_conv_contract')
x = ops.batch_norm(x, scope='bn_1_post')
x = tf.nn.relu(x)
x = ops.conv2d(x, n, 3, stride=stride, scope='3x3')
x = ops.batch_norm(x, scope='bn_2')
x = tf.nn.relu(x)
x = ops.conv2d(x, n * 4, 1, scope='1x1_conv_expand')
x = ops.batch_norm(x, scope='bn_3')
# Apply regularization here
# Sample bernoulli with prob
if is_training:
batch_size = tf.shape(x)[0]
bern_shape = [batch_size, 1, 1, 1]
random_tensor = prob
random_tensor += tf.random_uniform(bern_shape, dtype=tf.float32)
binary_tensor = tf.floor(random_tensor)
alpha_values = tf.random_uniform(
[batch_size, 1, 1, 1], minval=alpha[0], maxval=alpha[1],
dtype=tf.float32)
beta_values = tf.random_uniform(
[batch_size, 1, 1, 1], minval=beta[0], maxval=beta[1],
dtype=tf.float32)
rand_forward = (
binary_tensor + alpha_values - binary_tensor * alpha_values)
rand_backward = (
binary_tensor + beta_values - binary_tensor * beta_values)
x = x * rand_backward + tf.stop_gradient(x * rand_forward -
x * rand_backward)
else:
expected_alpha = (alpha[1] + alpha[0])/2
# prob is the expectation of the bernoulli variable
x = (prob + expected_alpha - prob * expected_alpha) * x
res = shortcut(input_layer, n * 4, stride)
return x + res
def build_shake_drop_model(images, num_classes, is_training):
"""Builds the PyramidNet Shake-Drop model.
Build the PyramidNet Shake-Drop model from https://arxiv.org/abs/1802.02375.
Args:
    images: Tensor of images that will be fed into the Shake-Drop model.
    num_classes: Number of classes that the model needs to predict.
is_training: Is the model training or not.
Returns:
The logits of the PyramidNet Shake-Drop model.
"""
# ShakeDrop Hparams
p_l = 0.5
alpha_shake = [-1, 1]
beta_shake = [0, 1]
# PyramidNet Hparams
alpha = 200
depth = 272
# This is for the bottleneck architecture specifically
n = int((depth - 2) / 9)
start_channel = 16
add_channel = alpha / (3 * n)
# Building the models
x = images
x = ops.conv2d(x, 16, 3, scope='init_conv')
x = ops.batch_norm(x, scope='init_bn')
layer_num = 1
total_layers = n * 3
start_channel += add_channel
prob = calc_prob(layer_num, total_layers, p_l)
x = bottleneck_layer(
x, round_int(start_channel), 1, prob, is_training, alpha_shake,
beta_shake)
layer_num += 1
for _ in range(1, n):
start_channel += add_channel
prob = calc_prob(layer_num, total_layers, p_l)
x = bottleneck_layer(
x, round_int(start_channel), 1, prob, is_training, alpha_shake,
beta_shake)
layer_num += 1
start_channel += add_channel
prob = calc_prob(layer_num, total_layers, p_l)
x = bottleneck_layer(
x, round_int(start_channel), 2, prob, is_training, alpha_shake,
beta_shake)
layer_num += 1
for _ in range(1, n):
start_channel += add_channel
prob = calc_prob(layer_num, total_layers, p_l)
x = bottleneck_layer(
x, round_int(start_channel), 1, prob, is_training, alpha_shake,
beta_shake)
layer_num += 1
start_channel += add_channel
prob = calc_prob(layer_num, total_layers, p_l)
x = bottleneck_layer(
x, round_int(start_channel), 2, prob, is_training, alpha_shake,
beta_shake)
layer_num += 1
for _ in range(1, n):
start_channel += add_channel
prob = calc_prob(layer_num, total_layers, p_l)
x = bottleneck_layer(
x, round_int(start_channel), 1, prob, is_training, alpha_shake,
beta_shake)
layer_num += 1
assert layer_num - 1 == total_layers
x = ops.batch_norm(x, scope='final_bn')
x = tf.nn.relu(x)
x = ops.global_avg_pool(x)
# Fully connected
logits = ops.fc(x, num_classes)
return logits
| 5,826 | 31.553073 | 80 | py |
models | models-master/research/autoaugment/data_utils.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data utils for CIFAR-10 and CIFAR-100."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import cPickle
import os
import augmentation_transforms
import numpy as np
import policies as found_policies
import tensorflow as tf
# pylint:disable=logging-format-interpolation
class DataSet(object):
"""Dataset object that produces augmented training and eval data."""
def __init__(self, hparams):
self.hparams = hparams
self.epochs = 0
self.curr_train_index = 0
all_labels = []
self.good_policies = found_policies.good_policies()
    # Determine how many data batches to load
num_data_batches_to_load = 5
total_batches_to_load = num_data_batches_to_load
train_batches_to_load = total_batches_to_load
assert hparams.train_size + hparams.validation_size <= 50000
if hparams.eval_test:
total_batches_to_load += 1
# Determine how many images we have loaded
total_dataset_size = 10000 * num_data_batches_to_load
train_dataset_size = total_dataset_size
if hparams.eval_test:
total_dataset_size += 10000
if hparams.dataset == 'cifar10':
all_data = np.empty((total_batches_to_load, 10000, 3072), dtype=np.uint8)
elif hparams.dataset == 'cifar100':
assert num_data_batches_to_load == 5
all_data = np.empty((1, 50000, 3072), dtype=np.uint8)
if hparams.eval_test:
test_data = np.empty((1, 10000, 3072), dtype=np.uint8)
if hparams.dataset == 'cifar10':
tf.logging.info('Cifar10')
datafiles = [
'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',
'data_batch_5']
datafiles = datafiles[:train_batches_to_load]
if hparams.eval_test:
datafiles.append('test_batch')
num_classes = 10
elif hparams.dataset == 'cifar100':
datafiles = ['train']
if hparams.eval_test:
datafiles.append('test')
num_classes = 100
else:
      raise NotImplementedError('Unimplemented dataset: %s' % hparams.dataset)
if hparams.dataset != 'test':
for file_num, f in enumerate(datafiles):
d = unpickle(os.path.join(hparams.data_path, f))
if f == 'test':
test_data[0] = copy.deepcopy(d['data'])
all_data = np.concatenate([all_data, test_data], axis=1)
else:
all_data[file_num] = copy.deepcopy(d['data'])
if hparams.dataset == 'cifar10':
labels = np.array(d['labels'])
else:
labels = np.array(d['fine_labels'])
nsamples = len(labels)
for idx in range(nsamples):
all_labels.append(labels[idx])
all_data = all_data.reshape(total_dataset_size, 3072)
all_data = all_data.reshape(-1, 3, 32, 32)
all_data = all_data.transpose(0, 2, 3, 1).copy()
all_data = all_data / 255.0
mean = augmentation_transforms.MEANS
std = augmentation_transforms.STDS
tf.logging.info('mean:{} std: {}'.format(mean, std))
all_data = (all_data - mean) / std
all_labels = np.eye(num_classes)[np.array(all_labels, dtype=np.int32)]
assert len(all_data) == len(all_labels)
tf.logging.info(
'In CIFAR10 loader, number of images: {}'.format(len(all_data)))
# Break off test data
if hparams.eval_test:
self.test_images = all_data[train_dataset_size:]
self.test_labels = all_labels[train_dataset_size:]
# Shuffle the rest of the data
all_data = all_data[:train_dataset_size]
all_labels = all_labels[:train_dataset_size]
np.random.seed(0)
perm = np.arange(len(all_data))
np.random.shuffle(perm)
all_data = all_data[perm]
all_labels = all_labels[perm]
# Break into train and val
train_size, val_size = hparams.train_size, hparams.validation_size
assert 50000 >= train_size + val_size
self.train_images = all_data[:train_size]
self.train_labels = all_labels[:train_size]
self.val_images = all_data[train_size:train_size + val_size]
self.val_labels = all_labels[train_size:train_size + val_size]
self.num_train = self.train_images.shape[0]
def next_batch(self):
"""Return the next minibatch of augmented data."""
next_train_index = self.curr_train_index + self.hparams.batch_size
if next_train_index > self.num_train:
# Increase epoch number
epoch = self.epochs + 1
self.reset()
self.epochs = epoch
batched_data = (
self.train_images[self.curr_train_index:
self.curr_train_index + self.hparams.batch_size],
self.train_labels[self.curr_train_index:
self.curr_train_index + self.hparams.batch_size])
final_imgs = []
images, labels = batched_data
for data in images:
epoch_policy = self.good_policies[np.random.choice(
len(self.good_policies))]
final_img = augmentation_transforms.apply_policy(
epoch_policy, data)
final_img = augmentation_transforms.random_flip(
augmentation_transforms.zero_pad_and_crop(final_img, 4))
# Apply cutout
final_img = augmentation_transforms.cutout_numpy(final_img)
final_imgs.append(final_img)
batched_data = (np.array(final_imgs, np.float32), labels)
self.curr_train_index += self.hparams.batch_size
return batched_data
def reset(self):
"""Reset training data and index into the training data."""
self.epochs = 0
# Shuffle the training data
perm = np.arange(self.num_train)
np.random.shuffle(perm)
assert self.num_train == self.train_images.shape[
0], 'Error incorrect shuffling mask'
self.train_images = self.train_images[perm]
self.train_labels = self.train_labels[perm]
self.curr_train_index = 0
def unpickle(f):
tf.logging.info('loading file: {}'.format(f))
fo = tf.gfile.Open(f, 'r')
d = cPickle.load(fo)
fo.close()
return d
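# Illustrative sketch, not part of the original module: the per-image
# augmentation chain used inside DataSet.next_batch(), applied to a single
# normalized image outside of the class. `policies` is a list of sub-policies
# such as the one returned by found_policies.good_policies().
def _example_augment_single_image(image, policies):
  """Applies a random policy, then flip, pad-and-crop and cutout."""
  policy = policies[np.random.choice(len(policies))]
  img = augmentation_transforms.apply_policy(policy, image)
  img = augmentation_transforms.random_flip(
      augmentation_transforms.zero_pad_and_crop(img, 4))
  return augmentation_transforms.cutout_numpy(img)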
| 6,587 | 34.610811 | 80 | py |
models | models-master/research/autoaugment/shake_shake.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the Shake-Shake Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import custom_ops as ops
import tensorflow as tf
def _shake_shake_skip_connection(x, output_filters, stride):
"""Adds a residual connection to the filter x for the shake-shake model."""
curr_filters = int(x.shape[3])
if curr_filters == output_filters:
return x
stride_spec = ops.stride_arr(stride, stride)
# Skip path 1
path1 = tf.nn.avg_pool(
x, [1, 1, 1, 1], stride_spec, 'VALID', data_format='NHWC')
path1 = ops.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
# Skip path 2
# First pad with 0's then crop
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(x, pad_arr)[:, 1:, 1:, :]
concat_axis = 3
path2 = tf.nn.avg_pool(
path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format='NHWC')
path2 = ops.conv2d(path2, int(output_filters / 2), 1, scope='path2_conv')
# Concat and apply BN
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = ops.batch_norm(final_path, scope='final_path_bn')
return final_path
def _shake_shake_branch(x, output_filters, stride, rand_forward, rand_backward,
is_training):
"""Building a 2 branching convnet."""
x = tf.nn.relu(x)
x = ops.conv2d(x, output_filters, 3, stride=stride, scope='conv1')
x = ops.batch_norm(x, scope='bn1')
x = tf.nn.relu(x)
x = ops.conv2d(x, output_filters, 3, scope='conv2')
x = ops.batch_norm(x, scope='bn2')
if is_training:
x = x * rand_backward + tf.stop_gradient(x * rand_forward -
x * rand_backward)
else:
x *= 1.0 / 2
return x
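# Illustrative sketch, not part of the original module: the scaling above uses
# tf.stop_gradient so the forward pass sees `rand_forward` while gradients see
# `rand_backward`. A minimal TF1 numeric check of that identity:
def _example_shake_scaling():
  x = tf.constant(2.0)
  rand_forward, rand_backward = tf.constant(0.3), tf.constant(0.7)
  y = x * rand_backward + tf.stop_gradient(x * rand_forward - x * rand_backward)
  grad = tf.gradients(y, x)[0]
  with tf.Session() as sess:
    return sess.run([y, grad])  # approximately [0.6, 0.7]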
def _shake_shake_block(x, output_filters, stride, is_training):
"""Builds a full shake-shake sub layer."""
batch_size = tf.shape(x)[0]
# Generate random numbers for scaling the branches
rand_forward = [
tf.random_uniform(
[batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
for _ in range(2)
]
rand_backward = [
tf.random_uniform(
[batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
for _ in range(2)
]
# Normalize so that all sum to 1
total_forward = tf.add_n(rand_forward)
total_backward = tf.add_n(rand_backward)
rand_forward = [samp / total_forward for samp in rand_forward]
rand_backward = [samp / total_backward for samp in rand_backward]
zipped_rand = zip(rand_forward, rand_backward)
branches = []
for branch, (r_forward, r_backward) in enumerate(zipped_rand):
with tf.variable_scope('branch_{}'.format(branch)):
b = _shake_shake_branch(x, output_filters, stride, r_forward, r_backward,
is_training)
branches.append(b)
res = _shake_shake_skip_connection(x, output_filters, stride)
return res + tf.add_n(branches)
def _shake_shake_layer(x, output_filters, num_blocks, stride,
is_training):
"""Builds many sub layers into one full layer."""
for block_num in range(num_blocks):
curr_stride = stride if (block_num == 0) else 1
with tf.variable_scope('layer_{}'.format(block_num)):
x = _shake_shake_block(x, output_filters, curr_stride,
is_training)
return x
def build_shake_shake_model(images, num_classes, hparams, is_training):
"""Builds the Shake-Shake model.
Build the Shake-Shake model from https://arxiv.org/abs/1705.07485.
Args:
    images: Tensor of images that will be fed into the Shake-Shake model.
    num_classes: Number of classes that the model needs to predict.
hparams: tf.HParams object that contains additional hparams needed to
construct the model. In this case it is the `shake_shake_widen_factor`
that is used to determine how many filters the model has.
is_training: Is the model training or not.
Returns:
The logits of the Shake-Shake model.
"""
depth = 26
k = hparams.shake_shake_widen_factor # The widen factor
n = int((depth - 2) / 6)
x = images
x = ops.conv2d(x, 16, 3, scope='init_conv')
x = ops.batch_norm(x, scope='init_bn')
with tf.variable_scope('L1'):
x = _shake_shake_layer(x, 16 * k, n, 1, is_training)
with tf.variable_scope('L2'):
x = _shake_shake_layer(x, 32 * k, n, 2, is_training)
with tf.variable_scope('L3'):
x = _shake_shake_layer(x, 64 * k, n, 2, is_training)
x = tf.nn.relu(x)
x = ops.global_avg_pool(x)
# Fully connected
logits = ops.fc(x, num_classes)
return logits
| 5,242 | 34.425676 | 80 | py |
models | models-master/research/autoaugment/policies.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def good_policies():
"""AutoAugment policies found on Cifar."""
exp0_0 = [
[('Invert', 0.1, 7), ('Contrast', 0.2, 6)],
[('Rotate', 0.7, 2), ('TranslateX', 0.3, 9)],
[('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],
[('ShearY', 0.5, 8), ('TranslateY', 0.7, 9)],
[('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)]]
exp0_1 = [
[('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],
[('TranslateY', 0.9, 9), ('TranslateY', 0.7, 9)],
[('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],
[('Equalize', 0.8, 8), ('Invert', 0.1, 3)],
[('TranslateY', 0.7, 9), ('AutoContrast', 0.9, 1)]]
exp0_2 = [
[('Solarize', 0.4, 5), ('AutoContrast', 0.0, 2)],
[('TranslateY', 0.7, 9), ('TranslateY', 0.7, 9)],
[('AutoContrast', 0.9, 0), ('Solarize', 0.4, 3)],
[('Equalize', 0.7, 5), ('Invert', 0.1, 3)],
[('TranslateY', 0.7, 9), ('TranslateY', 0.7, 9)]]
exp0_3 = [
[('Solarize', 0.4, 5), ('AutoContrast', 0.9, 1)],
[('TranslateY', 0.8, 9), ('TranslateY', 0.9, 9)],
[('AutoContrast', 0.8, 0), ('TranslateY', 0.7, 9)],
[('TranslateY', 0.2, 7), ('Color', 0.9, 6)],
[('Equalize', 0.7, 6), ('Color', 0.4, 9)]]
exp1_0 = [
[('ShearY', 0.2, 7), ('Posterize', 0.3, 7)],
[('Color', 0.4, 3), ('Brightness', 0.6, 7)],
[('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],
[('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],
[('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)]]
exp1_1 = [
[('Brightness', 0.3, 7), ('AutoContrast', 0.5, 8)],
[('AutoContrast', 0.9, 4), ('AutoContrast', 0.5, 6)],
[('Solarize', 0.3, 5), ('Equalize', 0.6, 5)],
[('TranslateY', 0.2, 4), ('Sharpness', 0.3, 3)],
[('Brightness', 0.0, 8), ('Color', 0.8, 8)]]
exp1_2 = [
[('Solarize', 0.2, 6), ('Color', 0.8, 6)],
[('Solarize', 0.2, 6), ('AutoContrast', 0.8, 1)],
[('Solarize', 0.4, 1), ('Equalize', 0.6, 5)],
[('Brightness', 0.0, 0), ('Solarize', 0.5, 2)],
[('AutoContrast', 0.9, 5), ('Brightness', 0.5, 3)]]
exp1_3 = [
[('Contrast', 0.7, 5), ('Brightness', 0.0, 2)],
[('Solarize', 0.2, 8), ('Solarize', 0.1, 5)],
[('Contrast', 0.5, 1), ('TranslateY', 0.2, 9)],
[('AutoContrast', 0.6, 5), ('TranslateY', 0.0, 9)],
[('AutoContrast', 0.9, 4), ('Equalize', 0.8, 4)]]
exp1_4 = [
[('Brightness', 0.0, 7), ('Equalize', 0.4, 7)],
[('Solarize', 0.2, 5), ('Equalize', 0.7, 5)],
[('Equalize', 0.6, 8), ('Color', 0.6, 2)],
[('Color', 0.3, 7), ('Color', 0.2, 4)],
[('AutoContrast', 0.5, 2), ('Solarize', 0.7, 2)]]
exp1_5 = [
[('AutoContrast', 0.2, 0), ('Equalize', 0.1, 0)],
[('ShearY', 0.6, 5), ('Equalize', 0.6, 5)],
[('Brightness', 0.9, 3), ('AutoContrast', 0.4, 1)],
[('Equalize', 0.8, 8), ('Equalize', 0.7, 7)],
[('Equalize', 0.7, 7), ('Solarize', 0.5, 0)]]
exp1_6 = [
[('Equalize', 0.8, 4), ('TranslateY', 0.8, 9)],
[('TranslateY', 0.8, 9), ('TranslateY', 0.6, 9)],
[('TranslateY', 0.9, 0), ('TranslateY', 0.5, 9)],
[('AutoContrast', 0.5, 3), ('Solarize', 0.3, 4)],
[('Solarize', 0.5, 3), ('Equalize', 0.4, 4)]]
exp2_0 = [
[('Color', 0.7, 7), ('TranslateX', 0.5, 8)],
[('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],
[('TranslateY', 0.4, 3), ('Sharpness', 0.2, 6)],
[('Brightness', 0.9, 6), ('Color', 0.2, 8)],
[('Solarize', 0.5, 2), ('Invert', 0.0, 3)]]
exp2_1 = [
[('AutoContrast', 0.1, 5), ('Brightness', 0.0, 0)],
[('Cutout', 0.2, 4), ('Equalize', 0.1, 1)],
[('Equalize', 0.7, 7), ('AutoContrast', 0.6, 4)],
[('Color', 0.1, 8), ('ShearY', 0.2, 3)],
[('ShearY', 0.4, 2), ('Rotate', 0.7, 0)]]
exp2_2 = [
[('ShearY', 0.1, 3), ('AutoContrast', 0.9, 5)],
[('TranslateY', 0.3, 6), ('Cutout', 0.3, 3)],
[('Equalize', 0.5, 0), ('Solarize', 0.6, 6)],
[('AutoContrast', 0.3, 5), ('Rotate', 0.2, 7)],
[('Equalize', 0.8, 2), ('Invert', 0.4, 0)]]
exp2_3 = [
[('Equalize', 0.9, 5), ('Color', 0.7, 0)],
[('Equalize', 0.1, 1), ('ShearY', 0.1, 3)],
[('AutoContrast', 0.7, 3), ('Equalize', 0.7, 0)],
[('Brightness', 0.5, 1), ('Contrast', 0.1, 7)],
[('Contrast', 0.1, 4), ('Solarize', 0.6, 5)]]
exp2_4 = [
[('Solarize', 0.2, 3), ('ShearX', 0.0, 0)],
[('TranslateX', 0.3, 0), ('TranslateX', 0.6, 0)],
[('Equalize', 0.5, 9), ('TranslateY', 0.6, 7)],
[('ShearX', 0.1, 0), ('Sharpness', 0.5, 1)],
[('Equalize', 0.8, 6), ('Invert', 0.3, 6)]]
exp2_5 = [
[('AutoContrast', 0.3, 9), ('Cutout', 0.5, 3)],
[('ShearX', 0.4, 4), ('AutoContrast', 0.9, 2)],
[('ShearX', 0.0, 3), ('Posterize', 0.0, 3)],
[('Solarize', 0.4, 3), ('Color', 0.2, 4)],
[('Equalize', 0.1, 4), ('Equalize', 0.7, 6)]]
exp2_6 = [
[('Equalize', 0.3, 8), ('AutoContrast', 0.4, 3)],
[('Solarize', 0.6, 4), ('AutoContrast', 0.7, 6)],
[('AutoContrast', 0.2, 9), ('Brightness', 0.4, 8)],
[('Equalize', 0.1, 0), ('Equalize', 0.0, 6)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 4)]]
exp2_7 = [
[('Equalize', 0.5, 5), ('AutoContrast', 0.1, 2)],
[('Solarize', 0.5, 5), ('AutoContrast', 0.9, 5)],
[('AutoContrast', 0.6, 1), ('AutoContrast', 0.7, 8)],
[('Equalize', 0.2, 0), ('AutoContrast', 0.1, 2)],
[('Equalize', 0.6, 9), ('Equalize', 0.4, 4)]]
exp0s = exp0_0 + exp0_1 + exp0_2 + exp0_3
exp1s = exp1_0 + exp1_1 + exp1_2 + exp1_3 + exp1_4 + exp1_5 + exp1_6
exp2s = exp2_0 + exp2_1 + exp2_2 + exp2_3 + exp2_4 + exp2_5 + exp2_6 + exp2_7
return exp0s + exp1s + exp2s
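# Illustrative sketch, not part of the original module: each entry above is a
# sub-policy of two (name, probability, level) operations. During training one
# sub-policy is sampled uniformly per image and applied with
# augmentation_transforms.apply_policy, e.g.
#   policy = good_policies()[np.random.choice(len(good_policies()))]
#   augmented = augmentation_transforms.apply_policy(policy, image)
# (np and augmentation_transforms are not imported here; the snippet assumes
# the caller's environment, as in data_utils.py.)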
| 6,424 | 44.567376 | 80 | py |
models | models-master/research/autoaugment/train_cifar.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment Train/Eval module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
import custom_ops as ops
import data_utils
import helper_utils
import numpy as np
from shake_drop import build_shake_drop_model
from shake_shake import build_shake_shake_model
import tensorflow as tf
from wrn import build_wrn_model
tf.flags.DEFINE_string('model_name', 'wrn',
'wrn, shake_shake_32, shake_shake_96, shake_shake_112, '
'pyramid_net')
tf.flags.DEFINE_string('checkpoint_dir', '/tmp/training', 'Training Directory.')
tf.flags.DEFINE_string('data_path', '/tmp/data',
'Directory where dataset is located.')
tf.flags.DEFINE_string('dataset', 'cifar10',
'Dataset to train with. Either cifar10 or cifar100')
tf.flags.DEFINE_integer('use_cpu', 1, '1 if use CPU, else GPU.')
FLAGS = tf.flags.FLAGS
arg_scope = tf.contrib.framework.arg_scope
def setup_arg_scopes(is_training):
"""Sets up the argscopes that will be used when building an image model.
Args:
is_training: Is the model training or not.
Returns:
Arg scopes to be put around the model being constructed.
"""
batch_norm_decay = 0.9
batch_norm_epsilon = 1e-5
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
# collection containing the moving mean and moving variance.
'is_training': is_training,
}
scopes = []
scopes.append(arg_scope([ops.batch_norm], **batch_norm_params))
return scopes
def build_model(inputs, num_classes, is_training, hparams):
"""Constructs the vision model being trained/evaled.
Args:
inputs: input features/images being fed to the image model build built.
num_classes: number of output classes being predicted.
is_training: is the model training or not.
hparams: additional hyperparameters associated with the image model.
Returns:
The logits of the image model.
"""
scopes = setup_arg_scopes(is_training)
with contextlib.nested(*scopes):
if hparams.model_name == 'pyramid_net':
logits = build_shake_drop_model(
inputs, num_classes, is_training)
elif hparams.model_name == 'wrn':
logits = build_wrn_model(
inputs, num_classes, hparams.wrn_size)
elif hparams.model_name == 'shake_shake':
logits = build_shake_shake_model(
inputs, num_classes, hparams, is_training)
return logits
class CifarModel(object):
"""Builds an image model for Cifar10/Cifar100."""
def __init__(self, hparams):
self.hparams = hparams
def build(self, mode):
"""Construct the cifar model."""
assert mode in ['train', 'eval']
self.mode = mode
self._setup_misc(mode)
self._setup_images_and_labels()
self._build_graph(self.images, self.labels, mode)
self.init = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
def _setup_misc(self, mode):
"""Sets up miscellaneous in the cifar model constructor."""
self.lr_rate_ph = tf.Variable(0.0, name='lrn_rate', trainable=False)
self.reuse = None if (mode == 'train') else True
self.batch_size = self.hparams.batch_size
if mode == 'eval':
self.batch_size = 25
def _setup_images_and_labels(self):
"""Sets up image and label placeholders for the cifar model."""
if FLAGS.dataset == 'cifar10':
self.num_classes = 10
else:
self.num_classes = 100
self.images = tf.placeholder(tf.float32, [self.batch_size, 32, 32, 3])
self.labels = tf.placeholder(tf.float32,
[self.batch_size, self.num_classes])
def assign_epoch(self, session, epoch_value):
session.run(self._epoch_update, feed_dict={self._new_epoch: epoch_value})
def _build_graph(self, images, labels, mode):
"""Constructs the TF graph for the cifar model.
Args:
images: A 4-D image Tensor
labels: A 2-D labels Tensor.
      mode: string indicating training mode (e.g., 'train', 'valid', 'test').
"""
is_training = 'train' in mode
if is_training:
self.global_step = tf.train.get_or_create_global_step()
logits = build_model(
images,
self.num_classes,
is_training,
self.hparams)
self.predictions, self.cost = helper_utils.setup_loss(
logits, labels)
self.accuracy, self.eval_op = tf.metrics.accuracy(
tf.argmax(labels, 1), tf.argmax(self.predictions, 1))
self._calc_num_trainable_params()
# Adds L2 weight decay to the cost
self.cost = helper_utils.decay_weights(self.cost,
self.hparams.weight_decay_rate)
if is_training:
self._build_train_op()
# Setup checkpointing for this child model
      # Keep the 2 most recent checkpoints around during training.
with tf.device('/cpu:0'):
self.saver = tf.train.Saver(max_to_keep=2)
self.init = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
def _calc_num_trainable_params(self):
self.num_trainable_params = np.sum([
np.prod(var.get_shape().as_list()) for var in tf.trainable_variables()
])
tf.logging.info('number of trainable params: {}'.format(
self.num_trainable_params))
def _build_train_op(self):
"""Builds the train op for the cifar model."""
hparams = self.hparams
tvars = tf.trainable_variables()
grads = tf.gradients(self.cost, tvars)
if hparams.gradient_clipping_by_global_norm > 0.0:
grads, norm = tf.clip_by_global_norm(
grads, hparams.gradient_clipping_by_global_norm)
tf.summary.scalar('grad_norm', norm)
# Setup the initial learning rate
initial_lr = self.lr_rate_ph
optimizer = tf.train.MomentumOptimizer(
initial_lr,
0.9,
use_nesterov=True)
self.optimizer = optimizer
apply_op = optimizer.apply_gradients(
zip(grads, tvars), global_step=self.global_step, name='train_step')
train_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies([apply_op]):
self.train_op = tf.group(*train_ops)
class CifarModelTrainer(object):
"""Trains an instance of the CifarModel class."""
def __init__(self, hparams):
self._session = None
self.hparams = hparams
self.model_dir = os.path.join(FLAGS.checkpoint_dir, 'model')
self.log_dir = os.path.join(FLAGS.checkpoint_dir, 'log')
# Set the random seed to be sure the same validation set
# is used for each model
np.random.seed(0)
self.data_loader = data_utils.DataSet(hparams)
np.random.seed() # Put the random seed back to random
self.data_loader.reset()
def save_model(self, step=None):
"""Dumps model into the backup_dir.
Args:
step: If provided, creates a checkpoint with the given step
number, instead of overwriting the existing checkpoints.
"""
model_save_name = os.path.join(self.model_dir, 'model.ckpt')
if not tf.gfile.IsDirectory(self.model_dir):
tf.gfile.MakeDirs(self.model_dir)
self.saver.save(self.session, model_save_name, global_step=step)
tf.logging.info('Saved child model')
def extract_model_spec(self):
"""Loads a checkpoint with the architecture structure stored in the name."""
checkpoint_path = tf.train.latest_checkpoint(self.model_dir)
if checkpoint_path is not None:
self.saver.restore(self.session, checkpoint_path)
tf.logging.info('Loaded child model checkpoint from %s',
checkpoint_path)
else:
self.save_model(step=0)
def eval_child_model(self, model, data_loader, mode):
"""Evaluate the child model.
Args:
model: image model that will be evaluated.
data_loader: dataset object to extract eval data from.
mode: will the model be evalled on train, val or test.
Returns:
Accuracy of the model on the specified dataset.
"""
tf.logging.info('Evaluating child model in mode %s', mode)
while True:
try:
with self._new_session(model):
accuracy = helper_utils.eval_child_model(
self.session,
model,
data_loader,
mode)
tf.logging.info('Eval child model accuracy: {}'.format(accuracy))
# If epoch trained without raising the below errors, break
# from loop.
break
except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:
tf.logging.info('Retryable error caught: %s. Retrying.', e)
return accuracy
@contextlib.contextmanager
def _new_session(self, m):
"""Creates a new session for model m."""
# Create a new session for this model, initialize
# variables, and save / restore from
# checkpoint.
self._session = tf.Session(
'',
config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False))
self.session.run(m.init)
# Load in a previous checkpoint, or save this one
self.extract_model_spec()
try:
yield
finally:
tf.Session.reset('')
self._session = None
def _build_models(self):
"""Builds the image models for train and eval."""
# Determine if we should build the train and eval model. When using
# distributed training we only want to build one or the other and not both.
with tf.variable_scope('model', use_resource=False):
m = CifarModel(self.hparams)
m.build('train')
self._num_trainable_params = m.num_trainable_params
self._saver = m.saver
with tf.variable_scope('model', reuse=True, use_resource=False):
meval = CifarModel(self.hparams)
meval.build('eval')
return m, meval
def _calc_starting_epoch(self, m):
"""Calculates the starting epoch for model m based on global step."""
hparams = self.hparams
batch_size = hparams.batch_size
steps_per_epoch = int(hparams.train_size / batch_size)
with self._new_session(m):
curr_step = self.session.run(m.global_step)
total_steps = steps_per_epoch * hparams.num_epochs
epochs_left = (total_steps - curr_step) // steps_per_epoch
starting_epoch = hparams.num_epochs - epochs_left
return starting_epoch
def _run_training_loop(self, m, curr_epoch):
"""Trains the cifar model `m` for one epoch."""
start_time = time.time()
while True:
try:
with self._new_session(m):
train_accuracy = helper_utils.run_epoch_training(
self.session, m, self.data_loader, curr_epoch)
tf.logging.info('Saving model after epoch')
self.save_model(step=curr_epoch)
break
except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:
tf.logging.info('Retryable error caught: %s. Retrying.', e)
tf.logging.info('Finished epoch: {}'.format(curr_epoch))
tf.logging.info('Epoch time(min): {}'.format(
(time.time() - start_time) / 60.0))
return train_accuracy
def _compute_final_accuracies(self, meval):
"""Run once training is finished to compute final val/test accuracies."""
valid_accuracy = self.eval_child_model(meval, self.data_loader, 'val')
if self.hparams.eval_test:
test_accuracy = self.eval_child_model(meval, self.data_loader, 'test')
else:
test_accuracy = 0
tf.logging.info('Test Accuracy: {}'.format(test_accuracy))
return valid_accuracy, test_accuracy
def run_model(self):
"""Trains and evalutes the image model."""
hparams = self.hparams
# Build the child graph
with tf.Graph().as_default(), tf.device(
'/cpu:0' if FLAGS.use_cpu else '/gpu:0'):
m, meval = self._build_models()
# Figure out what epoch we are on
starting_epoch = self._calc_starting_epoch(m)
# Run the validation error right at the beginning
valid_accuracy = self.eval_child_model(
meval, self.data_loader, 'val')
tf.logging.info('Before Training Epoch: {} Val Acc: {}'.format(
starting_epoch, valid_accuracy))
training_accuracy = None
for curr_epoch in xrange(starting_epoch, hparams.num_epochs):
# Run one training epoch
training_accuracy = self._run_training_loop(m, curr_epoch)
valid_accuracy = self.eval_child_model(
meval, self.data_loader, 'val')
tf.logging.info('Epoch: {} Valid Acc: {}'.format(
curr_epoch, valid_accuracy))
valid_accuracy, test_accuracy = self._compute_final_accuracies(
meval)
tf.logging.info(
'Train Acc: {} Valid Acc: {} Test Acc: {}'.format(
training_accuracy, valid_accuracy, test_accuracy))
@property
def saver(self):
return self._saver
@property
def session(self):
return self._session
@property
def num_trainable_params(self):
return self._num_trainable_params
def main(_):
if FLAGS.dataset not in ['cifar10', 'cifar100']:
raise ValueError('Invalid dataset: %s' % FLAGS.dataset)
hparams = tf.contrib.training.HParams(
train_size=50000,
validation_size=0,
eval_test=1,
dataset=FLAGS.dataset,
data_path=FLAGS.data_path,
batch_size=128,
gradient_clipping_by_global_norm=5.0)
if FLAGS.model_name == 'wrn':
hparams.add_hparam('model_name', 'wrn')
hparams.add_hparam('num_epochs', 200)
hparams.add_hparam('wrn_size', 160)
hparams.add_hparam('lr', 0.1)
hparams.add_hparam('weight_decay_rate', 5e-4)
elif FLAGS.model_name == 'shake_shake_32':
hparams.add_hparam('model_name', 'shake_shake')
hparams.add_hparam('num_epochs', 1800)
hparams.add_hparam('shake_shake_widen_factor', 2)
hparams.add_hparam('lr', 0.01)
hparams.add_hparam('weight_decay_rate', 0.001)
elif FLAGS.model_name == 'shake_shake_96':
hparams.add_hparam('model_name', 'shake_shake')
hparams.add_hparam('num_epochs', 1800)
hparams.add_hparam('shake_shake_widen_factor', 6)
hparams.add_hparam('lr', 0.01)
hparams.add_hparam('weight_decay_rate', 0.001)
elif FLAGS.model_name == 'shake_shake_112':
hparams.add_hparam('model_name', 'shake_shake')
hparams.add_hparam('num_epochs', 1800)
hparams.add_hparam('shake_shake_widen_factor', 7)
hparams.add_hparam('lr', 0.01)
hparams.add_hparam('weight_decay_rate', 0.001)
elif FLAGS.model_name == 'pyramid_net':
hparams.add_hparam('model_name', 'pyramid_net')
hparams.add_hparam('num_epochs', 1800)
hparams.add_hparam('lr', 0.05)
hparams.add_hparam('weight_decay_rate', 5e-5)
hparams.batch_size = 64
else:
raise ValueError('Not Valid Model Name: %s' % FLAGS.model_name)
cifar_trainer = CifarModelTrainer(hparams)
cifar_trainer.run_model()
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
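# Example invocation (illustrative; the flag values below are assumptions, not
# requirements beyond the flag definitions above):
#   python train_cifar.py --model_name=wrn --dataset=cifar10 \
#     --data_path=/tmp/data --checkpoint_dir=/tmp/training --use_cpu=0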
| 15,718 | 33.699779 | 80 | py |
models | models-master/research/autoaugment/wrn.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the Wide-ResNet Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import custom_ops as ops
import numpy as np
import tensorflow as tf
def residual_block(
x, in_filter, out_filter, stride, activate_before_residual=False):
"""Adds residual connection to `x` in addition to applying BN->ReLU->3x3 Conv.
Args:
x: Tensor that is the output of the previous layer in the model.
in_filter: Number of filters `x` has.
out_filter: Number of filters that the output of this layer will have.
stride: Integer that specified what stride should be applied to `x`.
activate_before_residual: Boolean on whether a BN->ReLU should be applied
to x before the convolution is applied.
Returns:
A Tensor that is the result of applying two sequences of BN->ReLU->3x3 Conv
and then adding that Tensor to `x`.
"""
if activate_before_residual: # Pass up RELU and BN activation for resnet
with tf.variable_scope('shared_activation'):
x = ops.batch_norm(x, scope='init_bn')
x = tf.nn.relu(x)
orig_x = x
else:
orig_x = x
block_x = x
if not activate_before_residual:
with tf.variable_scope('residual_only_activation'):
block_x = ops.batch_norm(block_x, scope='init_bn')
block_x = tf.nn.relu(block_x)
with tf.variable_scope('sub1'):
block_x = ops.conv2d(
block_x, out_filter, 3, stride=stride, scope='conv1')
with tf.variable_scope('sub2'):
block_x = ops.batch_norm(block_x, scope='bn2')
block_x = tf.nn.relu(block_x)
block_x = ops.conv2d(
block_x, out_filter, 3, stride=1, scope='conv2')
with tf.variable_scope(
      'sub_add'):  # If the numbers of filters do not agree, zero pad them
if in_filter != out_filter:
orig_x = ops.avg_pool(orig_x, stride, stride)
orig_x = ops.zero_pad(orig_x, in_filter, out_filter)
x = orig_x + block_x
return x
def _res_add(in_filter, out_filter, stride, x, orig_x):
"""Adds `x` with `orig_x`, both of which are layers in the model.
Args:
in_filter: Number of filters in `orig_x`.
out_filter: Number of filters in `x`.
    stride: Integer specifying the stride that should be applied to `orig_x`.
x: Tensor that is the output of the previous layer.
orig_x: Tensor that is the output of an earlier layer in the network.
Returns:
A Tensor that is the result of `x` and `orig_x` being added after
zero padding and striding are applied to `orig_x` to get the shapes
to match.
"""
if in_filter != out_filter:
orig_x = ops.avg_pool(orig_x, stride, stride)
orig_x = ops.zero_pad(orig_x, in_filter, out_filter)
x = x + orig_x
orig_x = x
return x, orig_x
def build_wrn_model(images, num_classes, wrn_size):
"""Builds the WRN model.
Build the Wide ResNet model from https://arxiv.org/abs/1605.07146.
Args:
images: Tensor of images that will be fed into the Wide ResNet Model.
    num_classes: Number of classes that the model needs to predict.
wrn_size: Parameter that scales the number of filters in the Wide ResNet
model.
Returns:
The logits of the Wide ResNet model.
"""
kernel_size = wrn_size
filter_size = 3
num_blocks_per_resnet = 4
filters = [
min(kernel_size, 16), kernel_size, kernel_size * 2, kernel_size * 4
]
strides = [1, 2, 2] # stride for each resblock
# Run the first conv
with tf.variable_scope('init'):
x = images
output_filters = filters[0]
x = ops.conv2d(x, output_filters, filter_size, scope='init_conv')
first_x = x # Res from the beginning
orig_x = x # Res from previous block
for block_num in range(1, 4):
with tf.variable_scope('unit_{}_0'.format(block_num)):
activate_before_residual = True if block_num == 1 else False
x = residual_block(
x,
filters[block_num - 1],
filters[block_num],
strides[block_num - 1],
activate_before_residual=activate_before_residual)
for i in range(1, num_blocks_per_resnet):
with tf.variable_scope('unit_{}_{}'.format(block_num, i)):
x = residual_block(
x,
filters[block_num],
filters[block_num],
1,
activate_before_residual=False)
x, orig_x = _res_add(filters[block_num - 1], filters[block_num],
strides[block_num - 1], x, orig_x)
final_stride_val = np.prod(strides)
x, _ = _res_add(filters[0], filters[3], final_stride_val, x, first_x)
with tf.variable_scope('unit_last'):
x = ops.batch_norm(x, scope='final_bn')
x = tf.nn.relu(x)
x = ops.global_avg_pool(x)
logits = ops.fc(x, num_classes)
return logits
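# Illustrative note, not part of the original module: with the wrn_size of 160
# used in train_cifar.py, the per-group filter counts are
# [min(160, 16), 160, 320, 640] = [16, 160, 320, 640] with strides [1, 2, 2],
# i.e. the spatial resolution goes 32 -> 32 -> 16 -> 8 before global average
# pooling and the final fully connected layer.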
| 5,415 | 33.062893 | 80 | py |
models | models-master/research/autoaugment/helper_utils.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions used for training AutoAugment models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def setup_loss(logits, labels):
"""Returns the cross entropy for the given `logits` and `labels`."""
predictions = tf.nn.softmax(logits)
cost = tf.losses.softmax_cross_entropy(onehot_labels=labels,
logits=logits)
return predictions, cost
def decay_weights(cost, weight_decay_rate):
"""Calculates the loss for l2 weight decay and adds it to `cost`."""
costs = []
for var in tf.trainable_variables():
costs.append(tf.nn.l2_loss(var))
cost += tf.multiply(weight_decay_rate, tf.add_n(costs))
return cost
def eval_child_model(session, model, data_loader, mode):
"""Evaluates `model` on held out data depending on `mode`.
Args:
session: TensorFlow session the model will be run with.
model: TensorFlow model that will be evaluated.
data_loader: DataSet object that contains data that `model` will
evaluate.
mode: Will `model` either evaluate validation or test data.
Returns:
Accuracy of `model` when evaluated on the specified dataset.
Raises:
ValueError: if invalid dataset `mode` is specified.
"""
if mode == 'val':
images = data_loader.val_images
labels = data_loader.val_labels
elif mode == 'test':
images = data_loader.test_images
labels = data_loader.test_labels
else:
raise ValueError('Not valid eval mode')
assert len(images) == len(labels)
tf.logging.info('model.batch_size is {}'.format(model.batch_size))
assert len(images) % model.batch_size == 0
eval_batches = int(len(images) / model.batch_size)
for i in range(eval_batches):
eval_images = images[i * model.batch_size:(i + 1) * model.batch_size]
eval_labels = labels[i * model.batch_size:(i + 1) * model.batch_size]
_ = session.run(
model.eval_op,
feed_dict={
model.images: eval_images,
model.labels: eval_labels,
})
return session.run(model.accuracy)
def cosine_lr(learning_rate, epoch, iteration, batches_per_epoch, total_epochs):
"""Cosine Learning rate.
Args:
learning_rate: Initial learning rate.
    epoch: Current epoch we are on (zero-based).
iteration: Current batch in this epoch.
batches_per_epoch: Batches per epoch.
total_epochs: Total epochs you are training for.
Returns:
The learning rate to be used for this current batch.
"""
t_total = total_epochs * batches_per_epoch
t_cur = float(epoch * batches_per_epoch + iteration)
return 0.5 * learning_rate * (1 + np.cos(np.pi * t_cur / t_total))
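# Illustrative note, not part of the original module: the schedule starts at
# the full learning rate and decays towards 0 over training, e.g. with
# batches_per_epoch=390 and total_epochs=200:
#   cosine_lr(0.1, 0, 0, 390, 200) == 0.1
#   cosine_lr(0.1, 100, 0, 390, 200) ~= 0.05  (halfway through training)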
def get_lr(curr_epoch, hparams, iteration=None):
"""Returns the learning rate during training based on the current epoch."""
assert iteration is not None
batches_per_epoch = int(hparams.train_size / hparams.batch_size)
lr = cosine_lr(hparams.lr, curr_epoch, iteration, batches_per_epoch,
hparams.num_epochs)
return lr
def run_epoch_training(session, model, data_loader, curr_epoch):
"""Runs one epoch of training for the model passed in.
Args:
session: TensorFlow session the model will be run with.
model: TensorFlow model that will be evaluated.
data_loader: DataSet object that contains data that `model` will
evaluate.
curr_epoch: How many of epochs of training have been done so far.
Returns:
The accuracy of 'model' on the training set
"""
steps_per_epoch = int(model.hparams.train_size / model.hparams.batch_size)
tf.logging.info('steps per epoch: {}'.format(steps_per_epoch))
curr_step = session.run(model.global_step)
assert curr_step % steps_per_epoch == 0
# Get the current learning rate for the model based on the current epoch
curr_lr = get_lr(curr_epoch, model.hparams, iteration=0)
tf.logging.info('lr of {} for epoch {}'.format(curr_lr, curr_epoch))
for step in xrange(steps_per_epoch):
curr_lr = get_lr(curr_epoch, model.hparams, iteration=(step + 1))
# Update the lr rate variable to the current LR.
model.lr_rate_ph.load(curr_lr, session=session)
if step % 20 == 0:
tf.logging.info('Training {}/{}'.format(step, steps_per_epoch))
train_images, train_labels = data_loader.next_batch()
_, step, _ = session.run(
[model.train_op, model.global_step, model.eval_op],
feed_dict={
model.images: train_images,
model.labels: train_labels,
})
train_accuracy = session.run(model.accuracy)
tf.logging.info('Train accuracy: {}'.format(train_accuracy))
return train_accuracy
| 5,376 | 34.846667 | 80 | py |
models | models-master/research/rebar/download_data.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download MNIST, Omniglot datasets for Rebar."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import urllib
import gzip
import os
import config
import struct
import numpy as np
import cPickle as pickle
import datasets
MNIST_URL = 'see README'
MNIST_BINARIZED_URL = 'see README'
OMNIGLOT_URL = 'see README'
MNIST_FLOAT_TRAIN = 'train-images-idx3-ubyte'
def load_mnist_float(local_filename):
with open(local_filename, 'rb') as f:
f.seek(4)
nimages, rows, cols = struct.unpack('>iii', f.read(12))
dim = rows*cols
images = np.fromfile(f, dtype=np.dtype(np.ubyte))
images = (images/255.0).astype('float32').reshape((nimages, dim))
return images
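# Note on the parsing above (added for clarity): the raw MNIST image file is in
# IDX3 format, whose header is four big-endian int32 values: a magic number,
# the number of images, the number of rows and the number of columns. The
# f.seek(4) skips the magic number and struct.unpack('>iii', ...) reads the
# remaining three; the pixel bytes that follow are rescaled to [0, 1] and
# reshaped to one flattened row per image.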
if __name__ == '__main__':
if not os.path.exists(config.DATA_DIR):
os.makedirs(config.DATA_DIR)
# Get MNIST and convert to npy file
local_filename = os.path.join(config.DATA_DIR, MNIST_FLOAT_TRAIN)
if not os.path.exists(local_filename):
urllib.urlretrieve("%s/%s.gz" % (MNIST_URL, MNIST_FLOAT_TRAIN), local_filename+'.gz')
with gzip.open(local_filename+'.gz', 'rb') as f:
file_content = f.read()
with open(local_filename, 'wb') as f:
f.write(file_content)
os.remove(local_filename+'.gz')
mnist_float_train = load_mnist_float(local_filename)[:-10000]
# save in a nice format
np.save(os.path.join(config.DATA_DIR, config.MNIST_FLOAT), mnist_float_train)
# Get binarized MNIST
splits = ['train', 'valid', 'test']
mnist_binarized = []
for split in splits:
filename = 'binarized_mnist_%s.amat' % split
url = '%s/binarized_mnist_%s.amat' % (MNIST_BINARIZED_URL, split)
local_filename = os.path.join(config.DATA_DIR, filename)
if not os.path.exists(local_filename):
urllib.urlretrieve(url, local_filename)
with open(local_filename, 'rb') as f:
      data = np.array([map(int, line.split()) for line in f.readlines()])
      mnist_binarized.append((data.astype('float32'), None))
# save in a nice format
  # Write in binary mode so the pickled data round-trips on all platforms.
  with open(os.path.join(config.DATA_DIR, config.MNIST_BINARIZED), 'wb') as out:
pickle.dump(mnist_binarized, out)
# Get Omniglot
local_filename = os.path.join(config.DATA_DIR, config.OMNIGLOT)
if not os.path.exists(local_filename):
urllib.urlretrieve(OMNIGLOT_URL,
local_filename)
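# Summary of the artifacts written above (added for clarity): the float MNIST
# training images are stored as a single .npy array, the three binarized MNIST
# splits are pickled together as a list of (images, None) tuples in the order
# train/valid/test, and the Omniglot file is kept exactly as downloaded; all
# paths come from the config module.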
| 3,002 | 32.366667 | 116 | py |
models | models-master/research/rebar/utils.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic data management and plotting utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cPickle as pickle
import getpass
import numpy as np
import gc
import tensorflow as tf
#
# Python utilities
#
def exp_moving_average(x, alpha=0.9):
res = []
mu = 0
alpha_factor = 1
for x_i in x:
mu += (1 - alpha)*(x_i - mu)
alpha_factor *= alpha
res.append(mu/(1 - alpha_factor))
return np.array(res)
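# Note (added for clarity): the division by (1 - alpha_factor) above is the
# standard bias correction for an exponential moving average, so early entries
# of the returned array are not pulled toward the zero initialization. For
# example, after the first element the update gives mu = (1 - alpha) * x_0 and
# alpha_factor = alpha, so mu / (1 - alpha_factor) = x_0 exactly.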
def sanitize(s):
return s.replace('.', '_')
#
# Tensorflow utilities
#
def softplus(x):
  '''Numerically stable softplus.

  Let m = max(0, x). Then

    softplus(x) = log(1 + exp(x)) = log(exp(0) + exp(x))
                = log(exp(m) * (exp(-m) + exp(x - m)))
                = m + log(exp(-m) + exp(x - m)).

  The term inside the log is guaranteed to be between 1 and 2.
  '''
m = tf.maximum(tf.zeros_like(x), x)
return m + tf.log(tf.exp(-m) + tf.exp(x - m))
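# Quick numerical sanity check (illustrative, not in the original file): for a
# large positive input the max term dominates, and for a large negative input
# the result decays to zero, with no overflow in either direction:
#
#   softplus(100.)  -> 100. + log(exp(-100.) + exp(0.)) ~= 100.
#   softplus(-100.) ->   0. + log(exp(0.) + exp(-100.)) ~= 0.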
def safe_log_prob(x, eps=1e-8):
return tf.log(tf.clip_by_value(x, eps, 1.0))
def rms(x):
return tf.sqrt(tf.reduce_mean(tf.square(x)))
def center(x):
mu = (tf.reduce_sum(x) - x)/tf.to_float(tf.shape(x)[0] - 1)
return x - mu
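# Note (added for clarity): `center` subtracts from each entry x_i the mean of
# the *other* n - 1 entries (a leave-one-out mean), so the subtracted baseline
# does not depend on x_i itself when the entries are independent samples. This
# is the usual construction for keeping a sample-based baseline from biasing a
# gradient estimator.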
def vectorize(grads_and_vars, set_none_to_zero=False, skip_none=False):
if set_none_to_zero:
return tf.concat([tf.reshape(g, [-1]) if g is not None else
tf.reshape(tf.zeros_like(v), [-1]) for g, v in grads_and_vars], 0)
elif skip_none:
return tf.concat([tf.reshape(g, [-1]) for g, v in grads_and_vars if g is not None], 0)
else:
return tf.concat([tf.reshape(g, [-1]) for g, v in grads_and_vars], 0)
def add_grads_and_vars(a, b):
'''Add grads_and_vars from two calls to tf.compute_gradients.'''
res = []
for (g_a, v_a), (g_b, v_b) in zip(a, b):
assert v_a == v_b
if g_a is None:
res.append((g_b, v_b))
elif g_b is None:
res.append((g_a, v_a))
else:
res.append((g_a + g_b, v_a))
return res
def binary_log_likelihood(y, log_y_hat):
"""Computes binary log likelihood.
Args:
    y: observed data (values in [0, 1]).
    log_y_hat: logits of the Bernoulli (binary) variables.
Returns:
log_likelihood
"""
return tf.reduce_sum(y*(-softplus(-log_y_hat)) +
(1 - y)*(-log_y_hat-softplus(-log_y_hat)),
1)
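# Derivation (added for clarity): with logits l = log_y_hat and p = sigmoid(l),
#   log(p)     = -log(1 + exp(-l)) = -softplus(-l)
#   log(1 - p) = -l - log(1 + exp(-l)) = -l - softplus(-l)
# so the expression above is sum_i y_i * log(p_i) + (1 - y_i) * log(1 - p_i),
# i.e. the Bernoulli log-likelihood summed over dimension 1 (per example).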
def cov(a, b):
"""Compute the sample covariance between two vectors."""
mu_a = tf.reduce_mean(a)
mu_b = tf.reduce_mean(b)
n = tf.to_float(tf.shape(a)[0])
return tf.reduce_sum((a - mu_a)*(b - mu_b))/(n - 1.0)
def corr(a, b):
return cov(a, b)*tf.rsqrt(cov(a, a))*tf.rsqrt(cov(b, b))
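# Note (added for clarity): `corr` is the sample Pearson correlation, written
# as cov(a, b) / (std(a) * std(b)) using tf.rsqrt of the two variances instead
# of an explicit division.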
def logSumExp(t, axis=0, keep_dims=False):
  '''Computes log(sum(exp(t))) numerically stably.
Args:
t: input tensor
axis: which axis to sum over
keep_dims: whether to keep the dim or not
Returns:
tensor with result
'''
m = tf.reduce_max(t, [axis])
res = m + tf.log(tf.reduce_sum(tf.exp(t - tf.expand_dims(m, axis)), [axis]))
if keep_dims:
return tf.expand_dims(res, axis)
else:
return res
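# Worked example (illustrative, not in the original file): subtracting the max
# before exponentiating avoids overflow. For t = [1000., 1000.],
#   naive: log(exp(1000.) + exp(1000.)) overflows to inf,
#   here:  m = 1000., result = 1000. + log(exp(0.) + exp(0.)) = 1000. + log(2).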
| 3,666 | 25.766423 | 91 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.