Dataset schema (per-column dtype and observed range; for string columns the
range is over string lengths, for numeric columns over values):

  repo             stringlengths   1     .. 99
  file             stringlengths   13    .. 215
  code             stringlengths   12    .. 59.2M
  file_length      int64           12    .. 59.2M
  avg_line_length  float64         3.82  .. 1.48M
  max_line_length  int64           12    .. 2.51M
  extension_type   stringclasses   1 value
repo: models
file: models-master/official/projects/qat/vision/n_bit/schemes.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Quantization schemes."""

from typing import Type

# Import libraries
import tensorflow as tf
import tensorflow_model_optimization as tfmot

from official.projects.qat.vision.n_bit import configs
from official.projects.qat.vision.n_bit import nn_blocks

keras = tf.keras

default_n_bit_transforms = (
    tfmot.quantization.keras.experimental.default_n_bit
    .default_n_bit_transforms)

_LayerNode = (
    tfmot.quantization.keras.graph_transformations.transforms.LayerNode)
_LayerPattern = (
    tfmot.quantization.keras.graph_transformations.transforms.LayerPattern)
_ModelTransformer = (
    tfmot.quantization.keras.graph_transformations.model_transformer
    .ModelTransformer)

_QUANTIZATION_WEIGHT_NAMES = [
    'output_max', 'output_min', 'optimizer_step',
    'kernel_min', 'kernel_max',
    'depthwise_kernel_min', 'depthwise_kernel_max',
    'reduce_mean_quantizer_vars_min', 'reduce_mean_quantizer_vars_max']

_ORIGINAL_WEIGHT_NAME = [
    'kernel', 'depthwise_kernel',
    'gamma', 'beta', 'moving_mean', 'moving_variance', 'bias']


class CustomLayerQuantize(
    tfmot.quantization.keras.graph_transformations.transforms.Transform):
  """Adds QAT support for a custom Keras layer."""

  def __init__(self,
               original_layer_pattern: str,
               quantized_layer_class: Type[keras.layers.Layer],
               num_bits_weight: int = 8,
               num_bits_activation: int = 8):
    super().__init__()
    self._original_layer_pattern = original_layer_pattern
    self._quantized_layer_class = quantized_layer_class
    self._num_bits_weight = num_bits_weight
    self._num_bits_activation = num_bits_activation

  def pattern(self) -> _LayerPattern:
    """See base class."""
    return _LayerPattern(self._original_layer_pattern)

  def _is_quantization_weight_name(self, name):
    simple_name = name.split('/')[-1].split(':')[0]
    if simple_name in _QUANTIZATION_WEIGHT_NAMES:
      return True
    if simple_name in _ORIGINAL_WEIGHT_NAME:
      return False
    raise ValueError(f'Variable name {simple_name} is not supported on '
                     f'CustomLayerQuantize({self._original_layer_pattern}) '
                     'transform.')

  def replacement(self, match_layer: _LayerNode) -> _LayerNode:
    """See base class."""
    bottleneck_layer = match_layer.layer
    bottleneck_config = bottleneck_layer['config']
    bottleneck_config['num_bits_weight'] = self._num_bits_weight
    bottleneck_config['num_bits_activation'] = self._num_bits_activation
    bottleneck_names_and_weights = list(match_layer.names_and_weights)
    quantized_layer = self._quantized_layer_class(**bottleneck_config)
    dummy_input_shape = [1, 1, 1, 1]
    quantized_layer.compute_output_shape(dummy_input_shape)
    quantized_names_and_weights = zip(
        [weight.name for weight in quantized_layer.weights],
        quantized_layer.get_weights())
    match_idx = 0
    names_and_weights = []
    for name_and_weight in quantized_names_and_weights:
      if not self._is_quantization_weight_name(name=name_and_weight[0]):
        name_and_weight = bottleneck_names_and_weights[match_idx]
        match_idx = match_idx + 1
      names_and_weights.append(name_and_weight)

    if match_idx != len(bottleneck_names_and_weights):
      raise ValueError('Only {}/{} of the bottleneck weights were '
                       'transformed.'.format(
                           match_idx, len(bottleneck_names_and_weights)))

    quantized_layer_config = keras.layers.serialize(quantized_layer)
    quantized_layer_config['name'] = quantized_layer_config['config']['name']
    layer_metadata = {
        'quantize_config':
            configs.DefaultNBitOutputQuantizeConfig(
                num_bits_weight=self._num_bits_weight,
                num_bits_activation=self._num_bits_activation)}

    return _LayerNode(
        quantized_layer_config,
        metadata=layer_metadata,
        names_and_weights=names_and_weights)


class QuantizeLayoutTransform(
    tfmot.quantization.keras.QuantizeLayoutTransform):
  """Default model transformations."""

  def __init__(self,
               num_bits_weight: int = 8,
               num_bits_activation: int = 8):
    self._num_bits_weight = num_bits_weight
    self._num_bits_activation = num_bits_activation

  def apply(self, model, layer_quantize_map):
    """Implements the default n-bit transforms.

    Currently this means the following.
      1. Pull activations into layers, and apply fuse activations. (TODO)
      2. Modify range in incoming layers for Concat. (TODO)
      3. Fuse Conv2D/DepthwiseConv2D + BN into single layer.

    Args:
      model: Keras model to be quantized.
      layer_quantize_map: Map with keys as layer names, and values as dicts
        containing custom `QuantizeConfig`s which may have been passed with
        layers.

    Returns:
      (Transformed Keras model to better match TensorFlow Lite backend,
       updated layer quantize map.)
    """
    transforms = [
        default_n_bit_transforms.InputLayerQuantize(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.SeparableConv1DQuantize(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.SeparableConvQuantize(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.Conv2DReshapeBatchNormReLUQuantize(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.Conv2DReshapeBatchNormActivationQuantize(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.Conv2DBatchNormReLUQuantize(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.Conv2DBatchNormActivationQuantize(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.Conv2DReshapeBatchNormQuantize(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.Conv2DBatchNormQuantize(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.ConcatTransform6Inputs(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.ConcatTransform5Inputs(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.ConcatTransform4Inputs(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.ConcatTransform3Inputs(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.ConcatTransform(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.LayerReLUQuantize(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        default_n_bit_transforms.LayerReluActivationQuantize(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        CustomLayerQuantize(
            'Vision>BottleneckBlock',
            nn_blocks.BottleneckBlockNBitQuantized,
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        CustomLayerQuantize(
            'Vision>InvertedBottleneckBlock',
            nn_blocks.InvertedBottleneckBlockNBitQuantized,
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation),
        CustomLayerQuantize(
            'Vision>Conv2DBNBlock',
            nn_blocks.Conv2DBNBlockNBitQuantized,
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation)
    ]
    return _ModelTransformer(
        model, transforms,
        set(layer_quantize_map.keys()),
        layer_quantize_map).transform()


class DefaultNBitQuantizeScheme(
    tfmot.quantization.keras.experimental.default_n_bit
    .DefaultNBitQuantizeScheme):
  """Default N-bit Scheme."""

  def __init__(self, num_bits_weight: int = 8, num_bits_activation: int = 8):
    super(DefaultNBitQuantizeScheme, self).__init__(
        num_bits_weight=num_bits_weight,
        num_bits_activation=num_bits_activation)
    self._num_bits_weight = num_bits_weight
    self._num_bits_activation = num_bits_activation

  def get_layout_transformer(self):
    return QuantizeLayoutTransform(
        num_bits_weight=self._num_bits_weight,
        num_bits_activation=self._num_bits_activation)
file_length: 9,733 | avg_line_length: 42.455357 | max_line_length: 103 | extension_type: py
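A minimal sketch of how a scheme like DefaultNBitQuantizeScheme above is typically wired into the TFMOT QAT flow. The toy model and the 4-bit widths are illustrative assumptions, and passing `scheme=` assumes a TFMOT release whose `quantize_apply` exposes that keyword:

import tensorflow as tf
import tensorflow_model_optimization as tfmot

from official.projects.qat.vision.n_bit import schemes

# A throwaway model just to exercise the transforms.
base = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
    tf.keras.layers.Conv2D(8, 3, padding='same'),
    tf.keras.layers.ReLU(),
])
# Mark every layer for quantization, then apply the custom n-bit scheme.
annotated = tfmot.quantization.keras.quantize_annotate_model(base)
quantized = tfmot.quantization.keras.quantize_apply(
    annotated,
    scheme=schemes.DefaultNBitQuantizeScheme(
        num_bits_weight=4, num_bits_activation=4))
quantized.summary()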
repo: models
file: models-master/official/projects/qat/vision/n_bit/nn_layers.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains common building blocks for neural networks."""

from typing import Any, Callable, Dict, Union

import tensorflow as tf
import tensorflow_model_optimization as tfmot

from official.modeling import tf_utils
from official.projects.qat.vision.n_bit import configs
from official.vision.modeling.layers import nn_layers

# Type annotations.
States = Dict[str, tf.Tensor]
Activation = Union[str, Callable]


class NoOpActivation:
  """No-op activation which simply returns the incoming tensor.

  This activation is required to distinguish this op from
  `keras.activations.linear`, which does the same thing. The main difference
  is that NoOpActivation should not have any quantize operation applied to it.
  """

  def __call__(self, x: tf.Tensor) -> tf.Tensor:
    return x

  def get_config(self) -> Dict[str, Any]:
    """Get a config of this object."""
    return {}

  def __eq__(self, other: Any) -> bool:
    return isinstance(other, NoOpActivation)

  def __ne__(self, other: Any) -> bool:
    return not self.__eq__(other)


def _quantize_wrapped_layer(cls, quantize_config):
  """Returns a constructor that wraps `cls` in a quantize wrapper."""

  def constructor(*arg, **kwargs):
    return tfmot.quantization.keras.QuantizeWrapperV2(
        cls(*arg, **kwargs), quantize_config)

  return constructor


@tf.keras.utils.register_keras_serializable(package='Vision')
class SqueezeExcitationNBitQuantized(tf.keras.layers.Layer):
  """Creates a squeeze and excitation layer."""

  def __init__(self,
               in_filters,
               out_filters,
               se_ratio,
               divisible_by=1,
               use_3d_input=False,
               kernel_initializer='VarianceScaling',
               kernel_regularizer=None,
               bias_regularizer=None,
               activation='relu',
               gating_activation='sigmoid',
               num_bits_weight=8,
               num_bits_activation=8,
               **kwargs):
    """Initializes a squeeze and excitation layer.

    Args:
      in_filters: An `int` number of filters of the input tensor.
      out_filters: An `int` number of filters of the output tensor.
      se_ratio: A `float` or None. If not None, se ratio for the squeeze and
        excitation layer.
      divisible_by: An `int` that ensures all inner dimensions are divisible
        by this number.
      use_3d_input: A `bool` of whether input is 2D or 3D image.
      kernel_initializer: A `str` of kernel_initializer for convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      activation: A `str` name of the activation function.
      gating_activation: A `str` name of the activation function for final
        gating function.
      num_bits_weight: An `int` number of bits for the weight. Default to 8.
      num_bits_activation: An `int` number of bits for the activation.
        Default to 8.
      **kwargs: Additional keyword arguments to be passed.
    """
    super().__init__(**kwargs)

    self._in_filters = in_filters
    self._out_filters = out_filters
    self._se_ratio = se_ratio
    self._divisible_by = divisible_by
    self._use_3d_input = use_3d_input
    self._activation = activation
    self._gating_activation = gating_activation
    self._kernel_initializer = kernel_initializer
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._num_bits_weight = num_bits_weight
    self._num_bits_activation = num_bits_activation
    if tf.keras.backend.image_data_format() == 'channels_last':
      if not use_3d_input:
        self._spatial_axis = [1, 2]
      else:
        self._spatial_axis = [1, 2, 3]
    else:
      if not use_3d_input:
        self._spatial_axis = [2, 3]
      else:
        self._spatial_axis = [2, 3, 4]
    self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
        tf_utils.get_activation(activation, use_keras_layer=True),
        configs.DefaultNBitActivationQuantizeConfig(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))
    self._gating_activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
        tf_utils.get_activation(gating_activation, use_keras_layer=True),
        configs.DefaultNBitActivationQuantizeConfig(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))

  def build(self, input_shape):
    conv2d_quantized = _quantize_wrapped_layer(
        tf.keras.layers.Conv2D,
        configs.DefaultNBitConvQuantizeConfig(
            ['kernel'], ['activation'], False,
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))
    conv2d_quantized_output_quantized = _quantize_wrapped_layer(
        tf.keras.layers.Conv2D,
        configs.DefaultNBitConvQuantizeConfig(
            ['kernel'], ['activation'], True,
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))
    num_reduced_filters = nn_layers.make_divisible(
        max(1, int(self._in_filters * self._se_ratio)),
        divisor=self._divisible_by)

    self._se_reduce = conv2d_quantized(
        filters=num_reduced_filters,
        kernel_size=1,
        strides=1,
        padding='same',
        use_bias=True,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activation=NoOpActivation())

    self._se_expand = conv2d_quantized_output_quantized(
        filters=self._out_filters,
        kernel_size=1,
        strides=1,
        padding='same',
        use_bias=True,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activation=NoOpActivation())

    self._multiply = tfmot.quantization.keras.QuantizeWrapperV2(
        tf.keras.layers.Multiply(),
        configs.DefaultNBitQuantizeConfig(
            [], [], True,
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))
    self._reduce_mean_quantizer = (
        tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
            num_bits=self._num_bits_activation,
            per_axis=False,
            symmetric=False,
            narrow_range=False))  # activation/output
    self._reduce_mean_quantizer_vars = self._reduce_mean_quantizer.build(
        None, 'reduce_mean_quantizer_vars', self)

    super().build(input_shape)

  def get_config(self):
    config = {
        'in_filters': self._in_filters,
        'out_filters': self._out_filters,
        'se_ratio': self._se_ratio,
        'divisible_by': self._divisible_by,
        'use_3d_input': self._use_3d_input,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'bias_regularizer': self._bias_regularizer,
        'activation': self._activation,
        'gating_activation': self._gating_activation,
        'num_bits_weight': self._num_bits_weight,
        'num_bits_activation': self._num_bits_activation
    }
    base_config = super().get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs, training=None):
    x = tf.reduce_mean(inputs, self._spatial_axis, keepdims=True)
    x = self._reduce_mean_quantizer(
        x, training, self._reduce_mean_quantizer_vars)
    x = self._activation_layer(self._se_reduce(x))
    x = self._gating_activation_layer(self._se_expand(x))
    x = self._multiply([x, inputs])
    return x
file_length: 8,359 | avg_line_length: 37.703704 | max_line_length: 80 | extension_type: py
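A small usage sketch for the layer above, under assumed shapes and a 0.25 squeeze ratio; it shows that the quantized squeeze-and-excitation gating rescales channels without changing the input shape:

import tensorflow as tf

from official.projects.qat.vision.n_bit import nn_layers as qat_nn_layers

se = qat_nn_layers.SqueezeExcitationNBitQuantized(
    in_filters=16, out_filters=16, se_ratio=0.25,
    num_bits_weight=4, num_bits_activation=4)
x = tf.random.normal([2, 8, 8, 16])
y = se(x, training=False)
assert y.shape == x.shape  # channel gating rescales; shape is preserved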
repo: models
file: models-master/official/projects/qat/vision/n_bit/nn_blocks_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for nn_blocks."""

from typing import Any, Iterable, Tuple

# Import libraries
from absl.testing import parameterized
import tensorflow as tf

from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.qat.vision.n_bit import nn_blocks


def distribution_strategy_combinations() -> Iterable[Tuple[Any, ...]]:
  """Returns the combinations of end-to-end tests to run."""
  return combinations.combine(
      distribution=[
          strategy_combinations.default_strategy,
          strategy_combinations.cloud_tpu_strategy,
          strategy_combinations.one_device_strategy_gpu,
      ],
  )


class NNBlocksTest(parameterized.TestCase, tf.test.TestCase):

  @parameterized.parameters(
      (nn_blocks.BottleneckBlockNBitQuantized, 1, False, 0.0, None, 4, 4),
      (nn_blocks.BottleneckBlockNBitQuantized, 2, True, 0.2, 0.25, 4, 4),
  )
  def test_bottleneck_block_creation(self, block_fn, strides, use_projection,
                                     stochastic_depth_drop_rate, se_ratio,
                                     num_bits_weight, num_bits_activation):
    input_size = 128
    filter_size = 256
    inputs = tf.keras.Input(
        shape=(input_size, input_size, filter_size * 4), batch_size=1)
    block = block_fn(
        filter_size,
        strides,
        use_projection=use_projection,
        se_ratio=se_ratio,
        stochastic_depth_drop_rate=stochastic_depth_drop_rate,
        num_bits_weight=num_bits_weight,
        num_bits_activation=num_bits_activation)

    features = block(inputs)

    self.assertAllEqual(
        [1, input_size // strides, input_size // strides, filter_size * 4],
        features.shape.as_list())

  @parameterized.parameters(
      (nn_blocks.InvertedBottleneckBlockNBitQuantized, 1, 1, None, None, 4, 4),
      (nn_blocks.InvertedBottleneckBlockNBitQuantized, 6, 1, None, None, 4, 4),
      (nn_blocks.InvertedBottleneckBlockNBitQuantized, 1, 2, None, None, 4, 4),
      (nn_blocks.InvertedBottleneckBlockNBitQuantized, 1, 1, 0.2, None, 4, 4),
      (nn_blocks.InvertedBottleneckBlockNBitQuantized, 1, 1, None, 0.2, 4, 4),
  )
  def test_invertedbottleneck_block_creation(
      self, block_fn, expand_ratio, strides, se_ratio,
      stochastic_depth_drop_rate, num_bits_weight, num_bits_activation):
    input_size = 128
    in_filters = 24
    out_filters = 40
    inputs = tf.keras.Input(
        shape=(input_size, input_size, in_filters), batch_size=1)
    block = block_fn(
        in_filters=in_filters,
        out_filters=out_filters,
        expand_ratio=expand_ratio,
        strides=strides,
        se_ratio=se_ratio,
        stochastic_depth_drop_rate=stochastic_depth_drop_rate,
        num_bits_weight=num_bits_weight,
        num_bits_activation=num_bits_activation)

    features = block(inputs)

    self.assertAllEqual(
        [1, input_size // strides, input_size // strides, out_filters],
        features.shape.as_list())


if __name__ == '__main__':
  tf.test.main()
file_length: 3,653 | avg_line_length: 35.54 | max_line_length: 79 | extension_type: py
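For reference, one of the tested configurations can also be exercised directly outside the absl harness; the shapes below mirror the first test_bottleneck_block_creation parameter set:

import tensorflow as tf

from official.projects.qat.vision.n_bit import nn_blocks

# Bottleneck blocks take (and emit) 4x the base filter count.
inputs = tf.keras.Input(shape=(128, 128, 1024), batch_size=1)
block = nn_blocks.BottleneckBlockNBitQuantized(
    256, strides=1, use_projection=False,
    num_bits_weight=4, num_bits_activation=4)
features = block(inputs)
print(features.shape)  # expected: (1, 128, 128, 1024)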
repo: models
file: models-master/official/projects/qat/vision/n_bit/configs_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for configs.py."""

# Import libraries
import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot

from official.modeling import tf_utils
from official.projects.qat.vision.n_bit import configs


class _TestHelper(object):

  def _convert_list(self, list_of_tuples):
    """Transforms a list of 2-tuples to a tuple of 2 lists.

    `QuantizeConfig` methods return a list of 2-tuples in the form
    [(weight1, quantizer1), (weight2, quantizer2)]. This function converts
    it into a 2-tuple of lists: ([weight1, weight2], [quantizer1, quantizer2]).

    Args:
      list_of_tuples: List of 2-tuples.

    Returns:
      2-tuple of lists.
    """
    list1 = []
    list2 = []
    for a, b in list_of_tuples:
      list1.append(a)
      list2.append(b)

    return list1, list2

  # TODO(pulkitb): Consider asserting on full equality for quantizers.

  def _assert_weight_quantizers(self, quantizer_list):
    for quantizer in quantizer_list:
      self.assertIsInstance(
          quantizer,
          tfmot.quantization.keras.quantizers.LastValueQuantizer)

  def _assert_activation_quantizers(self, quantizer_list):
    for quantizer in quantizer_list:
      self.assertIsInstance(
          quantizer,
          tfmot.quantization.keras.quantizers.MovingAverageQuantizer)

  def _assert_kernel_equality(self, a, b):
    self.assertAllEqual(a.numpy(), b.numpy())


class DefaultNBitQuantizeConfigTest(tf.test.TestCase, _TestHelper):

  def _simple_dense_layer(self):
    layer = tf.keras.layers.Dense(2)
    layer.build(input_shape=(3,))
    return layer

  def testGetsQuantizeWeightsAndQuantizers(self):
    layer = self._simple_dense_layer()
    num_bits_weight = 4
    num_bits_activation = 4

    quantize_config = configs.DefaultNBitQuantizeConfig(
        ['kernel'], ['activation'], False,
        num_bits_weight, num_bits_activation)
    (weights, weight_quantizers) = self._convert_list(
        quantize_config.get_weights_and_quantizers(layer))

    self._assert_weight_quantizers(weight_quantizers)
    self.assertEqual([layer.kernel], weights)

  def testGetsQuantizeActivationsAndQuantizers(self):
    layer = self._simple_dense_layer()
    num_bits_weight = 4
    num_bits_activation = 4

    quantize_config = configs.DefaultNBitQuantizeConfig(
        ['kernel'], ['activation'], False,
        num_bits_weight, num_bits_activation)
    (activations, activation_quantizers) = self._convert_list(
        quantize_config.get_activations_and_quantizers(layer))

    self._assert_activation_quantizers(activation_quantizers)
    self.assertEqual([layer.activation], activations)

  def testSetsQuantizeWeights(self):
    layer = self._simple_dense_layer()
    quantize_kernel = tf.keras.backend.variable(
        np.ones(layer.kernel.shape.as_list()))
    num_bits_weight = 4
    num_bits_activation = 4

    quantize_config = configs.DefaultNBitQuantizeConfig(
        ['kernel'], ['activation'], False,
        num_bits_weight, num_bits_activation)
    quantize_config.set_quantize_weights(layer, [quantize_kernel])

    self._assert_kernel_equality(layer.kernel, quantize_kernel)

  def testSetsQuantizeActivations(self):
    layer = self._simple_dense_layer()
    quantize_activation = tf.keras.activations.relu
    num_bits_weight = 4
    num_bits_activation = 4

    quantize_config = configs.DefaultNBitQuantizeConfig(
        ['kernel'], ['activation'], False,
        num_bits_weight, num_bits_activation)
    quantize_config.set_quantize_activations(layer, [quantize_activation])

    self.assertEqual(layer.activation, quantize_activation)

  def testSetsQuantizeWeights_ErrorOnWrongNumberOfWeights(self):
    layer = self._simple_dense_layer()
    quantize_kernel = tf.keras.backend.variable(
        np.ones(layer.kernel.shape.as_list()))
    num_bits_weight = 4
    num_bits_activation = 4

    quantize_config = configs.DefaultNBitQuantizeConfig(
        ['kernel'], ['activation'], False,
        num_bits_weight, num_bits_activation)

    with self.assertRaises(ValueError):
      quantize_config.set_quantize_weights(layer, [])

    with self.assertRaises(ValueError):
      quantize_config.set_quantize_weights(
          layer, [quantize_kernel, quantize_kernel])

  def testSetsQuantizeWeights_ErrorOnWrongShapeOfWeight(self):
    layer = self._simple_dense_layer()
    quantize_kernel = tf.keras.backend.variable(np.ones([1, 2]))
    num_bits_weight = 4
    num_bits_activation = 4

    quantize_config = configs.DefaultNBitQuantizeConfig(
        ['kernel'], ['activation'], False,
        num_bits_weight, num_bits_activation)

    with self.assertRaises(ValueError):
      quantize_config.set_quantize_weights(layer, [quantize_kernel])

  def testSetsQuantizeActivations_ErrorOnWrongNumberOfActivations(self):
    layer = self._simple_dense_layer()
    quantize_activation = tf.keras.activations.relu
    num_bits_weight = 4
    num_bits_activation = 4

    quantize_config = configs.DefaultNBitQuantizeConfig(
        ['kernel'], ['activation'], False,
        num_bits_weight, num_bits_activation)

    with self.assertRaises(ValueError):
      quantize_config.set_quantize_activations(layer, [])

    with self.assertRaises(ValueError):
      quantize_config.set_quantize_activations(
          layer, [quantize_activation, quantize_activation])

  def testGetsResultQuantizers_ReturnsQuantizer(self):
    layer = self._simple_dense_layer()
    num_bits_weight = 4
    num_bits_activation = 4
    quantize_config = configs.DefaultNBitQuantizeConfig(
        [], [], True, num_bits_weight, num_bits_activation)

    output_quantizers = quantize_config.get_output_quantizers(layer)

    self.assertLen(output_quantizers, 1)
    self._assert_activation_quantizers(output_quantizers)

  def testGetsResultQuantizers_EmptyWhenFalse(self):
    layer = self._simple_dense_layer()
    num_bits_weight = 4
    num_bits_activation = 4
    quantize_config = configs.DefaultNBitQuantizeConfig(
        [], [], False, num_bits_weight, num_bits_activation)

    output_quantizers = quantize_config.get_output_quantizers(layer)

    self.assertEqual([], output_quantizers)

  def testSerialization(self):
    num_bits_weight = 4
    num_bits_activation = 4
    quantize_config = configs.DefaultNBitQuantizeConfig(
        ['kernel'], ['activation'], False,
        num_bits_weight, num_bits_activation)

    expected_config = {
        'class_name': 'DefaultNBitQuantizeConfig',
        'config': {
            'weight_attrs': ['kernel'],
            'activation_attrs': ['activation'],
            'quantize_output': False,
            'num_bits_weight': 4,
            'num_bits_activation': 4
        }
    }

    serialized_quantize_config = tf_utils.serialize_keras_object(
        quantize_config
    )

    self.assertEqual(expected_config, serialized_quantize_config)

    quantize_config_from_config = (
        tf_utils.deserialize_keras_object(
            serialized_quantize_config,
            module_objects=globals(),
            custom_objects=configs._types_dict(),
        )
    )

    self.assertEqual(quantize_config, quantize_config_from_config)


if __name__ == '__main__':
  tf.test.main()
file_length: 7,762 | avg_line_length: 32.752174 | max_line_length: 80 | extension_type: py
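A compact round trip mirroring testSerialization, using only get_config/from_config from configs.py; the 4-bit widths are the same ones the tests use:

from official.projects.qat.vision.n_bit import configs

cfg = configs.DefaultNBitQuantizeConfig(
    ['kernel'], ['activation'], False,
    num_bits_weight=4, num_bits_activation=4)
# Reconstruct from the serialized dict and compare.
restored = configs.DefaultNBitQuantizeConfig.from_config(cfg.get_config())
assert cfg == restored  # relies on the __eq__ defined in configs.py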
repo: models
file: models-master/official/projects/qat/vision/n_bit/nn_blocks.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains quantized neural blocks for the QAT."""

from typing import Any, Dict, Optional, Sequence, Union

# Import libraries
from absl import logging
import tensorflow as tf
import tensorflow_model_optimization as tfmot

from official.modeling import tf_utils
from official.projects.qat.vision.n_bit import configs
from official.projects.qat.vision.n_bit import nn_layers as qat_nn_layers
from official.vision.modeling.layers import nn_layers


class NoOpActivation:
  """No-op activation which simply returns the incoming tensor.

  This activation is required to distinguish this op from
  `keras.activations.linear`, which does the same thing. The main difference
  is that NoOpActivation should not have any quantize operation applied to it.
  """

  def __call__(self, x: tf.Tensor) -> tf.Tensor:
    return x

  def get_config(self) -> Dict[str, Any]:
    """Get a config of this object."""
    return {}

  def __eq__(self, other: Any) -> bool:
    if not other or not isinstance(other, NoOpActivation):
      return False
    return True

  def __ne__(self, other: Any) -> bool:
    return not self.__eq__(other)


def _quantize_wrapped_layer(cls, quantize_config):
  """Returns a constructor that wraps `cls` in a quantize wrapper."""

  def constructor(*arg, **kwargs):
    return tfmot.quantization.keras.QuantizeWrapperV2(
        cls(*arg, **kwargs), quantize_config)

  return constructor


# This class is copied from modeling.layers.nn_blocks.BottleneckBlock and
# applies QAT.
@tf.keras.utils.register_keras_serializable(package='Vision')
class BottleneckBlockNBitQuantized(tf.keras.layers.Layer):
  """A quantized standard bottleneck block."""

  def __init__(self,
               filters: int,
               strides: int,
               dilation_rate: int = 1,
               use_projection: bool = False,
               se_ratio: Optional[float] = None,
               resnetd_shortcut: bool = False,
               stochastic_depth_drop_rate: Optional[float] = None,
               kernel_initializer: str = 'VarianceScaling',
               kernel_regularizer: tf.keras.regularizers.Regularizer = None,
               bias_regularizer: tf.keras.regularizers.Regularizer = None,
               activation: str = 'relu',
               use_sync_bn: bool = False,
               norm_momentum: float = 0.99,
               norm_epsilon: float = 0.001,
               bn_trainable: bool = True,
               num_bits_weight: int = 8,
               num_bits_activation: int = 8,  # pytype: disable=annotation-type-mismatch  # typed-keras
               **kwargs):
    """Initializes a standard bottleneck block with BN after convolutions.

    Args:
      filters: An `int` number of filters for the first two convolutions. Note
        that the third and final convolution will use 4 times as many filters.
      strides: An `int` block stride. If greater than 1, this block will
        ultimately downsample the input.
      dilation_rate: An `int` dilation_rate of convolutions. Default to 1.
      use_projection: A `bool` for whether this block should use a projection
        shortcut (versus the default identity shortcut). This is usually `True`
        for the first block of a block group, which may change the number of
        filters and the resolution.
      se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer.
      resnetd_shortcut: A `bool`. If True, apply the resnetd style modification
        to the shortcut connection.
      stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
        the stochastic depth layer.
      kernel_initializer: A `str` of kernel_initializer for convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      activation: A `str` name of the activation function.
      use_sync_bn: A `bool`. If True, use synchronized batch normalization.
      norm_momentum: A `float` of normalization momentum for the moving
        average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      bn_trainable: A `bool` that indicates whether batch norm layers should be
        trainable. Default to True.
      num_bits_weight: An `int` number of bits for the weight. Default to 8.
      num_bits_activation: An `int` number of bits for the activation. Default
        to 8.
      **kwargs: Additional keyword arguments to be passed.
    """
    super().__init__(**kwargs)

    self._filters = filters
    self._strides = strides
    self._dilation_rate = dilation_rate
    self._use_projection = use_projection
    self._se_ratio = se_ratio
    self._resnetd_shortcut = resnetd_shortcut
    self._use_sync_bn = use_sync_bn
    self._activation = activation
    self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
    self._kernel_initializer = kernel_initializer
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._num_bits_weight = num_bits_weight
    self._num_bits_activation = num_bits_activation
    if use_sync_bn:
      self._norm = _quantize_wrapped_layer(
          tf.keras.layers.experimental.SyncBatchNormalization,
          configs.NoOpQuantizeConfig())
      self._norm_with_quantize = _quantize_wrapped_layer(
          tf.keras.layers.experimental.SyncBatchNormalization,
          configs.DefaultNBitOutputQuantizeConfig(
              num_bits_weight=self._num_bits_weight,
              num_bits_activation=self._num_bits_activation))
    else:
      self._norm = _quantize_wrapped_layer(
          tf.keras.layers.BatchNormalization,
          configs.NoOpQuantizeConfig())
      self._norm_with_quantize = _quantize_wrapped_layer(
          tf.keras.layers.BatchNormalization,
          configs.DefaultNBitOutputQuantizeConfig(
              num_bits_weight=self._num_bits_weight,
              num_bits_activation=self._num_bits_activation))
    if tf.keras.backend.image_data_format() == 'channels_last':
      self._bn_axis = -1
    else:
      self._bn_axis = 1
    self._bn_trainable = bn_trainable

  def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
    """Build variables and child layers to prepare for calling."""
    conv2d_quantized = _quantize_wrapped_layer(
        tf.keras.layers.Conv2D,
        configs.DefaultNBitConvQuantizeConfig(
            ['kernel'], ['activation'], False,
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))
    if self._use_projection:
      if self._resnetd_shortcut:
        self._shortcut0 = tf.keras.layers.AveragePooling2D(
            pool_size=2, strides=self._strides, padding='same')
        self._shortcut1 = conv2d_quantized(
            filters=self._filters * 4,
            kernel_size=1,
            strides=1,
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())
      else:
        self._shortcut = conv2d_quantized(
            filters=self._filters * 4,
            kernel_size=1,
            strides=self._strides,
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activation=NoOpActivation())

      self._norm0 = self._norm_with_quantize(
          axis=self._bn_axis,
          momentum=self._norm_momentum,
          epsilon=self._norm_epsilon,
          trainable=self._bn_trainable)

    self._conv1 = conv2d_quantized(
        filters=self._filters,
        kernel_size=1,
        strides=1,
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activation=NoOpActivation())
    self._norm1 = self._norm(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon,
        trainable=self._bn_trainable)
    self._activation1 = tfmot.quantization.keras.QuantizeWrapperV2(
        tf_utils.get_activation(self._activation, use_keras_layer=True),
        configs.DefaultNBitActivationQuantizeConfig(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))

    self._conv2 = conv2d_quantized(
        filters=self._filters,
        kernel_size=3,
        strides=self._strides,
        dilation_rate=self._dilation_rate,
        padding='same',
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activation=NoOpActivation())
    self._norm2 = self._norm(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon,
        trainable=self._bn_trainable)
    self._activation2 = tfmot.quantization.keras.QuantizeWrapperV2(
        tf_utils.get_activation(self._activation, use_keras_layer=True),
        configs.DefaultNBitActivationQuantizeConfig(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))

    self._conv3 = conv2d_quantized(
        filters=self._filters * 4,
        kernel_size=1,
        strides=1,
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activation=NoOpActivation())
    self._norm3 = self._norm_with_quantize(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon,
        trainable=self._bn_trainable)
    self._activation3 = tfmot.quantization.keras.QuantizeWrapperV2(
        tf_utils.get_activation(self._activation, use_keras_layer=True),
        configs.DefaultNBitActivationQuantizeConfig(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))

    if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
      self._squeeze_excitation = qat_nn_layers.SqueezeExcitationNBitQuantized(
          in_filters=self._filters * 4,
          out_filters=self._filters * 4,
          se_ratio=self._se_ratio,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          bias_regularizer=self._bias_regularizer,
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation)
    else:
      self._squeeze_excitation = None

    if self._stochastic_depth_drop_rate:
      self._stochastic_depth = nn_layers.StochasticDepth(
          self._stochastic_depth_drop_rate)
    else:
      self._stochastic_depth = None
    self._add = tfmot.quantization.keras.QuantizeWrapperV2(
        tf.keras.layers.Add(),
        configs.DefaultNBitQuantizeConfig(
            [], [], True,
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))

    super().build(input_shape)

  def get_config(self) -> Dict[str, Any]:
    """Get a config of this layer."""
    config = {
        'filters': self._filters,
        'strides': self._strides,
        'dilation_rate': self._dilation_rate,
        'use_projection': self._use_projection,
        'se_ratio': self._se_ratio,
        'resnetd_shortcut': self._resnetd_shortcut,
        'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'bias_regularizer': self._bias_regularizer,
        'activation': self._activation,
        'use_sync_bn': self._use_sync_bn,
        'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon,
        'bn_trainable': self._bn_trainable,
        'num_bits_weight': self._num_bits_weight,
        'num_bits_activation': self._num_bits_activation
    }
    base_config = super().get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(
      self,
      inputs: tf.Tensor,
      training: Optional[Union[bool, tf.Tensor]] = None) -> tf.Tensor:
    """Run the BottleneckBlockNBitQuantized logics."""
    shortcut = inputs
    if self._use_projection:
      if self._resnetd_shortcut:
        shortcut = self._shortcut0(shortcut)
        shortcut = self._shortcut1(shortcut)
      else:
        shortcut = self._shortcut(shortcut)
      shortcut = self._norm0(shortcut)

    x = self._conv1(inputs)
    x = self._norm1(x)
    x = self._activation1(x)

    x = self._conv2(x)
    x = self._norm2(x)
    x = self._activation2(x)

    x = self._conv3(x)
    x = self._norm3(x)

    if self._squeeze_excitation:
      x = self._squeeze_excitation(x)

    if self._stochastic_depth:
      x = self._stochastic_depth(x, training=training)

    x = self._add([x, shortcut])
    return self._activation3(x)


# This class is copied from modeling.backbones.mobilenet.Conv2DBNBlock and
# applies QAT.
@tf.keras.utils.register_keras_serializable(package='Vision')
class Conv2DBNBlockNBitQuantized(tf.keras.layers.Layer):
  """A quantized convolution block with batch normalization."""

  def __init__(
      self,
      filters: int,
      kernel_size: int = 3,
      strides: int = 1,
      use_bias: bool = False,
      activation: str = 'relu6',
      kernel_initializer: str = 'VarianceScaling',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      use_normalization: bool = True,
      use_sync_bn: bool = False,
      norm_momentum: float = 0.99,
      norm_epsilon: float = 0.001,
      num_bits_weight: int = 8,
      num_bits_activation: int = 8,
      **kwargs):
    """A convolution block with batch normalization.

    Args:
      filters: An `int` number of filters of the convolution layer.
      kernel_size: An `int` specifying the height and width of the 2D
        convolution window.
      strides: An `int` of block stride. If greater than 1, this block will
        ultimately downsample the input.
      use_bias: If True, use bias in the convolution layer.
      activation: A `str` name of the activation function.
      kernel_initializer: A `str` for kernel initializer of convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      use_normalization: If True, use batch normalization.
      use_sync_bn: If True, use synchronized batch normalization.
      norm_momentum: A `float` of normalization momentum for the moving
        average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      num_bits_weight: An `int` number of bits for the weight. Default to 8.
      num_bits_activation: An `int` number of bits for the activation. Default
        to 8.
      **kwargs: Additional keyword arguments to be passed.
    """
    super().__init__(**kwargs)
    self._filters = filters
    self._kernel_size = kernel_size
    self._strides = strides
    self._activation = activation
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._use_normalization = use_normalization
    self._use_sync_bn = use_sync_bn
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
    self._num_bits_weight = num_bits_weight
    self._num_bits_activation = num_bits_activation
    if use_sync_bn:
      self._norm = _quantize_wrapped_layer(
          tf.keras.layers.experimental.SyncBatchNormalization,
          configs.NoOpQuantizeConfig())
    else:
      self._norm = _quantize_wrapped_layer(
          tf.keras.layers.BatchNormalization,
          configs.NoOpQuantizeConfig())
    if tf.keras.backend.image_data_format() == 'channels_last':
      self._bn_axis = -1
    else:
      self._bn_axis = 1

  def get_config(self) -> Dict[str, Any]:
    """Get a config of this layer."""
    config = {
        'filters': self._filters,
        'strides': self._strides,
        'kernel_size': self._kernel_size,
        'use_bias': self._use_bias,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'bias_regularizer': self._bias_regularizer,
        'activation': self._activation,
        'use_sync_bn': self._use_sync_bn,
        'use_normalization': self._use_normalization,
        'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon,
        'num_bits_weight': self._num_bits_weight,
        'num_bits_activation': self._num_bits_activation
    }
    base_config = super().get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
    """Build variables and child layers to prepare for calling."""
    conv2d_quantized = _quantize_wrapped_layer(
        tf.keras.layers.Conv2D,
        configs.DefaultNBitConvQuantizeConfig(
            ['kernel'], ['activation'], False,
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))
    self._conv0 = conv2d_quantized(
        filters=self._filters,
        kernel_size=self._kernel_size,
        strides=self._strides,
        padding='same',
        use_bias=self._use_bias,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activation=NoOpActivation())
    if self._use_normalization:
      self._norm0 = self._norm(
          axis=self._bn_axis,
          momentum=self._norm_momentum,
          epsilon=self._norm_epsilon)
    self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
        tf_utils.get_activation(self._activation, use_keras_layer=True),
        configs.DefaultNBitActivationQuantizeConfig(
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))

    super(Conv2DBNBlockNBitQuantized, self).build(input_shape)

  def call(
      self,
      inputs: tf.Tensor,
      training: Optional[Union[bool, tf.Tensor]] = None) -> tf.Tensor:
    """Run the Conv2DBNBlockNBitQuantized logics."""
    x = self._conv0(inputs)
    if self._use_normalization:
      x = self._norm0(x)
    return self._activation_layer(x)


@tf.keras.utils.register_keras_serializable(package='Vision')
class InvertedBottleneckBlockNBitQuantized(tf.keras.layers.Layer):
  """A quantized inverted bottleneck block."""

  def __init__(self,
               in_filters,
               out_filters,
               expand_ratio,
               strides,
               kernel_size=3,
               se_ratio=None,
               stochastic_depth_drop_rate=None,
               kernel_initializer='VarianceScaling',
               kernel_regularizer=None,
               bias_regularizer=None,
               activation='relu',
               se_inner_activation='relu',
               se_gating_activation='sigmoid',
               expand_se_in_filters=False,
               depthwise_activation=None,
               use_sync_bn=False,
               dilation_rate=1,
               divisible_by=1,
               regularize_depthwise=False,
               use_depthwise=True,
               use_residual=True,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               num_bits_weight: int = 8,
               num_bits_activation: int = 8,
               **kwargs):
    """Initializes an inverted bottleneck block with BN after convolutions.

    Args:
      in_filters: An `int` number of filters of the input tensor.
      out_filters: An `int` number of filters of the output tensor.
      expand_ratio: An `int` of expand_ratio for an inverted bottleneck block.
      strides: An `int` block stride. If greater than 1, this block will
        ultimately downsample the input.
      kernel_size: An `int` kernel_size of the depthwise conv layer.
      se_ratio: A `float` or None. If not None, se ratio for the squeeze and
        excitation layer.
      stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
        the stochastic depth layer.
      kernel_initializer: A `str` of kernel_initializer for convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      activation: A `str` name of the activation function.
      se_inner_activation: A `str` name of squeeze-excitation inner activation.
      se_gating_activation: A `str` name of squeeze-excitation gating
        activation.
      expand_se_in_filters: A `bool` of whether or not to expand in_filter in
        squeeze and excitation layer.
      depthwise_activation: A `str` name of the activation function for
        depthwise only.
      use_sync_bn: A `bool`. If True, use synchronized batch normalization.
      dilation_rate: An `int` that specifies the dilation rate to use for
        dilated convolution; the same value is used for all spatial
        dimensions.
      divisible_by: An `int` that ensures all inner dimensions are divisible
        by this number.
      regularize_depthwise: A `bool` of whether or not apply regularization on
        depthwise.
      use_depthwise: A `bool`. If True, use the depthwise convolution path;
        if False, use fused convolutions instead of depthwise.
      use_residual: A `bool` of whether to include residual connection between
        input and output.
      norm_momentum: A `float` of normalization momentum for the moving
        average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      num_bits_weight: An `int` number of bits for the weight. Default to 8.
      num_bits_activation: An `int` number of bits for the activation. Default
        to 8.
      **kwargs: Additional keyword arguments to be passed.
    """
    super().__init__(**kwargs)

    self._in_filters = in_filters
    self._out_filters = out_filters
    self._expand_ratio = expand_ratio
    self._strides = strides
    self._kernel_size = kernel_size
    self._se_ratio = se_ratio
    self._divisible_by = divisible_by
    self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
    self._dilation_rate = dilation_rate
    self._use_sync_bn = use_sync_bn
    self._regularize_depthwise = regularize_depthwise
    self._use_depthwise = use_depthwise
    self._use_residual = use_residual
    self._activation = activation
    self._se_inner_activation = se_inner_activation
    self._se_gating_activation = se_gating_activation
    self._depthwise_activation = depthwise_activation
    self._kernel_initializer = kernel_initializer
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._expand_se_in_filters = expand_se_in_filters
    self._num_bits_weight = num_bits_weight
    self._num_bits_activation = num_bits_activation
    if use_sync_bn:
      self._norm = _quantize_wrapped_layer(
          tf.keras.layers.experimental.SyncBatchNormalization,
          configs.NoOpQuantizeConfig())
      self._norm_with_quantize = _quantize_wrapped_layer(
          tf.keras.layers.experimental.SyncBatchNormalization,
          configs.DefaultNBitOutputQuantizeConfig(
              num_bits_weight=self._num_bits_weight,
              num_bits_activation=self._num_bits_activation))
    else:
      self._norm = _quantize_wrapped_layer(
          tf.keras.layers.BatchNormalization,
          configs.NoOpQuantizeConfig())
      self._norm_with_quantize = _quantize_wrapped_layer(
          tf.keras.layers.BatchNormalization,
          configs.DefaultNBitOutputQuantizeConfig(
              num_bits_weight=self._num_bits_weight,
              num_bits_activation=self._num_bits_activation))
    if tf.keras.backend.image_data_format() == 'channels_last':
      self._bn_axis = -1
    else:
      self._bn_axis = 1
    if not depthwise_activation:
      self._depthwise_activation = activation
    if regularize_depthwise:
      self._depthsize_regularizer = kernel_regularizer
    else:
      self._depthsize_regularizer = None

  def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
    """Build variables and child layers to prepare for calling."""
    conv2d_quantized = _quantize_wrapped_layer(
        tf.keras.layers.Conv2D,
        configs.DefaultNBitConvQuantizeConfig(
            ['kernel'], ['activation'], False,
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))
    depthwise_conv2d_quantized = _quantize_wrapped_layer(
        tf.keras.layers.DepthwiseConv2D,
        configs.DefaultNBitConvQuantizeConfig(
            ['depthwise_kernel'], ['activation'], False,
            num_bits_weight=self._num_bits_weight,
            num_bits_activation=self._num_bits_activation))
    expand_filters = self._in_filters
    if self._expand_ratio > 1:
      # First 1x1 conv for channel expansion.
      expand_filters = nn_layers.make_divisible(
          self._in_filters * self._expand_ratio, self._divisible_by)

      expand_kernel = 1 if self._use_depthwise else self._kernel_size
      expand_stride = 1 if self._use_depthwise else self._strides

      self._conv0 = conv2d_quantized(
          filters=expand_filters,
          kernel_size=expand_kernel,
          strides=expand_stride,
          padding='same',
          use_bias=False,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          bias_regularizer=self._bias_regularizer,
          activation=NoOpActivation())
      self._norm0 = self._norm_with_quantize(
          axis=self._bn_axis,
          momentum=self._norm_momentum,
          epsilon=self._norm_epsilon)
      self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
          tf_utils.get_activation(self._activation, use_keras_layer=True),
          configs.DefaultNBitActivationQuantizeConfig(
              num_bits_weight=self._num_bits_weight,
              num_bits_activation=self._num_bits_activation))

    if self._use_depthwise:
      # Depthwise conv.
      self._conv1 = depthwise_conv2d_quantized(
          kernel_size=(self._kernel_size, self._kernel_size),
          strides=self._strides,
          padding='same',
          depth_multiplier=1,
          dilation_rate=self._dilation_rate,
          use_bias=False,
          depthwise_initializer=self._kernel_initializer,
          depthwise_regularizer=self._depthsize_regularizer,
          bias_regularizer=self._bias_regularizer,
          activation=NoOpActivation())
      self._norm1 = self._norm_with_quantize(
          axis=self._bn_axis,
          momentum=self._norm_momentum,
          epsilon=self._norm_epsilon)
      self._depthwise_activation_layer = (
          tfmot.quantization.keras.QuantizeWrapperV2(
              tf_utils.get_activation(self._depthwise_activation,
                                      use_keras_layer=True),
              configs.DefaultNBitActivationQuantizeConfig(
                  num_bits_weight=self._num_bits_weight,
                  num_bits_activation=self._num_bits_activation)))

    # Squeeze and excitation.
    if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
      logging.info('Use Squeeze and excitation.')
      in_filters = self._in_filters
      if self._expand_se_in_filters:
        in_filters = expand_filters
      self._squeeze_excitation = qat_nn_layers.SqueezeExcitationNBitQuantized(
          in_filters=in_filters,
          out_filters=expand_filters,
          se_ratio=self._se_ratio,
          divisible_by=self._divisible_by,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          bias_regularizer=self._bias_regularizer,
          activation=self._se_inner_activation,
          gating_activation=self._se_gating_activation,
          num_bits_weight=self._num_bits_weight,
          num_bits_activation=self._num_bits_activation)
    else:
      self._squeeze_excitation = None

    # Last 1x1 conv.
    self._conv2 = conv2d_quantized(
        filters=self._out_filters,
        kernel_size=1,
        strides=1,
        padding='same',
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activation=NoOpActivation())
    self._norm2 = self._norm_with_quantize(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)

    if self._stochastic_depth_drop_rate:
      self._stochastic_depth = nn_layers.StochasticDepth(
          self._stochastic_depth_drop_rate)
    else:
      self._stochastic_depth = None
    self._add = tf.keras.layers.Add()

    super().build(input_shape)

  def get_config(self) -> Dict[str, Any]:
    """Get a config of this layer."""
    config = {
        'in_filters': self._in_filters,
        'out_filters': self._out_filters,
        'expand_ratio': self._expand_ratio,
        'strides': self._strides,
        'kernel_size': self._kernel_size,
        'se_ratio': self._se_ratio,
        'divisible_by': self._divisible_by,
        'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'bias_regularizer': self._bias_regularizer,
        'activation': self._activation,
        'se_inner_activation': self._se_inner_activation,
        'se_gating_activation': self._se_gating_activation,
        'expand_se_in_filters': self._expand_se_in_filters,
        'depthwise_activation': self._depthwise_activation,
        'dilation_rate': self._dilation_rate,
        'use_sync_bn': self._use_sync_bn,
        'regularize_depthwise': self._regularize_depthwise,
        'use_depthwise': self._use_depthwise,
        'use_residual': self._use_residual,
        'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon,
        'num_bits_weight': self._num_bits_weight,
        'num_bits_activation': self._num_bits_activation
    }
    base_config = super().get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(
      self,
      inputs: tf.Tensor,
      training: Optional[Union[bool, tf.Tensor]] = None) -> tf.Tensor:
    """Run the InvertedBottleneckBlockNBitQuantized logics."""
    shortcut = inputs
    if self._expand_ratio > 1:
      x = self._conv0(inputs)
      x = self._norm0(x)
      x = self._activation_layer(x)
    else:
      x = inputs

    if self._use_depthwise:
      x = self._conv1(x)
      x = self._norm1(x)
      x = self._depthwise_activation_layer(x)

    if self._squeeze_excitation:
      x = self._squeeze_excitation(x)

    x = self._conv2(x)
    x = self._norm2(x)

    if (self._use_residual and self._in_filters == self._out_filters and
        self._strides == 1):
      if self._stochastic_depth:
        x = self._stochastic_depth(x, training=training)
      x = self._add([x, shortcut])

    return x
file_length: 32,587 | avg_line_length: 39.735 | max_line_length: 103 | extension_type: py
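An illustrative instantiation (assumed filter sizes) of the quantized inverted bottleneck; with in_filters == out_filters and stride 1, the residual add at the end of call is active:

import tensorflow as tf

from official.projects.qat.vision.n_bit import nn_blocks

block = nn_blocks.InvertedBottleneckBlockNBitQuantized(
    in_filters=24, out_filters=24, expand_ratio=6, strides=1,
    num_bits_weight=4, num_bits_activation=4)
x = tf.random.normal([1, 32, 32, 24])
y = block(x, training=False)
print(y.shape)  # (1, 32, 32, 24); shortcut and output are added here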
repo: models
file: models-master/official/projects/qat/vision/n_bit/configs.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Default 8-bit QuantizeConfigs.""" from typing import Sequence, Callable, Tuple, Any, Dict import tensorflow as tf import tensorflow_model_optimization as tfmot Quantizer = tfmot.quantization.keras.quantizers.Quantizer Layer = tf.keras.layers.Layer Activation = Callable[[tf.Tensor], tf.Tensor] WeightAndQuantizer = Tuple[tf.Variable, Quantizer] ActivationAndQuantizer = Tuple[Activation, Quantizer] class DefaultNBitOutputQuantizeConfig( tfmot.quantization.keras.QuantizeConfig): """QuantizeConfig which only quantizes the output from a layer.""" def __init__(self, num_bits_weight: int = 8, num_bits_activation: int = 8): self._num_bits_weight = num_bits_weight self._num_bits_activation = num_bits_activation def get_weights_and_quantizers( self, layer: Layer) -> Sequence[WeightAndQuantizer]: return [] def get_activations_and_quantizers( self, layer: Layer) -> Sequence[ActivationAndQuantizer]: return [] def set_quantize_weights(self, layer: Layer, quantize_weights: Sequence[tf.Tensor]): pass def set_quantize_activations(self, layer: Layer, quantize_activations: Sequence[Activation]): pass def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]: return [ tfmot.quantization.keras.quantizers.MovingAverageQuantizer( num_bits=self._num_bits_activation, per_axis=False, symmetric=False, narrow_range=False) # activation/output ] def get_config(self) -> Dict[str, Any]: return { 'num_bits_weight': self._num_bits_weight, 'num_bits_activation': self._num_bits_activation, } class NoOpQuantizeConfig(tfmot.quantization.keras.QuantizeConfig): """QuantizeConfig which does not quantize any part of the layer.""" def __init__(self, num_bits_weight: int = 8, num_bits_activation: int = 8): self._num_bits_weight = num_bits_weight self._num_bits_activation = num_bits_activation def get_weights_and_quantizers( self, layer: Layer) -> Sequence[WeightAndQuantizer]: return [] def get_activations_and_quantizers( self, layer: Layer) -> Sequence[ActivationAndQuantizer]: return [] def set_quantize_weights( self, layer: Layer, quantize_weights: Sequence[tf.Tensor]): pass def set_quantize_activations( self, layer: Layer, quantize_activations: Sequence[Activation]): pass def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]: return [] def get_config(self) -> Dict[str, Any]: return { 'num_bits_weight': self._num_bits_weight, 'num_bits_activation': self._num_bits_activation, } class DefaultNBitQuantizeConfig(tfmot.quantization.keras.QuantizeConfig): """QuantizeConfig for non recurrent Keras layers.""" def __init__(self, weight_attrs: Sequence[str], activation_attrs: Sequence[str], quantize_output: bool, num_bits_weight: int = 8, num_bits_activation: int = 8): """Initializes a default N-bit quantize config.""" self.weight_attrs = weight_attrs self.activation_attrs = activation_attrs self.quantize_output = quantize_output self._num_bits_weight = num_bits_weight self._num_bits_activation = num_bits_activation # 
TODO(pulkitb): For some layers such as Conv2D, per_axis should be True.
    # Add mapping for which layers support per_axis.
    self.weight_quantizer = tfmot.quantization.keras.quantizers.LastValueQuantizer(
        num_bits=num_bits_weight, per_axis=False,
        symmetric=True, narrow_range=True)  # weight
    self.activation_quantizer = tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
        num_bits=num_bits_activation, per_axis=False,
        symmetric=False, narrow_range=False)  # activation/output

  def get_weights_and_quantizers(
      self, layer: Layer) -> Sequence[WeightAndQuantizer]:
    """See base class."""
    return [(getattr(layer, weight_attr), self.weight_quantizer)
            for weight_attr in self.weight_attrs]

  def get_activations_and_quantizers(
      self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
    """See base class."""
    return [(getattr(layer, activation_attr), self.activation_quantizer)
            for activation_attr in self.activation_attrs]

  def set_quantize_weights(
      self, layer: Layer, quantize_weights: Sequence[tf.Tensor]):
    """See base class."""
    if len(self.weight_attrs) != len(quantize_weights):
      raise ValueError(
          '`set_quantize_weights` called on layer {} with {} '
          'weight parameters, but layer expects {} values.'.format(
              layer.name, len(quantize_weights), len(self.weight_attrs)))

    for weight_attr, weight in zip(self.weight_attrs, quantize_weights):
      current_weight = getattr(layer, weight_attr)
      if current_weight.shape != weight.shape:
        raise ValueError('Existing layer weight shape {} is incompatible with '
                         'provided weight shape {}'.format(
                             current_weight.shape, weight.shape))

      setattr(layer, weight_attr, weight)

  def set_quantize_activations(
      self, layer: Layer, quantize_activations: Sequence[Activation]):
    """See base class."""
    if len(self.activation_attrs) != len(quantize_activations):
      raise ValueError(
          '`set_quantize_activations` called on layer {} with {} '
          'activation parameters, but layer expects {} values.'.format(
              layer.name, len(quantize_activations),
              len(self.activation_attrs)))

    for activation_attr, activation in zip(
        self.activation_attrs, quantize_activations):
      setattr(layer, activation_attr, activation)

  def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
    """See base class."""
    if self.quantize_output:
      return [self.activation_quantizer]
    return []

  @classmethod
  def from_config(cls, config: Dict[str, Any]) -> object:
    """Instantiates a `DefaultNBitQuantizeConfig` from its config.

    Args:
      config: Output of `get_config()`.

    Returns:
      A `DefaultNBitQuantizeConfig` instance.
    """
    return cls(**config)

  def get_config(self) -> Dict[str, Any]:
    """Get a config for this quantize config."""
    # TODO(pulkitb): Add weight and activation quantizer to config.
    # Currently it's created internally, but ideally the quantizers should be
    # part of the constructor and passed in from the registry.
return {
        'weight_attrs': self.weight_attrs,
        'activation_attrs': self.activation_attrs,
        'quantize_output': self.quantize_output,
        'num_bits_weight': self._num_bits_weight,
        'num_bits_activation': self._num_bits_activation
    }

  def __eq__(self, other):
    if not isinstance(other, DefaultNBitQuantizeConfig):
      return False

    return (self.weight_attrs == other.weight_attrs and
            self.activation_attrs == other.activation_attrs and
            self.weight_quantizer == other.weight_quantizer and
            self.activation_quantizer == other.activation_quantizer and
            self.quantize_output == other.quantize_output)

  def __ne__(self, other):
    return not self.__eq__(other)


class DefaultNBitConvWeightsQuantizer(
    tfmot.quantization.keras.quantizers.LastValueQuantizer):
  """Quantizer for handling weights in Conv2D/DepthwiseConv2D layers."""

  def __init__(self, num_bits_weight: int = 8, num_bits_activation: int = 8):
    """Construct LastValueQuantizer with params specific for TFLite Convs."""
    super(DefaultNBitConvWeightsQuantizer, self).__init__(
        num_bits=num_bits_weight,
        per_axis=True,
        symmetric=True,
        narrow_range=True)  # weight
    self._num_bits_weight = num_bits_weight
    self._num_bits_activation = num_bits_activation

  def build(self, tensor_shape: tf.TensorShape, name: str, layer: Layer):
    """Build min/max quantization variables."""
    min_weight = layer.add_weight(
        name + '_min',
        shape=(tensor_shape[-1],),
        initializer=tf.keras.initializers.Constant(-6.0),
        trainable=False)
    max_weight = layer.add_weight(
        name + '_max',
        shape=(tensor_shape[-1],),
        initializer=tf.keras.initializers.Constant(6.0),
        trainable=False)

    return {'min_var': min_weight, 'max_var': max_weight}


class NoQuantizer(tfmot.quantization.keras.quantizers.Quantizer):
  """Dummy quantizer that explicitly does not quantize."""

  def __call__(self, inputs, training, weights, **kwargs):
    return tf.identity(inputs)

  def get_config(self):
    return {}

  def build(self, tensor_shape, name, layer):
    return {}


class DefaultNBitConvQuantizeConfig(DefaultNBitQuantizeConfig):
  """QuantizeConfig for Conv2D/DepthwiseConv2D layers."""

  def __init__(self,
               weight_attrs: Sequence[str],
               activation_attrs: Sequence[str],
               quantize_output: bool,
               num_bits_weight: int = 8,
               num_bits_activation: int = 8):
    """Initializes default N-bit quantization config for the conv layer."""
    super().__init__(weight_attrs=weight_attrs,
                     activation_attrs=activation_attrs,
                     quantize_output=quantize_output,
                     num_bits_weight=num_bits_weight,
                     num_bits_activation=num_bits_activation)
    self._num_bits_weight = num_bits_weight
    self._num_bits_activation = num_bits_activation
    self.weight_quantizer = DefaultNBitConvWeightsQuantizer(
        num_bits_weight=num_bits_weight,
        num_bits_activation=num_bits_activation)


class DefaultNBitActivationQuantizeConfig(
    tfmot.quantization.keras.QuantizeConfig):
  """QuantizeConfig for keras.layers.Activation.

  `keras.layers.Activation` needs a separate `QuantizeConfig` since the
  decision to quantize depends on the specific activation type.
""" def __init__(self, num_bits_weight: int = 8, num_bits_activation: int = 8): self._num_bits_weight = num_bits_weight self._num_bits_activation = num_bits_activation def _assert_activation_layer(self, layer: Layer): if not isinstance(layer, tf.keras.layers.Activation): raise RuntimeError( 'DefaultNBitActivationQuantizeConfig can only be used with ' '`keras.layers.Activation`.') def get_weights_and_quantizers( self, layer: Layer) -> Sequence[WeightAndQuantizer]: """See base class.""" self._assert_activation_layer(layer) return [] def get_activations_and_quantizers( self, layer: Layer) -> Sequence[ActivationAndQuantizer]: """See base class.""" self._assert_activation_layer(layer) return [] def set_quantize_weights( self, layer: Layer, quantize_weights: Sequence[tf.Tensor]): """See base class.""" self._assert_activation_layer(layer) def set_quantize_activations( self, layer: Layer, quantize_activations: Sequence[Activation]): """See base class.""" self._assert_activation_layer(layer) def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]: """See base class.""" self._assert_activation_layer(layer) if not hasattr(layer.activation, '__name__'): raise ValueError('Activation {} not supported by ' 'DefaultNBitActivationQuantizeConfig.'.format( layer.activation)) # This code is copied from TFMOT repo, but added relu6 to support mobilenet. if layer.activation.__name__ in ['relu', 'relu6', 'swish']: # 'relu' should generally get fused into the previous layer. return [tfmot.quantization.keras.quantizers.MovingAverageQuantizer( num_bits=self._num_bits_activation, per_axis=False, symmetric=False, narrow_range=False)] # activation/output elif layer.activation.__name__ in ['linear', 'softmax', 'sigmoid']: return [] raise ValueError('Activation {} not supported by ' 'DefaultNBitActivationQuantizeConfig.'.format( layer.activation)) def get_config(self) -> Dict[str, Any]: """Get a config for this quantizer config.""" return { 'num_bits_weight': self._num_bits_weight, 'num_bits_activation': self._num_bits_activation, } def _types_dict(): return { 'DefaultNBitOutputQuantizeConfig': DefaultNBitOutputQuantizeConfig, 'NoOpQuantizeConfig': NoOpQuantizeConfig, 'DefaultNBitQuantizeConfig': DefaultNBitQuantizeConfig, 'DefaultNBitConvWeightsQuantizer': DefaultNBitConvWeightsQuantizer, 'DefaultNBitConvQuantizeConfig': DefaultNBitConvQuantizeConfig, 'DefaultNBitActivationQuantizeConfig': DefaultNBitActivationQuantizeConfig, }
13,651
34.832021
91
py
models
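A minimal usage sketch for the DefaultNBitQuantizeConfig defined above, wired into TFMOT's standard annotate/apply flow; the layer sizes and the 4-bit widths are illustrative, not from the source.

import tensorflow as tf
import tensorflow_model_optimization as tfmot

from official.projects.qat.vision.n_bit import configs

# Quantize a Dense layer's kernel and activation with 4-bit quantizers.
dense_config = configs.DefaultNBitQuantizeConfig(
    weight_attrs=['kernel'],
    activation_attrs=['activation'],
    quantize_output=False,
    num_bits_weight=4,
    num_bits_activation=4)

annotated = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(16,)),
    tfmot.quantization.keras.quantize_annotate_layer(
        tf.keras.layers.Dense(8), quantize_config=dense_config),
])

# The custom config must be in scope so quantize_apply can deserialize it.
with tfmot.quantization.keras.quantize_scope(
    {'DefaultNBitQuantizeConfig': configs.DefaultNBitQuantizeConfig}):
  qat_model = tfmot.quantization.keras.quantize_apply(annotated)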
models-master/official/projects/qat/vision/quantization/layer_transforms.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains custom quantization layer transforms.""" from typing import Any, Type, Mapping, List, Union, Tuple import tensorflow as tf import tensorflow_model_optimization as tfmot from official.modeling import tf_utils from official.projects.qat.vision.modeling.layers import nn_blocks as quantized_nn_blocks from official.projects.qat.vision.modeling.layers import nn_layers as quantized_nn_layers from official.projects.qat.vision.quantization import configs from official.projects.qat.vision.quantization import helper keras = tf.keras LayerNode = tfmot.quantization.keras.graph_transformations.transforms.LayerNode LayerPattern = tfmot.quantization.keras.graph_transformations.transforms.LayerPattern _LAYER_NAMES = [ 'Vision>Conv2DBNBlock', 'Vision>InvertedBottleneckBlock', 'Vision>SegmentationHead', 'Vision>SpatialPyramidPooling', 'Vision>ASPP' ] class CustomLayerQuantize( tfmot.quantization.keras.graph_transformations.transforms.Transform): """Add QAT support for Keras Custom layer.""" def __init__(self, original_layer_pattern: str, quantized_layer_class: Type[keras.layers.Layer]): super(CustomLayerQuantize, self).__init__() self._original_layer_pattern = original_layer_pattern self._quantized_layer_class = quantized_layer_class def pattern(self) -> LayerPattern: """See base class.""" return LayerPattern(self._original_layer_pattern) def _create_layer_metadata( self, layer_class_name: str ) -> Mapping[str, tfmot.quantization.keras.QuantizeConfig]: if layer_class_name in _LAYER_NAMES: layer_metadata = {'quantize_config': configs.NoOpQuantizeConfig()} else: layer_metadata = { 'quantize_config': configs.Default8BitOutputQuantizeConfig() } return layer_metadata def _create_dummy_input_shape( self, quantized_layer: tf.keras.layers.Layer ) -> Union[List[int], Tuple[Any, Any]]: dummy_input_shape = [1, 128, 128, 1] # SegmentationHead layer requires a tuple of 2 tensors. 
if isinstance(quantized_layer,
                  quantized_nn_layers.SegmentationHeadQuantized):
      dummy_input_shape = ([1, 1, 1, 1], [1, 1, 1, 1])
    return dummy_input_shape

  def replacement(self, match_layer: LayerNode) -> LayerNode:
    """See base class."""
    bottleneck_layer = match_layer.layer
    bottleneck_config = bottleneck_layer['config']
    bottleneck_names_and_weights = list(match_layer.names_and_weights)
    quantized_layer = self._quantized_layer_class(**bottleneck_config)
    dummy_input_shape = self._create_dummy_input_shape(quantized_layer)
    quantized_layer.compute_output_shape(dummy_input_shape)
    quantized_names_and_weights = zip(
        [weight.name for weight in quantized_layer.weights],
        quantized_layer.get_weights())
    match_idx = 0
    names_and_weights = []
    for name_and_weight in quantized_names_and_weights:
      if not helper.is_quantization_weight_name(name=name_and_weight[0]):
        name_and_weight = bottleneck_names_and_weights[match_idx]
        match_idx = match_idx + 1
      names_and_weights.append(name_and_weight)

    if match_idx != len(bottleneck_names_and_weights):
      raise ValueError('Only {}/{} of the Bottleneck weights were '
                       'transformed.'.format(
                           match_idx, len(bottleneck_names_and_weights)))

    quantized_layer_config = tf_utils.serialize_layer(
        quantized_layer, use_legacy_format=True
    )
    quantized_layer_config['name'] = quantized_layer_config['config']['name']
    layer_metadata = self._create_layer_metadata(bottleneck_layer['class_name'])

    return LayerNode(
        quantized_layer_config,
        metadata=layer_metadata,
        names_and_weights=names_and_weights)


CUSTOM_TRANSFORMS = [
    CustomLayerQuantize('Vision>BottleneckBlock',
                        quantized_nn_blocks.BottleneckBlockQuantized),
    CustomLayerQuantize('Vision>InvertedBottleneckBlock',
                        quantized_nn_blocks.InvertedBottleneckBlockQuantized),
    CustomLayerQuantize('Vision>Conv2DBNBlock',
                        quantized_nn_blocks.Conv2DBNBlockQuantized),
    CustomLayerQuantize('Vision>SegmentationHead',
                        quantized_nn_layers.SegmentationHeadQuantized),
    CustomLayerQuantize('Vision>SpatialPyramidPooling',
                        quantized_nn_layers.SpatialPyramidPoolingQuantized),
    CustomLayerQuantize('Vision>ASPP', quantized_nn_layers.ASPPQuantized)
]
5,057
41.504202
89
py
models
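For orientation, the skeleton of a Transform such as CustomLayerQuantize above is small: match a layer pattern, then return a replacement LayerNode. A hypothetical no-op example, not part of the source:

import tensorflow_model_optimization as tfmot

transforms = tfmot.quantization.keras.graph_transformations.transforms


class PassThroughDense(transforms.Transform):
  """Matches Dense layers and returns them unchanged."""

  def pattern(self):
    return transforms.LayerPattern('Dense')

  def replacement(self, match_layer):
    # A real transform would build and return a new LayerNode here, as
    # CustomLayerQuantize.replacement does.
    return match_layer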
models-master/official/projects/qat/vision/quantization/helper_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for helper.""" import numpy as np import tensorflow as tf import tensorflow_model_optimization as tfmot from official.projects.qat.vision.quantization import helper class HelperTest(tf.test.TestCase): def create_simple_model(self): return tf.keras.models.Sequential([ tf.keras.layers.Dense(8, input_shape=(16,)), ]) def test_copy_original_weights_for_simple_model_with_custom_weights(self): one_model = self.create_simple_model() one_weights = [np.ones_like(weight) for weight in one_model.get_weights()] one_model.set_weights(one_weights) qat_model = tfmot.quantization.keras.quantize_model( self.create_simple_model()) zero_weights = [np.zeros_like(weight) for weight in qat_model.get_weights()] qat_model.set_weights(zero_weights) helper.copy_original_weights(one_model, qat_model) qat_model_weights = qat_model.get_weights() count = 0 for idx, weight in enumerate(qat_model.weights): if not helper.is_quantization_weight_name(weight.name): self.assertAllEqual( qat_model_weights[idx], np.ones_like(qat_model_weights[idx])) count += 1 self.assertLen(one_model.weights, count) self.assertGreater(len(qat_model.weights), len(one_model.weights)) if __name__ == '__main__': tf.test.main()
1,928
34.072727
80
py
models
models-master/official/projects/qat/vision/quantization/schemes.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Quantization schemes.""" # Import libraries import tensorflow_model_optimization as tfmot from official.projects.qat.vision.quantization import layer_transforms default_8bit_transforms = tfmot.quantization.keras.default_8bit.default_8bit_transforms class QuantizeLayoutTransform( tfmot.quantization.keras.QuantizeLayoutTransform): """Default model transformations.""" def apply(self, model, layer_quantize_map): """Implement default 8-bit transforms. Currently this means the following. 1. Pull activations into layers, and apply fuse activations. (TODO) 2. Modify range in incoming layers for Concat. (TODO) 3. Fuse Conv2D/DepthwiseConv2D + BN into single layer. Args: model: Keras model to be quantized. layer_quantize_map: Map with keys as layer names, and values as dicts containing custom `QuantizeConfig`s which may have been passed with layers. Returns: (Transformed Keras model to better match TensorFlow Lite backend, updated layer quantize map.) """ transforms = [ default_8bit_transforms.InputLayerQuantize(), default_8bit_transforms.SeparableConv1DQuantize(), default_8bit_transforms.SeparableConvQuantize(), default_8bit_transforms.Conv2DReshapeBatchNormReLUQuantize(), default_8bit_transforms.Conv2DReshapeBatchNormActivationQuantize(), default_8bit_transforms.Conv2DBatchNormReLUQuantize(), default_8bit_transforms.Conv2DBatchNormActivationQuantize(), default_8bit_transforms.Conv2DReshapeBatchNormQuantize(), default_8bit_transforms.Conv2DBatchNormQuantize(), default_8bit_transforms.ConcatTransform6Inputs(), default_8bit_transforms.ConcatTransform5Inputs(), default_8bit_transforms.ConcatTransform4Inputs(), default_8bit_transforms.ConcatTransform3Inputs(), default_8bit_transforms.ConcatTransform(), default_8bit_transforms.LayerReLUQuantize(), default_8bit_transforms.LayerReluActivationQuantize() ] transforms += layer_transforms.CUSTOM_TRANSFORMS return tfmot.quantization.keras.graph_transformations.model_transformer.ModelTransformer( model, transforms, set(layer_quantize_map.keys()), layer_quantize_map).transform() class Default8BitQuantizeScheme( tfmot.quantization.keras.default_8bit.Default8BitQuantizeScheme): def get_layout_transformer(self): return QuantizeLayoutTransform()
3,089
39.12987
93
py
models
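A short sketch of how this scheme is used, assuming `float_model` is an annotatable functional Keras model; the variable names are illustrative:

import tensorflow_model_optimization as tfmot

from official.projects.qat.vision.quantization import schemes

annotated = tfmot.quantization.keras.quantize_annotate_model(float_model)
# Passing the custom scheme makes quantize_apply run CUSTOM_TRANSFORMS in
# addition to the stock 8-bit transforms listed above.
qat_model = tfmot.quantization.keras.quantize_apply(
    annotated, scheme=schemes.Default8BitQuantizeScheme())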
models-master/official/projects/qat/vision/quantization/helper.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Quantization helpers."""
from __future__ import annotations

import copy
from typing import Any, Dict, List, Optional, Type, Union

import tensorflow as tf
import tensorflow_model_optimization as tfmot

from official.projects.qat.vision.quantization import configs

_QUANTIZATION_WEIGHT_NAMES = [
    'output_max', 'output_min', 'optimizer_step',
    'kernel_min', 'kernel_max',
    'add_three_min', 'add_three_max',
    'divide_six_min', 'divide_six_max',
    'depthwise_kernel_min', 'depthwise_kernel_max',
    'pointwise_kernel_min', 'pointwise_kernel_max',
    'reduce_mean_quantizer_vars_min', 'reduce_mean_quantizer_vars_max',
    'quantize_layer_min', 'quantize_layer_max',
    'quantize_layer_1_min', 'quantize_layer_1_max',
    'quantize_layer_2_min', 'quantize_layer_2_max',
    'quantize_layer_3_min', 'quantize_layer_3_max',
    'post_activation_min', 'post_activation_max',
]

_ORIGINAL_WEIGHT_NAME = [
    'kernel', 'depthwise_kernel', 'pointwise_kernel',
    'gamma', 'beta', 'moving_mean', 'moving_variance', 'bias',
]


def is_quantization_weight_name(name: str) -> bool:
  simple_name = name.split('/')[-1].split(':')[0]
  if simple_name in _QUANTIZATION_WEIGHT_NAMES:
    return True
  if simple_name in _ORIGINAL_WEIGHT_NAME:
    return False
  raise ValueError('Variable name {} is not supported.'.format(simple_name))


def copy_original_weights(original_model: tf.keras.Model,
                          quantized_model: tf.keras.Model):
  """Helper function that copies the original model weights to the quantized model."""
  original_weight_value = original_model.get_weights()
  weight_values = quantized_model.get_weights()

  original_idx = 0
  for idx, weight in enumerate(quantized_model.weights):
    if not is_quantization_weight_name(weight.name):
      if original_idx >= len(original_weight_value):
        raise ValueError('Not enough original model weights.')
      weight_values[idx] = original_weight_value[original_idx]
      original_idx = original_idx + 1

  if original_idx < len(original_weight_value):
    raise ValueError('Not enough quantized model weights.')

  quantized_model.set_weights(weight_values)


class LayerQuantizerHelper(object):
  """Helper class that handles quantizers."""

  def __init__(self, *args, **kwargs):
    self._quantizers = {}
    self._quantizer_vars = {}
    super().__init__(*args, **kwargs)

  def _all_value_quantizer(self):
    return tfmot.quantization.keras.quantizers.AllValuesQuantizer(
        num_bits=8, per_axis=False, symmetric=False, narrow_range=False)

  def _moving_average_quantizer(self):
    return tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
        num_bits=8, per_axis=False, symmetric=False, narrow_range=False)

  def _add_quantizer(self, name, all_value_quantizer=False):
    if all_value_quantizer:
      self._quantizers[name] = self._all_value_quantizer()
    else:
      self._quantizers[name] = self._moving_average_quantizer()

  def _apply_quantizer(self, name, inputs, training, **kwargs):
    return self._quantizers[name](
        inputs, training, self._quantizer_vars[name], **kwargs)

  def _build_quantizer_vars(self):
    for name in self._quantizers:
self._quantizer_vars[name] = self._quantizers[name].build(
          tensor_shape=None, name=name, layer=self)


class NoOpActivation:
  """No-op activation which simply returns the incoming tensor.

  This activation is required to distinguish it from
  `keras.activations.linear`, which does the same thing. The main difference
  is that NoOpActivation should not have any quantize operation applied to it.
  """

  def __call__(self, x: tf.Tensor) -> tf.Tensor:
    return x

  def get_config(self) -> Dict[str, Any]:
    """Get a config of this object."""
    return {}

  def __eq__(self, other: Any) -> bool:
    if not other or not isinstance(other, NoOpActivation):
      return False
    return True

  def __ne__(self, other: Any) -> bool:
    return not self.__eq__(other)


def quantize_wrapped_layer(cls, quantize_config):

  def constructor(*arg, **kwargs):
    return tfmot.quantization.keras.QuantizeWrapperV2(
        cls(*arg, **kwargs), quantize_config)

  return constructor


def norm_by_activation(activation, norm_quantized, norm_no_quantized):
  if activation not in ['relu', 'relu6']:
    return norm_quantized
  else:
    return norm_no_quantized


class SeparableConv2DQuantized(tf.keras.layers.Layer):
  """Quantized SeparableConv2D."""

  def __init__(
      self,
      name: Optional[str] = None,
      last_quantize: bool = False,
      **conv_kwargs,
  ):
    """Initializes a SeparableConv2DQuantized.

    Args:
      name: The name of the layer.
      last_quantize: A `bool` indicating whether to quantize the output.
      **conv_kwargs: Keyword arguments to be used for the conv and dwconv.
    """
    super().__init__(name=name)
    self._conv_kwargs = copy.deepcopy(conv_kwargs)
    self._name = name
    self._last_quantize = last_quantize

  def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
    """Creates the child layers of the layer."""
    depthwise_conv2d_quantized = quantize_wrapped_layer(
        tf.keras.layers.DepthwiseConv2D,
        configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], [], True),
    )
    conv2d_quantized = quantize_wrapped_layer(
        tf.keras.layers.Conv2D,
        configs.Default8BitConvQuantizeConfig(
            ['kernel'], [], self._last_quantize
        ),
    )
    dwconv_kwargs = self._conv_kwargs.copy()
    # Depthwise conv input filters are always equal to output filters.
    # The filters argument is only needed for the point-wise conv2d op.
del dwconv_kwargs['filters'] dwconv_kwargs.update({ 'activation': None, 'use_bias': False, }) self.dw_conv = depthwise_conv2d_quantized(name='dw', **dwconv_kwargs) conv_kwargs = self._conv_kwargs.copy() conv_kwargs.update({ 'kernel_size': (1, 1), 'strides': (1, 1), 'padding': 'valid', 'groups': 1, }) self.conv = conv2d_quantized(name='pw', **conv_kwargs) def call(self, inputs: tf.Tensor) -> tf.Tensor: """Call the separable conv layer.""" x = self.dw_conv(inputs) outputs = self.conv(x) return outputs def get_config(self) -> Dict[str, Any]: """Returns the config of the layer.""" config = self._conv_kwargs.copy() config.update({ 'name': self._name, 'last_quantize': self._last_quantize, }) return config @classmethod def from_config( cls: Type[SeparableConv2DQuantized], config: Dict[str, Any] ) -> SeparableConv2DQuantized: """Creates a layer from its config.""" return cls(**config) Conv2DQuantized = quantize_wrapped_layer( tf.keras.layers.Conv2D, configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], False)) Conv2DOutputQuantized = quantize_wrapped_layer( tf.keras.layers.Conv2D, configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True)) DepthwiseConv2DQuantized = quantize_wrapped_layer( tf.keras.layers.DepthwiseConv2D, configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'], False)) DepthwiseConv2DOutputQuantized = quantize_wrapped_layer( tf.keras.layers.DepthwiseConv2D, configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'], True)) GlobalAveragePooling2DQuantized = quantize_wrapped_layer( tf.keras.layers.GlobalAveragePooling2D, configs.Default8BitQuantizeConfig([], [], True)) AveragePooling2DQuantized = quantize_wrapped_layer( tf.keras.layers.AveragePooling2D, configs.Default8BitQuantizeConfig([], [], True)) ResizingQuantized = quantize_wrapped_layer( tf.keras.layers.Resizing, configs.Default8BitQuantizeConfig([], [], True)) ConcatenateQuantized = quantize_wrapped_layer( tf.keras.layers.Concatenate, configs.Default8BitQuantizeConfig([], [], True)) UpSampling2DQuantized = quantize_wrapped_layer( tf.keras.layers.UpSampling2D, configs.Default8BitQuantizeConfig([], [], True)) ReshapeQuantized = quantize_wrapped_layer( tf.keras.layers.Reshape, configs.Default8BitQuantizeConfig([], [], True)) DenseQuantized = quantize_wrapped_layer( tf.keras.layers.Dense, configs.Default8BitQuantizeConfig(['kernel'], ['activation'], False), ) DenseOutputQuantized = quantize_wrapped_layer( tf.keras.layers.Dense, configs.Default8BitQuantizeConfig(['kernel'], ['activation'], True), ) IdentityQuantized = quantize_wrapped_layer( tf.keras.layers.Identity, configs.Default8BitQuantizeConfig([], [], True) ) # pylint:disable=g-long-lambda BatchNormalizationQuantized = lambda norm_layer: quantize_wrapped_layer( norm_layer, configs.Default8BitOutputQuantizeConfig()) BatchNormalizationNoQuantized = lambda norm_layer: quantize_wrapped_layer( norm_layer, configs.NoOpQuantizeConfig())
9,789
32.186441
80
py
models
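A minimal sketch of the quantize_wrapped_layer pattern above, mirroring how this repo pairs Conv2DQuantized with NoOpActivation so no separate Keras activation runs inside the wrapper; the shapes are illustrative:

import tensorflow as tf

from official.projects.qat.vision.quantization import helper

conv = helper.Conv2DQuantized(
    filters=32, kernel_size=3, padding='same',
    activation=helper.NoOpActivation())
features = conv(tf.zeros([1, 64, 64, 3]))  # A QuantizeWrapperV2-wrapped Conv2D.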
models-master/official/projects/qat/vision/quantization/configs_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for configs.py.""" # Import libraries import numpy as np import tensorflow as tf import tensorflow_model_optimization as tfmot from official.modeling import tf_utils from official.projects.qat.vision.quantization import configs class _TestHelper(object): def _convert_list(self, list_of_tuples): """Transforms a list of 2-tuples to a tuple of 2 lists. `QuantizeConfig` methods return a list of 2-tuples in the form [(weight1, quantizer1), (weight2, quantizer2)]. This function converts it into a 2-tuple of lists. ([weight1, weight2]), (quantizer1, quantizer2). Args: list_of_tuples: List of 2-tuples. Returns: 2-tuple of lists. """ list1 = [] list2 = [] for a, b in list_of_tuples: list1.append(a) list2.append(b) return list1, list2 # TODO(pulkitb): Consider asserting on full equality for quantizers. def _assert_weight_quantizers(self, quantizer_list): for quantizer in quantizer_list: self.assertIsInstance( quantizer, tfmot.quantization.keras.quantizers.LastValueQuantizer) def _assert_activation_quantizers(self, quantizer_list): for quantizer in quantizer_list: self.assertIsInstance( quantizer, tfmot.quantization.keras.quantizers.MovingAverageQuantizer) def _assert_kernel_equality(self, a, b): self.assertAllEqual(a.numpy(), b.numpy()) class Default8BitQuantizeConfigTest(tf.test.TestCase, _TestHelper): def _simple_dense_layer(self): layer = tf.keras.layers.Dense(2) layer.build(input_shape=(3,)) return layer def testGetsQuantizeWeightsAndQuantizers(self): layer = self._simple_dense_layer() quantize_config = configs.Default8BitQuantizeConfig( ['kernel'], ['activation'], False) (weights, weight_quantizers) = self._convert_list( quantize_config.get_weights_and_quantizers(layer)) self._assert_weight_quantizers(weight_quantizers) self.assertEqual([layer.kernel], weights) def testGetsQuantizeActivationsAndQuantizers(self): layer = self._simple_dense_layer() quantize_config = configs.Default8BitQuantizeConfig( ['kernel'], ['activation'], False) (activations, activation_quantizers) = self._convert_list( quantize_config.get_activations_and_quantizers(layer)) self._assert_activation_quantizers(activation_quantizers) self.assertEqual([layer.activation], activations) def testSetsQuantizeWeights(self): layer = self._simple_dense_layer() quantize_kernel = tf.keras.backend.variable( np.ones(layer.kernel.shape.as_list())) quantize_config = configs.Default8BitQuantizeConfig( ['kernel'], ['activation'], False) quantize_config.set_quantize_weights(layer, [quantize_kernel]) self._assert_kernel_equality(layer.kernel, quantize_kernel) def testSetsQuantizeActivations(self): layer = self._simple_dense_layer() quantize_activation = tf.keras.activations.relu quantize_config = configs.Default8BitQuantizeConfig( ['kernel'], ['activation'], False) quantize_config.set_quantize_activations(layer, [quantize_activation]) self.assertEqual(layer.activation, quantize_activation) def 
testSetsQuantizeWeights_ErrorOnWrongNumberOfWeights(self): layer = self._simple_dense_layer() quantize_kernel = tf.keras.backend.variable( np.ones(layer.kernel.shape.as_list())) quantize_config = configs.Default8BitQuantizeConfig( ['kernel'], ['activation'], False) with self.assertRaises(ValueError): quantize_config.set_quantize_weights(layer, []) with self.assertRaises(ValueError): quantize_config.set_quantize_weights(layer, [quantize_kernel, quantize_kernel]) def testSetsQuantizeWeights_ErrorOnWrongShapeOfWeight(self): layer = self._simple_dense_layer() quantize_kernel = tf.keras.backend.variable(np.ones([1, 2])) quantize_config = configs.Default8BitQuantizeConfig( ['kernel'], ['activation'], False) with self.assertRaises(ValueError): quantize_config.set_quantize_weights(layer, [quantize_kernel]) def testSetsQuantizeActivations_ErrorOnWrongNumberOfActivations(self): layer = self._simple_dense_layer() quantize_activation = tf.keras.activations.relu quantize_config = configs.Default8BitQuantizeConfig( ['kernel'], ['activation'], False) with self.assertRaises(ValueError): quantize_config.set_quantize_activations(layer, []) with self.assertRaises(ValueError): quantize_config.set_quantize_activations( layer, [quantize_activation, quantize_activation]) def testGetsResultQuantizers_ReturnsQuantizer(self): layer = self._simple_dense_layer() quantize_config = configs.Default8BitQuantizeConfig( [], [], True) output_quantizers = quantize_config.get_output_quantizers(layer) self.assertLen(output_quantizers, 1) self._assert_activation_quantizers(output_quantizers) def testGetsResultQuantizers_EmptyWhenFalse(self): layer = self._simple_dense_layer() quantize_config = configs.Default8BitQuantizeConfig( [], [], False) output_quantizers = quantize_config.get_output_quantizers(layer) self.assertEqual([], output_quantizers) def testSerialization(self): quantize_config = configs.Default8BitQuantizeConfig( ['kernel'], ['activation'], False) expected_config = { 'class_name': 'Default8BitQuantizeConfig', 'config': { 'weight_attrs': ['kernel'], 'activation_attrs': ['activation'], 'quantize_output': False } } serialized_quantize_config = tf_utils.serialize_keras_object( quantize_config ) self.assertEqual(expected_config, serialized_quantize_config) quantize_config_from_config = ( tf_utils.deserialize_keras_object( serialized_quantize_config, module_objects=globals(), custom_objects=configs._types_dict(), ) ) self.assertEqual(quantize_config, quantize_config_from_config) if __name__ == '__main__': tf.test.main()
6,797
31.682692
79
py
models
models-master/official/projects/qat/vision/quantization/configs.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Default 8-bit QuantizeConfigs.""" from typing import Sequence, Callable, Tuple, Any, Dict import tensorflow as tf import tensorflow_model_optimization as tfmot Quantizer = tfmot.quantization.keras.quantizers.Quantizer Layer = tf.keras.layers.Layer Activation = Callable[[tf.Tensor], tf.Tensor] WeightAndQuantizer = Tuple[tf.Variable, Quantizer] ActivationAndQuantizer = Tuple[Activation, Quantizer] class Default8BitOutputQuantizeConfig(tfmot.quantization.keras.QuantizeConfig): """QuantizeConfig which only quantizes the output from a layer.""" def get_weights_and_quantizers( self, layer: Layer) -> Sequence[WeightAndQuantizer]: return [] def get_activations_and_quantizers( self, layer: Layer) -> Sequence[ActivationAndQuantizer]: return [] def set_quantize_weights(self, layer: Layer, quantize_weights: Sequence[tf.Tensor]): pass def set_quantize_activations(self, layer: Layer, quantize_activations: Sequence[Activation]): pass def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]: return [ tfmot.quantization.keras.quantizers.MovingAverageQuantizer( num_bits=8, per_axis=False, symmetric=False, narrow_range=False) ] def get_config(self) -> Dict[str, Any]: return {} class NoOpQuantizeConfig(tfmot.quantization.keras.QuantizeConfig): """QuantizeConfig which does not quantize any part of the layer.""" def get_weights_and_quantizers( self, layer: Layer) -> Sequence[WeightAndQuantizer]: return [] def get_activations_and_quantizers( self, layer: Layer) -> Sequence[ActivationAndQuantizer]: return [] def set_quantize_weights( self, layer: Layer, quantize_weights: Sequence[tf.Tensor]): pass def set_quantize_activations( self, layer: Layer, quantize_activations: Sequence[Activation]): pass def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]: return [] def get_config(self) -> Dict[str, Any]: return {} class Default8BitQuantizeConfig(tfmot.quantization.keras.QuantizeConfig): """QuantizeConfig for non recurrent Keras layers.""" def __init__(self, weight_attrs: Sequence[str], activation_attrs: Sequence[str], quantize_output: bool): """Initializes a default 8bit quantize config.""" self.weight_attrs = weight_attrs self.activation_attrs = activation_attrs self.quantize_output = quantize_output # TODO(pulkitb): For some layers such as Conv2D, per_axis should be True. # Add mapping for which layers support per_axis. 
self.weight_quantizer = tfmot.quantization.keras.quantizers.LastValueQuantizer(
        num_bits=8, per_axis=False, symmetric=True, narrow_range=True)
    self.activation_quantizer = tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
        num_bits=8, per_axis=False, symmetric=False, narrow_range=False)

  def get_weights_and_quantizers(
      self, layer: Layer) -> Sequence[WeightAndQuantizer]:
    """See base class."""
    return [(getattr(layer, weight_attr), self.weight_quantizer)
            for weight_attr in self.weight_attrs]

  def get_activations_and_quantizers(
      self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
    """See base class."""
    return [(getattr(layer, activation_attr), self.activation_quantizer)
            for activation_attr in self.activation_attrs]

  def set_quantize_weights(
      self, layer: Layer, quantize_weights: Sequence[tf.Tensor]):
    """See base class."""
    if len(self.weight_attrs) != len(quantize_weights):
      raise ValueError(
          '`set_quantize_weights` called on layer {} with {} '
          'weight parameters, but layer expects {} values.'.format(
              layer.name, len(quantize_weights), len(self.weight_attrs)))

    for weight_attr, weight in zip(self.weight_attrs, quantize_weights):
      current_weight = getattr(layer, weight_attr)
      if current_weight.shape != weight.shape:
        raise ValueError('Existing layer weight shape {} is incompatible with '
                         'provided weight shape {}'.format(
                             current_weight.shape, weight.shape))

      setattr(layer, weight_attr, weight)

  def set_quantize_activations(
      self, layer: Layer, quantize_activations: Sequence[Activation]):
    """See base class."""
    if len(self.activation_attrs) != len(quantize_activations):
      raise ValueError(
          '`set_quantize_activations` called on layer {} with {} '
          'activation parameters, but layer expects {} values.'.format(
              layer.name, len(quantize_activations),
              len(self.activation_attrs)))

    for activation_attr, activation in zip(
        self.activation_attrs, quantize_activations):
      setattr(layer, activation_attr, activation)

  def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
    """See base class."""
    if self.quantize_output:
      return [self.activation_quantizer]
    return []

  @classmethod
  def from_config(cls, config: Dict[str, Any]) -> object:
    """Instantiates a `Default8BitQuantizeConfig` from its config.

    Args:
      config: Output of `get_config()`.

    Returns:
      A `Default8BitQuantizeConfig` instance.
    """
    return cls(**config)

  def get_config(self) -> Dict[str, Any]:
    """Get a config for this quantize config."""
    # TODO(pulkitb): Add weight and activation quantizer to config.
    # Currently it's created internally, but ideally the quantizers should be
    # part of the constructor and passed in from the registry.
return {
        'weight_attrs': self.weight_attrs,
        'activation_attrs': self.activation_attrs,
        'quantize_output': self.quantize_output
    }

  def __eq__(self, other):
    if not isinstance(other, Default8BitQuantizeConfig):
      return False

    return (self.weight_attrs == other.weight_attrs and
            self.activation_attrs == other.activation_attrs and
            self.weight_quantizer == other.weight_quantizer and
            self.activation_quantizer == other.activation_quantizer and
            self.quantize_output == other.quantize_output)

  def __ne__(self, other):
    return not self.__eq__(other)


class Default8BitConvWeightsQuantizer(
    tfmot.quantization.keras.quantizers.LastValueQuantizer):
  """Quantizer for handling weights in Conv2D/DepthwiseConv2D layers."""

  def __init__(self):
    """Construct LastValueQuantizer with params specific for TFLite Convs."""
    super(Default8BitConvWeightsQuantizer, self).__init__(
        num_bits=8, per_axis=True, symmetric=True, narrow_range=True)

  def build(self, tensor_shape: tf.TensorShape, name: str, layer: Layer):
    """Build min/max quantization variables."""
    min_weight = layer.add_weight(
        name + '_min',
        shape=(tensor_shape[-1],),
        initializer=tf.keras.initializers.Constant(-6.0),
        trainable=False)
    max_weight = layer.add_weight(
        name + '_max',
        shape=(tensor_shape[-1],),
        initializer=tf.keras.initializers.Constant(6.0),
        trainable=False)

    return {'min_var': min_weight, 'max_var': max_weight}


class NoQuantizer(tfmot.quantization.keras.quantizers.Quantizer):
  """Dummy quantizer that explicitly does not quantize."""

  def __call__(self, inputs, training, weights, **kwargs):
    return tf.identity(inputs)

  def get_config(self):
    return {}

  def build(self, tensor_shape, name, layer):
    return {}


class Default8BitConvQuantizeConfig(Default8BitQuantizeConfig):
  """QuantizeConfig for Conv2D/DepthwiseConv2D layers."""

  def __init__(self,
               weight_attrs: Sequence[str],
               activation_attrs: Sequence[str],
               quantize_output: bool):
    """Initializes default 8bit quantization config for the conv layer."""
    super().__init__(weight_attrs, activation_attrs, quantize_output)
    self.weight_quantizer = Default8BitConvWeightsQuantizer()


class Default8BitActivationQuantizeConfig(
    tfmot.quantization.keras.QuantizeConfig):
  """QuantizeConfig for keras.layers.Activation.

  `keras.layers.Activation` needs a separate `QuantizeConfig` since the
  decision to quantize depends on the specific activation type.
  """

  def _assert_activation_layer(self, layer: Layer):
    if not isinstance(layer, tf.keras.layers.Activation):
      raise RuntimeError(
          'Default8BitActivationQuantizeConfig can only be used with '
          '`keras.layers.Activation`.')

  def get_weights_and_quantizers(
      self, layer: Layer) -> Sequence[WeightAndQuantizer]:
    """See base class."""
    self._assert_activation_layer(layer)
    return []

  def get_activations_and_quantizers(
      self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
    """See base class."""
    self._assert_activation_layer(layer)
    return []

  def set_quantize_weights(
      self, layer: Layer, quantize_weights: Sequence[tf.Tensor]):
    """See base class."""
    self._assert_activation_layer(layer)

  def set_quantize_activations(
      self, layer: Layer, quantize_activations: Sequence[Activation]):
    """See base class."""
    self._assert_activation_layer(layer)

  def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
    """See base class."""
    self._assert_activation_layer(layer)

    if not hasattr(layer.activation, '__name__'):
      raise ValueError('Activation {} not supported by '
                       'Default8BitActivationQuantizeConfig.'.format(
                           layer.activation))

    # This code is copied from the TFMOT repo, with relu6 added to support
    # MobileNet.
if layer.activation.__name__ in ['relu', 'relu6', 'swish', 'hard_swish']: # 'relu' should generally get fused into the previous layer. return [tfmot.quantization.keras.quantizers.MovingAverageQuantizer( num_bits=8, per_axis=False, symmetric=False, narrow_range=False)] elif layer.activation.__name__ in [ 'linear', 'softmax', 'sigmoid', 'hard_sigmoid' ]: return [] raise ValueError('Activation {} not supported by ' 'Default8BitActivationQuantizeConfig.'.format( layer.activation)) def get_config(self) -> Dict[str, Any]: """Get a config for this quantizer config.""" return {} def _types_dict(): return { 'Default8BitOutputQuantizeConfig': Default8BitOutputQuantizeConfig, 'NoOpQuantizeConfig': NoOpQuantizeConfig, 'Default8BitQuantizeConfig': Default8BitQuantizeConfig, 'Default8BitConvWeightsQuantizer': Default8BitConvWeightsQuantizer, 'Default8BitConvQuantizeConfig': Default8BitConvQuantizeConfig, 'Default8BitActivationQuantizeConfig': Default8BitActivationQuantizeConfig, }
11,684
33.571006
91
py
models
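A small sketch of how Default8BitActivationQuantizeConfig above decides whether to emit an output quantizer; 'relu' gets one while 'linear' does not:

import tensorflow as tf

from official.projects.qat.vision.quantization import configs

cfg = configs.Default8BitActivationQuantizeConfig()
assert len(cfg.get_output_quantizers(tf.keras.layers.Activation('relu'))) == 1
assert not cfg.get_output_quantizers(tf.keras.layers.Activation('linear'))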
models-master/official/projects/qat/vision/modeling/factory_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for factory.py.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.projects.qat.vision.configs import common from official.projects.qat.vision.modeling import factory as qat_factory from official.projects.qat.vision.modeling.heads import dense_prediction_heads as qat_dense_prediction_heads from official.vision.configs import backbones from official.vision.configs import decoders from official.vision.configs import image_classification as classification_cfg from official.vision.configs import retinanet as retinanet_cfg from official.vision.configs import semantic_segmentation as semantic_segmentation_cfg from official.vision.modeling import factory from official.vision.modeling.decoders import fpn from official.vision.modeling.heads import dense_prediction_heads class ClassificationModelBuilderTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( ('resnet', (224, 224), 5e-5), ('resnet', (224, 224), None), ('resnet', (None, None), 5e-5), ('resnet', (None, None), None), ('mobilenet', (224, 224), 5e-5), ('mobilenet', (224, 224), None), ('mobilenet', (None, None), 5e-5), ('mobilenet', (None, None), None), ) def test_builder(self, backbone_type, input_size, weight_decay): num_classes = 2 input_specs = tf.keras.layers.InputSpec( shape=[None, input_size[0], input_size[1], 3]) model_config = classification_cfg.ImageClassificationModel( num_classes=num_classes, backbone=backbones.Backbone(type=backbone_type)) l2_regularizer = ( tf.keras.regularizers.l2(weight_decay) if weight_decay else None) model = factory.build_classification_model( input_specs=input_specs, model_config=model_config, l2_regularizer=l2_regularizer) quantization_config = common.Quantization() _ = qat_factory.build_qat_classification_model( model=model, input_specs=input_specs, quantization=quantization_config, model_config=model_config, l2_regularizer=l2_regularizer) class RetinaNetBuilderTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( ('spinenet_mobile', 'identity', (640, 640), False, False), ('spinenet_mobile', 'identity', (640, 640), True, False), ('mobilenet', 'fpn', (640, 640), True, False), ('mobilenet', 'fpn', (640, 640), True, True), ) def test_builder(self, backbone_type, decoder_type, input_size, quantize_detection_head, quantize_detection_decoder): num_classes = 2 input_specs = tf.keras.layers.InputSpec( shape=[None, input_size[0], input_size[1], 3]) if backbone_type == 'spinenet_mobile': backbone_config = backbones.Backbone( type=backbone_type, spinenet_mobile=backbones.SpineNetMobile( model_id='49', stochastic_depth_drop_rate=0.2, min_level=3, max_level=7, use_keras_upsampling_2d=True)) elif backbone_type == 'mobilenet': backbone_config = backbones.Backbone( type=backbone_type, mobilenet=backbones.MobileNet( model_id='MobileNetV2', filter_size_scale=1.0)) else: raise ValueError( 'backbone_type {} is not 
supported'.format(backbone_type))

    if decoder_type == 'identity':
      decoder_config = decoders.Decoder(type=decoder_type)
    elif decoder_type == 'fpn':
      decoder_config = decoders.Decoder(
          type=decoder_type,
          fpn=decoders.FPN(
              num_filters=128, use_separable_conv=True, use_keras_layer=True))
    else:
      raise ValueError(
          'decoder_type {} is not supported'.format(decoder_type))

    model_config = retinanet_cfg.RetinaNet(
        num_classes=num_classes,
        input_size=[input_size[0], input_size[1], 3],
        backbone=backbone_config,
        decoder=decoder_config,
        head=retinanet_cfg.RetinaNetHead(
            attribute_heads=None, use_separable_conv=True))

    l2_regularizer = tf.keras.regularizers.l2(5e-5)

    # Build the original float32 retinanet model.
    model = factory.build_retinanet(
        input_specs=input_specs,
        model_config=model_config,
        l2_regularizer=l2_regularizer)

    # Call the model with dummy input to build the head part.
    dummy_input = tf.zeros([1] + model_config.input_size)
    model(dummy_input, training=True)

    # Build the QAT model from the original model with quantization config.
    qat_model = qat_factory.build_qat_retinanet(
        model=model,
        quantization=common.Quantization(
            quantize_detection_decoder=quantize_detection_decoder,
            quantize_detection_head=quantize_detection_head),
        model_config=model_config)

    if quantize_detection_head:
      # The head becomes a RetinaNetHeadQuantized when we apply quantization.
      self.assertIsInstance(qat_model.head,
                            qat_dense_prediction_heads.RetinaNetHeadQuantized)
    else:
      # The head stays a RetinaNetHead if we don't apply quantization to the
      # head part.
      self.assertIsInstance(
          qat_model.head, dense_prediction_heads.RetinaNetHead)
      self.assertNotIsInstance(
          qat_model.head, qat_dense_prediction_heads.RetinaNetHeadQuantized)

    if decoder_type == 'fpn':
      if quantize_detection_decoder:
        # The FPN decoder becomes a general Keras functional model after
        # applying quantization.
self.assertNotIsInstance(qat_model.decoder, fpn.FPN) else: self.assertIsInstance(qat_model.decoder, fpn.FPN) class SegmentationModelBuilderTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( ('mobilenet', (512, 512), 5e-5),) def test_deeplabv3_builder(self, backbone_type, input_size, weight_decay): num_classes = 21 input_specs = tf.keras.layers.InputSpec( shape=[None, input_size[0], input_size[1], 3]) model_config = semantic_segmentation_cfg.SemanticSegmentationModel( num_classes=num_classes, backbone=backbones.Backbone( type=backbone_type, mobilenet=backbones.MobileNet( model_id='MobileNetV2', output_stride=16)), decoder=decoders.Decoder( type='aspp', aspp=decoders.ASPP( level=4, num_filters=256, dilation_rates=[], spp_layer_version='v1', output_tensor=True)), head=semantic_segmentation_cfg.SegmentationHead( level=4, low_level=2, num_convs=1, upsample_factor=2, use_depthwise_convolution=True)) l2_regularizer = ( tf.keras.regularizers.l2(weight_decay) if weight_decay else None) model = factory.build_segmentation_model( input_specs=input_specs, model_config=model_config, l2_regularizer=l2_regularizer) quantization_config = common.Quantization() _ = qat_factory.build_qat_segmentation_model( model=model, quantization=quantization_config, input_specs=input_specs) @parameterized.parameters( ('mobilenet', (512, 1024), 5e-5),) def test_deeplabv3plus_builder(self, backbone_type, input_size, weight_decay): num_classes = 19 input_specs = tf.keras.layers.InputSpec( shape=[None, input_size[0], input_size[1], 3]) model_config = semantic_segmentation_cfg.SemanticSegmentationModel( num_classes=num_classes, backbone=backbones.Backbone( type=backbone_type, mobilenet=backbones.MobileNet( model_id='MobileNetV2', output_stride=16, output_intermediate_endpoints=True)), decoder=decoders.Decoder( type='aspp', aspp=decoders.ASPP( level=4, num_filters=256, dilation_rates=[], pool_kernel_size=[512, 1024], use_depthwise_convolution=False, spp_layer_version='v1', output_tensor=True)), head=semantic_segmentation_cfg.SegmentationHead( level=4, num_convs=2, feature_fusion='deeplabv3plus', use_depthwise_convolution=True, low_level='2/depthwise', low_level_num_filters=48, prediction_kernel_size=1, upsample_factor=1, num_filters=256)) l2_regularizer = ( tf.keras.regularizers.l2(weight_decay) if weight_decay else None) model = factory.build_segmentation_model( input_specs=input_specs, model_config=model_config, l2_regularizer=l2_regularizer) quantization_config = common.Quantization() _ = qat_factory.build_qat_segmentation_model( model=model, quantization=quantization_config, input_specs=input_specs) if __name__ == '__main__': tf.test.main()
9,737
37.642857
108
py
models
models-master/official/projects/qat/vision/modeling/factory.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Factory methods to build models.""" # Import libraries import tensorflow as tf import tensorflow_model_optimization as tfmot from official.projects.qat.vision.configs import common from official.projects.qat.vision.modeling import segmentation_model as qat_segmentation_model from official.projects.qat.vision.modeling.heads import dense_prediction_heads as dense_prediction_heads_qat from official.projects.qat.vision.modeling.layers import nn_layers as qat_nn_layers from official.projects.qat.vision.n_bit import schemes as n_bit_schemes from official.projects.qat.vision.quantization import configs as qat_configs from official.projects.qat.vision.quantization import helper from official.projects.qat.vision.quantization import schemes from official.vision import configs from official.vision.modeling import classification_model from official.vision.modeling import retinanet_model from official.vision.modeling.decoders import aspp from official.vision.modeling.decoders import fpn from official.vision.modeling.heads import dense_prediction_heads from official.vision.modeling.heads import segmentation_heads from official.vision.modeling.layers import nn_layers def build_qat_classification_model( model: tf.keras.Model, quantization: common.Quantization, input_specs: tf.keras.layers.InputSpec, model_config: configs.image_classification.ImageClassificationModel, l2_regularizer: tf.keras.regularizers.Regularizer = None ) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras """Apply model optimization techniques. Args: model: The model applying model optimization techniques. quantization: The Quantization config. input_specs: `tf.keras.layers.InputSpec` specs of the input tensor. model_config: The model config. l2_regularizer: tf.keras.regularizers.Regularizer object. Default to None. Returns: model: The model that applied optimization techniques. 
""" original_checkpoint = quantization.pretrained_original_checkpoint if original_checkpoint: ckpt = tf.train.Checkpoint( model=model, **model.checkpoint_items) status = ckpt.read(original_checkpoint) status.expect_partial().assert_existing_objects_matched() scope_dict = { 'L2': tf.keras.regularizers.l2, } with tfmot.quantization.keras.quantize_scope(scope_dict): annotated_backbone = tfmot.quantization.keras.quantize_annotate_model( model.backbone) if quantization.change_num_bits: backbone = tfmot.quantization.keras.quantize_apply( annotated_backbone, scheme=n_bit_schemes.DefaultNBitQuantizeScheme( num_bits_weight=quantization.num_bits_weight, num_bits_activation=quantization.num_bits_activation)) else: backbone = tfmot.quantization.keras.quantize_apply( annotated_backbone, scheme=schemes.Default8BitQuantizeScheme()) norm_activation_config = model_config.norm_activation backbone_optimized_model = classification_model.ClassificationModel( backbone=backbone, num_classes=model_config.num_classes, input_specs=input_specs, dropout_rate=model_config.dropout_rate, kernel_regularizer=l2_regularizer, add_head_batch_norm=model_config.add_head_batch_norm, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon) for from_layer, to_layer in zip( model.layers, backbone_optimized_model.layers): if from_layer != model.backbone: to_layer.set_weights(from_layer.get_weights()) with tfmot.quantization.keras.quantize_scope(scope_dict): def apply_quantization_to_dense(layer): if isinstance(layer, (tf.keras.layers.Dense, tf.keras.layers.Dropout, tf.keras.layers.GlobalAveragePooling2D)): return tfmot.quantization.keras.quantize_annotate_layer(layer) return layer backbone_optimized_model.use_legacy_config = True annotated_model = tf.keras.models.clone_model( backbone_optimized_model, clone_function=apply_quantization_to_dense, ) annotated_model.use_legacy_config = True if quantization.change_num_bits: optimized_model = tfmot.quantization.keras.quantize_apply( annotated_model, scheme=n_bit_schemes.DefaultNBitQuantizeScheme( num_bits_weight=quantization.num_bits_weight, num_bits_activation=quantization.num_bits_activation)) else: optimized_model = tfmot.quantization.keras.quantize_apply( annotated_model) return optimized_model def _clone_function_for_fpn(layer): if isinstance(layer, ( tf.keras.layers.BatchNormalization, tf.keras.layers.experimental.SyncBatchNormalization)): return tfmot.quantization.keras.quantize_annotate_layer( qat_nn_layers.BatchNormalizationWrapper(layer), qat_configs.Default8BitOutputQuantizeConfig()) if isinstance(layer, tf.keras.layers.UpSampling2D): return layer return tfmot.quantization.keras.quantize_annotate_layer(layer) def build_qat_retinanet( model: tf.keras.Model, quantization: common.Quantization, model_config: configs.retinanet.RetinaNet) -> tf.keras.Model: """Applies quantization aware training for RetinaNet model. Args: model: The model applying quantization aware training. quantization: The Quantization config. model_config: The model config. Returns: The model that applied optimization techniques. 
""" original_checkpoint = quantization.pretrained_original_checkpoint if original_checkpoint is not None: ckpt = tf.train.Checkpoint( model=model, **model.checkpoint_items) status = ckpt.read(original_checkpoint) status.expect_partial().assert_existing_objects_matched() scope_dict = { 'L2': tf.keras.regularizers.l2, 'BatchNormalizationWrapper': qat_nn_layers.BatchNormalizationWrapper, } with tfmot.quantization.keras.quantize_scope(scope_dict): annotated_backbone = tfmot.quantization.keras.quantize_annotate_model( model.backbone) optimized_backbone = tfmot.quantization.keras.quantize_apply( annotated_backbone, scheme=schemes.Default8BitQuantizeScheme()) decoder = model.decoder if quantization.quantize_detection_decoder: if not isinstance(decoder, fpn.FPN): raise ValueError('Currently only supports FPN.') decoder = tf.keras.models.clone_model( decoder, clone_function=_clone_function_for_fpn, ) decoder = tfmot.quantization.keras.quantize_apply(decoder) decoder = tfmot.quantization.keras.remove_input_range(decoder) head = model.head if quantization.quantize_detection_head: if not isinstance(head, dense_prediction_heads.RetinaNetHead): raise ValueError('Currently only supports RetinaNetHead.') head = ( dense_prediction_heads_qat.RetinaNetHeadQuantized.from_config( head.get_config())) optimized_model = retinanet_model.RetinaNetModel( optimized_backbone, decoder, head, model.detection_generator, min_level=model_config.min_level, max_level=model_config.max_level, num_scales=model_config.anchor.num_scales, aspect_ratios=model_config.anchor.aspect_ratios, anchor_size=model_config.anchor.anchor_size) if quantization.quantize_detection_head: # Call the model with dummy input to build the head part. dummpy_input = tf.zeros([1] + model_config.input_size) height, width, _ = model_config.input_size image_shape = [[height, width]] optimized_model.call(dummpy_input, image_shape=image_shape, training=False) helper.copy_original_weights(model.head, optimized_model.head) return optimized_model def build_qat_segmentation_model( model: tf.keras.Model, quantization: common.Quantization, input_specs: tf.keras.layers.InputSpec) -> tf.keras.Model: """Applies quantization aware training for segmentation model. Args: model: The model applying quantization aware training. quantization: The Quantization config. input_specs: The shape specifications of input tensor. Returns: The model that applied optimization techniques. """ original_checkpoint = quantization.pretrained_original_checkpoint if original_checkpoint is not None: ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items) status = ckpt.read(original_checkpoint) status.expect_partial().assert_existing_objects_matched() # Build quantization compatible model. model = qat_segmentation_model.SegmentationModelQuantized( model.backbone, model.decoder, model.head, input_specs) scope_dict = { 'L2': tf.keras.regularizers.l2, } model.use_legacy_config = True # Ensures old Keras serialization format # Apply QAT to backbone (a tf.keras.Model) first. with tfmot.quantization.keras.quantize_scope(scope_dict): annotated_backbone = tfmot.quantization.keras.quantize_annotate_model( model.backbone) optimized_backbone = tfmot.quantization.keras.quantize_apply( annotated_backbone, scheme=schemes.Default8BitQuantizeScheme()) backbone_optimized_model = qat_segmentation_model.SegmentationModelQuantized( optimized_backbone, model.decoder, model.head, input_specs) # Copy over all remaining layers. 
for from_layer, to_layer in zip(model.layers, backbone_optimized_model.layers): if from_layer != model.backbone: to_layer.set_weights(from_layer.get_weights()) with tfmot.quantization.keras.quantize_scope(scope_dict): def apply_quantization_to_layers(layer): if isinstance(layer, (segmentation_heads.SegmentationHead, nn_layers.SpatialPyramidPooling, aspp.ASPP)): return tfmot.quantization.keras.quantize_annotate_layer(layer) return layer backbone_optimized_model.use_legacy_config = True annotated_model = tf.keras.models.clone_model( backbone_optimized_model, clone_function=apply_quantization_to_layers, ) annotated_model.use_legacy_config = True optimized_model = tfmot.quantization.keras.quantize_apply( annotated_model, scheme=schemes.Default8BitQuantizeScheme()) return optimized_model
11,044
39.163636
108
py
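The factory functions in this file all follow the same TFMOT workflow: annotate a sub-model (or selected layers via a clone_function), then call quantize_apply. A minimal, self-contained sketch of that annotate-then-apply pattern, using a hypothetical toy model in place of the real backbones and heads:

import tensorflow as tf
import tensorflow_model_optimization as tfmot

def make_toy_model():
  # Hypothetical stand-in for a backbone + classification head.
  inputs = tf.keras.Input(shape=(32, 32, 3))
  x = tf.keras.layers.Conv2D(8, 3, padding='same', activation='relu')(inputs)
  x = tf.keras.layers.GlobalAveragePooling2D()(x)
  outputs = tf.keras.layers.Dense(10)(x)
  return tf.keras.Model(inputs, outputs)

def annotate_dense(layer):
  # Mirror the selective clone_function pattern used by the factories:
  # only Dense layers get quantization annotations here.
  if isinstance(layer, tf.keras.layers.Dense):
    return tfmot.quantization.keras.quantize_annotate_layer(layer)
  return layer

model = make_toy_model()
annotated = tf.keras.models.clone_model(model, clone_function=annotate_dense)
# quantize_apply rewrites annotated layers with fake-quant wrappers.
qat_model = tfmot.quantization.keras.quantize_apply(annotated)
qat_model.summary()

The same two calls underlie the classification, RetinaNet, and segmentation factories above; they differ only in which layers get annotated and which quantize scheme is passed.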
models
models-master/official/projects/qat/vision/modeling/segmentation_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Build segmentation models.""" from typing import Any, Mapping, Union # Import libraries import tensorflow as tf layers = tf.keras.layers @tf.keras.utils.register_keras_serializable(package='Vision') class SegmentationModelQuantized(tf.keras.Model): """A Segmentation class model. Input images are passed through backbone first. Decoder network is then applied, and finally, segmentation head is applied on the output of the decoder network. Layers such as ASPP should be part of decoder. Any feature fusion is done as part of the segmentation head (i.e. deeplabv3+ feature fusion is not part of the decoder, instead it is part of the segmentation head). This way, different feature fusion techniques can be combined with different backbones, and decoders. """ def __init__(self, backbone: tf.keras.Model, decoder: tf.keras.layers.Layer, head: tf.keras.layers.Layer, input_specs: tf.keras.layers.InputSpec, **kwargs): """Segmentation initialization function. Args: backbone: a backbone network. decoder: a decoder network. E.g. FPN. head: segmentation head. input_specs: The shape specifications of input tensor. **kwargs: keyword arguments to be passed. """ inputs = tf.keras.Input(shape=input_specs.shape[1:], name=input_specs.name) backbone_features = backbone(inputs) if decoder: backbone_feature = backbone_features[str(decoder.get_config()['level'])] decoder_feature = decoder(backbone_feature) else: decoder_feature = backbone_features backbone_feature = backbone_features[str(head.get_config()['low_level'])] x = {'logits': head((backbone_feature, decoder_feature))} super().__init__(inputs=inputs, outputs=x, **kwargs) self._config_dict = { 'backbone': backbone, 'decoder': decoder, 'head': head, } self.backbone = backbone self.decoder = decoder self.head = head @property def checkpoint_items( self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]: """Returns a dictionary of items to be additionally checkpointed.""" items = dict(backbone=self.backbone, head=self.head) if self.decoder is not None: items.update(decoder=self.decoder) return items def get_config(self) -> Mapping[str, Any]: return self._config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
3,077
35.211765
79
py
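The checkpoint_items property above is what lets the QAT factories restore a pretrained float checkpoint piecewise into a structurally different quantized model. A small sketch of that save/restore handshake, with a toy stand-in network (all names here are hypothetical):

import tensorflow as tf

class ToyModel(tf.keras.Model):
  """Toy model mirroring the checkpoint_items convention above."""

  def __init__(self, **kwargs):
    super().__init__(**kwargs)
    self.backbone = tf.keras.layers.Dense(4, name='backbone')
    self.head = tf.keras.layers.Dense(2, name='head')

  @property
  def checkpoint_items(self):
    # Expose sub-networks so a checkpoint written from the float model
    # can be read back into another model that shares these pieces.
    return dict(backbone=self.backbone, head=self.head)

  def call(self, x):
    return self.head(self.backbone(x))

model = ToyModel()
model(tf.zeros([1, 8]))  # Build variables before checkpointing.
ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items)
path = ckpt.write('/tmp/toy_ckpt')
status = ckpt.read(path)
status.expect_partial().assert_existing_objects_matched()

This is exactly the tf.train.Checkpoint(model=model, **model.checkpoint_items) pattern the factory functions use when quantization.pretrained_original_checkpoint is set.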
models
models-master/official/projects/qat/vision/modeling/layers/nn_layers.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains common building blocks for neural networks.""" import enum from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union import tensorflow as tf import tensorflow_model_optimization as tfmot from official.modeling import tf_utils from official.projects.qat.vision.quantization import configs from official.projects.qat.vision.quantization import helper from official.vision.modeling import heads from official.vision.modeling.decoders import aspp from official.vision.modeling.layers import nn_layers # Type annotations. States = Dict[str, tf.Tensor] Activation = Union[str, Callable] # String constants. class FeatureFusion(str, enum.Enum): PYRAMID_FUSION = 'pyramid_fusion' PANOPTIC_FPN_FUSION = 'panoptic_fpn_fusion' DEEPLABV3PLUS = 'deeplabv3plus' DEEPLABV3PLUS_SUM_TO_MERGE = 'deeplabv3plus_sum_to_merge' @tf.keras.utils.register_keras_serializable(package='Vision') class SqueezeExcitationQuantized( helper.LayerQuantizerHelper, tf.keras.layers.Layer): """Creates a squeeze and excitation layer.""" def __init__(self, in_filters, out_filters, se_ratio, divisible_by=1, use_3d_input=False, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, activation='relu', gating_activation='sigmoid', round_down_protect=True, **kwargs): """Initializes a squeeze and excitation layer. Args: in_filters: An `int` number of filters of the input tensor. out_filters: An `int` number of filters of the output tensor. se_ratio: A `float` or None. If not None, se ratio for the squeeze and excitation layer. divisible_by: An `int` that ensures all inner dimensions are divisible by this number. use_3d_input: A `bool` of whether input is 2D or 3D image. kernel_initializer: A `str` of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d. Default to None. activation: A `str` name of the activation function. gating_activation: A `str` name of the activation function for final gating function. round_down_protect: A `bool` of whether round down more than 10% will be allowed. **kwargs: Additional keyword arguments to be passed. 
""" super().__init__(**kwargs) self._in_filters = in_filters self._out_filters = out_filters self._se_ratio = se_ratio self._divisible_by = divisible_by self._round_down_protect = round_down_protect self._use_3d_input = use_3d_input self._activation = activation self._gating_activation = gating_activation self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer if tf.keras.backend.image_data_format() == 'channels_last': if not use_3d_input: self._spatial_axis = [1, 2] else: self._spatial_axis = [1, 2, 3] else: if not use_3d_input: self._spatial_axis = [2, 3] else: self._spatial_axis = [2, 3, 4] def _create_gating_activation_layer(self): if self._gating_activation == 'hard_sigmoid': # Convert hard_sigmoid activation to quantizable keras layers so each op # can be properly quantized. # Formula is hard_sigmoid(x) = relu6(x + 3) * 0.16667. self._add_quantizer('add_three') self._add_quantizer('divide_six') self._relu6 = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation('relu6', use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) else: self._gating_activation_layer = ( tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation( self._gating_activation, use_keras_layer=True ), configs.Default8BitActivationQuantizeConfig(), ) ) def _apply_gating_activation_layer( self, x: tf.Tensor, training: bool) -> tf.Tensor: if self._gating_activation == 'hard_sigmoid': x = self._apply_quantizer('add_three', x + 3.0, training) x = self._relu6(x) x = self._apply_quantizer('divide_six', x * 1.6667, training) else: x = self._gating_activation_layer(x) return x def build(self, input_shape): num_reduced_filters = nn_layers.make_divisible( max(1, int(self._in_filters * self._se_ratio)), divisor=self._divisible_by, round_down_protect=self._round_down_protect) self._se_reduce = helper.Conv2DQuantized( filters=num_reduced_filters, kernel_size=1, strides=1, padding='same', use_bias=True, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=helper.NoOpActivation()) self._se_expand = helper.Conv2DOutputQuantized( filters=self._out_filters, kernel_size=1, strides=1, padding='same', use_bias=True, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=helper.NoOpActivation()) self._multiply = tfmot.quantization.keras.QuantizeWrapperV2( tf.keras.layers.Multiply(), configs.Default8BitQuantizeConfig([], [], True)) self._reduce_mean_quantizer = ( tfmot.quantization.keras.quantizers.MovingAverageQuantizer( num_bits=8, per_axis=False, symmetric=False, narrow_range=False)) self._reduce_mean_quantizer_vars = self._reduce_mean_quantizer.build( None, 'reduce_mean_quantizer_vars', self) self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(self._activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) self._create_gating_activation_layer() self._build_quantizer_vars() super().build(input_shape) def get_config(self): config = { 'in_filters': self._in_filters, 'out_filters': self._out_filters, 'se_ratio': self._se_ratio, 'divisible_by': self._divisible_by, 'use_3d_input': self._use_3d_input, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': 
self._bias_regularizer, 'activation': self._activation, 'gating_activation': self._gating_activation, 'round_down_protect': self._round_down_protect, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs, training=None): x = tf.reduce_mean(inputs, self._spatial_axis, keepdims=True) x = self._reduce_mean_quantizer( x, training, self._reduce_mean_quantizer_vars) x = self._activation_layer(self._se_reduce(x)) x = self._apply_gating_activation_layer(self._se_expand(x), training) x = self._multiply([x, inputs]) return x @tf.keras.utils.register_keras_serializable(package='Vision') class SegmentationHeadQuantized(tf.keras.layers.Layer): """Creates a segmentation head.""" def __init__( self, num_classes: int, level: Union[int, str], num_convs: int = 2, num_filters: int = 256, use_depthwise_convolution: bool = False, prediction_kernel_size: int = 1, upsample_factor: int = 1, feature_fusion: Optional[str] = None, decoder_min_level: Optional[int] = None, decoder_max_level: Optional[int] = None, low_level: int = 2, low_level_num_filters: int = 48, num_decoder_filters: int = 256, activation: str = 'relu', logit_activation: Optional[str] = None, use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a segmentation head. Args: num_classes: An `int` number of mask classification categories. The number of classes does not include background class. level: An `int` or `str`, level to use to build segmentation head. num_convs: An `int` number of stacked convolution before the last prediction layer. num_filters: An `int` number to specify the number of filters used. Default is 256. use_depthwise_convolution: A bool to specify if use depthwise separable convolutions. prediction_kernel_size: An `int` number to specify the kernel size of the prediction layer. upsample_factor: An `int` number to specify the upsampling factor to generate finer mask. Default 1 means no upsampling is applied. feature_fusion: One of `deeplabv3plus`, `deeplabv3plus_sum_to_merge`, `pyramid_fusion`, or None. If `deeplabv3plus`, features from decoder_features[level] will be fused with low level feature maps from backbone. If `pyramid_fusion`, multiscale features will be resized and fused at the target level. decoder_min_level: An `int` of minimum level from decoder to use in feature fusion. It is only used when feature_fusion is set to `panoptic_fpn_fusion`. decoder_max_level: An `int` of maximum level from decoder to use in feature fusion. It is only used when feature_fusion is set to `panoptic_fpn_fusion`. low_level: An `int` of backbone level to be used for feature fusion. It is used when feature_fusion is set to `deeplabv3plus`. low_level_num_filters: An `int` of reduced number of filters for the low level features before fusing it with higher level features. It is only used when feature_fusion is set to `deeplabv3plus`. num_decoder_filters: An `int` of number of filters in the decoder outputs. It is only used when feature_fusion is set to `panoptic_fpn_fusion`. activation: A `str` that indicates which activation is used, e.g. 'relu', 'swish', etc. logit_activation: Unused. use_sync_bn: A `bool` that indicates whether to use synchronized batch normalization across different replicas. norm_momentum: A `float` of normalization momentum for the moving average. 
norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. **kwargs: Additional keyword arguments to be passed. """ super().__init__(**kwargs) self._config_dict = { 'num_classes': num_classes, 'level': level, 'num_convs': num_convs, 'num_filters': num_filters, 'use_depthwise_convolution': use_depthwise_convolution, 'prediction_kernel_size': prediction_kernel_size, 'upsample_factor': upsample_factor, 'feature_fusion': feature_fusion, 'decoder_min_level': decoder_min_level, 'decoder_max_level': decoder_max_level, 'low_level': low_level, 'low_level_num_filters': low_level_num_filters, 'num_decoder_filters': num_decoder_filters, 'activation': activation, 'use_sync_bn': use_sync_bn, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer, } if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) if logit_activation: raise ValueError('Unused logit_activation option inherited from ' 'vision SegmentationHead modeling config.') def build(self, input_shape: Sequence[tf.TensorShape]): """Creates the variables of the segmentation head.""" # When input_shape is a list/tuple, the first corresponds to backbone # features used for resizing the decoder features (the second) if feature # fusion type is `deeplabv3plus`. backbone_shape = input_shape[0] use_depthwise_convolution = self._config_dict['use_depthwise_convolution'] random_initializer = tf.keras.initializers.RandomNormal(stddev=0.01) conv_kwargs = { 'kernel_size': 3 if not use_depthwise_convolution else 1, 'padding': 'same', 'use_bias': False, 'kernel_initializer': random_initializer, 'kernel_regularizer': self._config_dict['kernel_regularizer'], } norm_layer = ( tf.keras.layers.experimental.SyncBatchNormalization if self._config_dict['use_sync_bn'] else tf.keras.layers.BatchNormalization) norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer) norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer) norm = helper.norm_by_activation(self._config_dict['activation'], norm_with_quantize, norm_no_quantize) bn_kwargs = { 'axis': self._bn_axis, 'momentum': self._config_dict['norm_momentum'], 'epsilon': self._config_dict['norm_epsilon'], } if self._config_dict['feature_fusion'] in [ FeatureFusion.DEEPLABV3PLUS, FeatureFusion.DEEPLABV3PLUS_SUM_TO_MERGE ]: # Deeplabv3+ feature fusion layers. self._dlv3p_conv = helper.Conv2DQuantized( kernel_size=1, padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer(random_initializer), kernel_regularizer=self._config_dict['kernel_regularizer'], name='segmentation_head_deeplabv3p_fusion_conv', filters=self._config_dict['low_level_num_filters'], activation=helper.NoOpActivation()) self._dlv3p_norm = norm( name='segmentation_head_deeplabv3p_fusion_norm', **bn_kwargs) # Segmentation head layers. 
self._convs = [] self._norms = [] for i in range(self._config_dict['num_convs']): if use_depthwise_convolution: self._convs.append( helper.DepthwiseConv2DQuantized( name='segmentation_head_depthwise_conv_{}'.format(i), kernel_size=3, padding='same', use_bias=False, depthwise_initializer=tf_utils.clone_initializer( random_initializer), depthwise_regularizer=self._config_dict['kernel_regularizer'], depth_multiplier=1, activation=helper.NoOpActivation())) norm_name = 'segmentation_head_depthwise_norm_{}'.format(i) self._norms.append(norm(name=norm_name, **bn_kwargs)) conv_name = 'segmentation_head_conv_{}'.format(i) self._convs.append( helper.Conv2DQuantized( name=conv_name, filters=self._config_dict['num_filters'], activation=helper.NoOpActivation(), **conv_kwargs)) norm_name = 'segmentation_head_norm_{}'.format(i) self._norms.append(norm(name=norm_name, **bn_kwargs)) self._classifier = helper.Conv2DOutputQuantized( name='segmentation_output', filters=self._config_dict['num_classes'], kernel_size=self._config_dict['prediction_kernel_size'], padding='same', bias_initializer=tf.zeros_initializer(), kernel_initializer=tf_utils.clone_initializer(random_initializer), kernel_regularizer=self._config_dict['kernel_regularizer'], bias_regularizer=self._config_dict['bias_regularizer'], activation=helper.NoOpActivation()) self._upsampling_layer = helper.UpSampling2DQuantized( size=(self._config_dict['upsample_factor'], self._config_dict['upsample_factor']), interpolation='nearest') self._resizing_layer = helper.ResizingQuantized( backbone_shape[1], backbone_shape[2], interpolation='bilinear') self._concat_layer = helper.ConcatenateQuantized(axis=self._bn_axis) self._add_layer = tfmot.quantization.keras.QuantizeWrapperV2( tf.keras.layers.Add(), configs.Default8BitQuantizeConfig([], [], True)) super().build(input_shape) def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]], Union[tf.Tensor, Mapping[str, tf.Tensor]]]): """Forward pass of the segmentation head. It supports both a tuple of 2 tensors or 2 dictionaries. The first is backbone endpoints, and the second is decoder endpoints. When inputs are tensors, they are from a single level of feature maps. When inputs are dictionaries, they contain multiple levels of feature maps, where the key is the index of feature map. Args: inputs: A tuple of 2 feature map tensors of shape [batch, height_l, width_l, channels] or 2 dictionaries of tensors: - key: A `str` of the level of the multilevel features. - values: A `tf.Tensor` of the feature map tensors, whose shape is [batch, height_l, width_l, channels]. Returns: segmentation prediction mask: A `tf.Tensor` of the segmentation mask scores predicted from input features. """ if self._config_dict['feature_fusion'] in ( FeatureFusion.PYRAMID_FUSION, FeatureFusion.PANOPTIC_FPN_FUSION): raise ValueError( 'The feature fusion method `pyramid_fusion` is not supported in QAT.') backbone_output = inputs[0] decoder_output = inputs[1] if self._config_dict['feature_fusion'] in { FeatureFusion.DEEPLABV3PLUS, FeatureFusion.DEEPLABV3PLUS_SUM_TO_MERGE }: # deeplabv3+ feature fusion. 
x = decoder_output[str(self._config_dict['level'])] if isinstance( decoder_output, dict) else decoder_output y = backbone_output[str(self._config_dict['low_level'])] if isinstance( backbone_output, dict) else backbone_output y = self._dlv3p_norm(self._dlv3p_conv(y)) y = self._activation_layer(y) x = self._resizing_layer(x) x = tf.cast(x, dtype=y.dtype) if self._config_dict['feature_fusion'] == FeatureFusion.DEEPLABV3PLUS: x = self._concat_layer([x, y]) else: x = self._add_layer([x, y]) else: x = decoder_output[str(self._config_dict['level'])] if isinstance( decoder_output, dict) else decoder_output for conv, norm in zip(self._convs, self._norms): x = conv(x) x = norm(x) x = self._activation_layer(x) if self._config_dict['upsample_factor'] > 1: # Use keras layer for nearest upsampling so it is QAT compatible. x = self._upsampling_layer(x) return self._classifier(x) def get_config(self): base_config = super().get_config() return dict(list(base_config.items()) + list(self._config_dict.items())) @classmethod def from_config(cls, config): return cls(**config) @tf.keras.utils.register_keras_serializable(package='Vision') class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling): """Implements the quantized Atrous Spatial Pyramid Pooling. References: [Rethinking Atrous Convolution for Semantic Image Segmentation]( https://arxiv.org/pdf/1706.05587.pdf) [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/pdf/1802.02611.pdf) """ def __init__( self, output_channels: int, dilation_rates: List[int], pool_kernel_size: Optional[List[int]] = None, use_sync_bn: bool = False, batchnorm_momentum: float = 0.99, batchnorm_epsilon: float = 0.001, activation: str = 'relu', dropout: float = 0.5, kernel_initializer: str = 'GlorotUniform', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, interpolation: str = 'bilinear', use_depthwise_convolution: bool = False, **kwargs): """Initializes `SpatialPyramidPooling`. Args: output_channels: Number of channels produced by SpatialPyramidPooling. dilation_rates: A list of integers for parallel dilated conv. pool_kernel_size: A list of integers or None. If None, global average pooling is applied, otherwise an average pooling of pool_kernel_size is applied. use_sync_bn: A bool, whether or not to use sync batch normalization. batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to 0.99. batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to 0.001. activation: A `str` for type of activation to be used. Defaults to 'relu'. dropout: A float for the dropout rate before output. Defaults to 0.5. kernel_initializer: Kernel initializer for conv layers. Defaults to `glorot_uniform`. kernel_regularizer: Kernel regularizer for conv layers. Defaults to None. interpolation: The interpolation method for upsampling. Defaults to `bilinear`. use_depthwise_convolution: Allows spatial pooling to use separable depthwise convolutions. [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation]( https://arxiv.org/pdf/1802.02611.pdf) **kwargs: Other keyword arguments for the layer. 
""" super().__init__( output_channels=output_channels, dilation_rates=dilation_rates, use_sync_bn=use_sync_bn, batchnorm_momentum=batchnorm_momentum, batchnorm_epsilon=batchnorm_epsilon, activation=activation, dropout=dropout, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, interpolation=interpolation, pool_kernel_size=pool_kernel_size, use_depthwise_convolution=use_depthwise_convolution) self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) self._activation_fn_no_quant = ( tf_utils.get_activation(activation, use_keras_layer=True)) def build(self, input_shape): height = input_shape[1] width = input_shape[2] channels = input_shape[3] norm_layer = ( tf.keras.layers.experimental.SyncBatchNormalization if self._use_sync_bn else tf.keras.layers.BatchNormalization) norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer) norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer) norm = helper.norm_by_activation(self._activation, norm_with_quantize, norm_no_quantize) self.aspp_layers = [] conv1 = helper.Conv2DQuantized( filters=self._output_channels, kernel_size=(1, 1), kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, use_bias=False, activation=helper.NoOpActivation()) norm1 = norm( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) self.aspp_layers.append([conv1, norm1]) for dilation_rate in self._dilation_rates: leading_layers = [] kernel_size = (3, 3) if self._use_depthwise_convolution: leading_layers += [ helper.DepthwiseConv2DOutputQuantized( depth_multiplier=1, kernel_size=kernel_size, padding='same', depthwise_regularizer=self._kernel_regularizer, depthwise_initializer=tf_utils.clone_initializer( self._kernel_initializer), dilation_rate=dilation_rate, use_bias=False, activation=helper.NoOpActivation()) ] kernel_size = (1, 1) conv_dilation = leading_layers + [ helper.Conv2DQuantized( filters=self._output_channels, kernel_size=kernel_size, padding='same', kernel_regularizer=self._kernel_regularizer, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), dilation_rate=dilation_rate, use_bias=False, activation=helper.NoOpActivation()) ] norm_dilation = norm( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) self.aspp_layers.append(conv_dilation + [norm_dilation]) if self._pool_kernel_size is None: pooling = [ helper.GlobalAveragePooling2DQuantized(), helper.ReshapeQuantized((1, 1, channels)) ] else: pooling = [helper.AveragePooling2DQuantized(self._pool_kernel_size)] conv2 = helper.Conv2DQuantized( filters=self._output_channels, kernel_size=(1, 1), kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, use_bias=False, activation=helper.NoOpActivation()) norm2 = norm( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) self.aspp_layers.append(pooling + [conv2, norm2]) self._resizing_layer = helper.ResizingQuantized( height, width, interpolation=self._interpolation) self._projection = [ helper.Conv2DQuantized( filters=self._output_channels, kernel_size=(1, 1), kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, use_bias=False, activation=helper.NoOpActivation()), norm( axis=self._bn_axis, 
momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) ] self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout) self._concat_layer = helper.ConcatenateQuantized(axis=-1) def call(self, inputs: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor: if training is None: training = tf.keras.backend.learning_phase() result = [] for i, layers in enumerate(self.aspp_layers): x = inputs for layer in layers: # Apply layers sequentially. x = layer(x, training=training) x = self._activation_fn(x) # Apply resize layer to the end of the last set of layers. if i == len(self.aspp_layers) - 1: x = self._resizing_layer(x) result.append(tf.cast(x, inputs.dtype)) x = self._concat_layer(result) for layer in self._projection: x = layer(x, training=training) x = self._activation_fn(x) return self._dropout_layer(x) @tf.keras.utils.register_keras_serializable(package='Vision') class ASPPQuantized(aspp.ASPP): """Creates a quantized Atrous Spatial Pyramid Pooling (ASPP) layer.""" def __init__( self, level: int, dilation_rates: List[int], num_filters: int = 256, pool_kernel_size: Optional[int] = None, use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, activation: str = 'relu', dropout_rate: float = 0.0, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, interpolation: str = 'bilinear', use_depthwise_convolution: bool = False, spp_layer_version: str = 'v1', output_tensor: bool = True, **kwargs): """Initializes an Atrous Spatial Pyramid Pooling (ASPP) layer. Args: level: An `int` level to apply ASPP. dilation_rates: A `list` of dilation rates. num_filters: An `int` number of output filters in ASPP. pool_kernel_size: A `list` of [height, width] of pooling kernel size or None. Pooling size is with respect to original image size, it will be scaled down by 2**level. If None, global average pooling is used. use_sync_bn: A `bool`. If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. activation: A `str` activation to be used in ASPP. dropout_rate: A `float` rate for dropout regularization. kernel_initializer: A `str` name of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. interpolation: A `str` of interpolation method. It should be one of `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`, `gaussian`, or `mitchellcubic`. use_depthwise_convolution: If True depthwise separable convolutions will be added to the Atrous spatial pyramid pooling. spp_layer_version: A `str` of spatial pyramid pooling layer version. output_tensor: Whether to output a single tensor or a dictionary of tensor. Default is true. **kwargs: Additional keyword arguments to be passed. 
""" super().__init__( level=level, dilation_rates=dilation_rates, num_filters=num_filters, pool_kernel_size=pool_kernel_size, use_sync_bn=use_sync_bn, norm_momentum=norm_momentum, norm_epsilon=norm_epsilon, activation=activation, dropout_rate=dropout_rate, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, interpolation=interpolation, use_depthwise_convolution=use_depthwise_convolution, spp_layer_version=spp_layer_version, output_tensor=output_tensor, **kwargs) self._aspp_layer = SpatialPyramidPoolingQuantized def call(self, inputs: Union[tf.Tensor, Mapping[str, tf.Tensor]]) -> tf.Tensor: """Calls the Atrous Spatial Pyramid Pooling (ASPP) layer on an input. The output of ASPP will be a dict of {`level`, `tf.Tensor`} even if only one level is present, if output_tensor is false. Hence, this will be compatible with the rest of the segmentation model interfaces. If output_tensor is true, a single tensot is output. Args: inputs: A `tf.Tensor` of shape [batch, height_l, width_l, filter_size] or a `dict` of `tf.Tensor` where - key: A `str` of the level of the multilevel feature maps. - values: A `tf.Tensor` of shape [batch, height_l, width_l, filter_size]. Returns: A `tf.Tensor` of shape [batch, height_l, width_l, filter_size] or a `dict` of `tf.Tensor` where - key: A `str` of the level of the multilevel feature maps. - values: A `tf.Tensor` of output of ASPP module. """ level = str(self._config_dict['level']) backbone_output = inputs[level] if isinstance(inputs, dict) else inputs return self.aspp(backbone_output) class BatchNormalizationWrapper(tf.keras.layers.Wrapper): """A BatchNormalizationWrapper that explicitly not folded. It just added an identity depthwise conv right before the normalization. As a result, given normalization op just folded into the identity depthwise conv layer. Note that it only used when the batch normalization folding is not working. It makes quantize them as a 1x1 depthwise conv layer that just work as same as inference mode for the normalization. (Basically mult and add for the BN.) """ def call(self, inputs: tf.Tensor, *args: Any, **kwargs: Any) -> tf.Tensor: channels = tf.shape(inputs)[-1] x = tf.nn.depthwise_conv2d( inputs, tf.ones([1, 1, channels, 1]), [1, 1, 1, 1], 'VALID') outputs = self.layer.call(x, *args, **kwargs) return outputs class MaskScoringQuantized(heads.MaskScoring): """Creates a quantized mask scoring layer. This implements mask scoring layer from the paper: Zhaojin Huang, Lichao Huang, Yongchao Gong, Chang Huang, Xinggang Wang. Mask Scoring R-CNN. 
(https://arxiv.org/pdf/1903.00241.pdf) """ def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]): """Creates the variables of the mask scoring head.""" self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation( self._config_dict['activation'], use_keras_layer=True ), configs.Default8BitActivationQuantizeConfig(), ) conv_kwargs = { 'filters': self._config_dict['num_filters'], 'kernel_size': 3, 'padding': 'same', } conv_kwargs.update({ 'kernel_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal' ), 'bias_initializer': tf.zeros_initializer(), 'kernel_regularizer': self._config_dict['kernel_regularizer'], 'bias_regularizer': self._config_dict['bias_regularizer'], }) norm_layer = ( tf.keras.layers.experimental.SyncBatchNormalization if self._config_dict['use_sync_bn'] else tf.keras.layers.BatchNormalization ) norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer) norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer) bn_op = helper.norm_by_activation( self._config_dict['activation'], norm_with_quantize, norm_no_quantize ) bn_kwargs = { 'axis': self._bn_axis, 'momentum': self._config_dict['norm_momentum'], 'epsilon': self._config_dict['norm_epsilon'], } self._convs = [] self._conv_norms = [] for i in range(self._config_dict['num_convs']): if self._config_dict['use_depthwise_convolution']: self._convs.append( helper.DepthwiseConv2DQuantized( name='mask-scoring-depthwise-conv-{}'.format(i), kernel_size=3, padding='same', use_bias=False, depthwise_initializer=tf.keras.initializers.RandomNormal( stddev=0.01), depthwise_regularizer=self._config_dict['kernel_regularizer'], depth_multiplier=1, activation=helper.NoOpActivation())) norm_name = 'mask-scoring-depthwise-bn-{}'.format(i) self._conv_norms.append(bn_op(name=norm_name, **bn_kwargs)) conv_name = 'mask-scoring_{}'.format(i) if 'kernel_initializer' in conv_kwargs: conv_kwargs['kernel_initializer'] = tf_utils.clone_initializer( conv_kwargs['kernel_initializer'] ) if self._config_dict['use_depthwise_convolution']: conv_kwargs['kernel_size'] = 1 self._convs.append( helper.Conv2DQuantized( name=conv_name, activation=helper.NoOpActivation(), **conv_kwargs ) ) bn_name = 'mask-scoring-bn_{}'.format(i) self._conv_norms.append(bn_op(name=bn_name, **bn_kwargs)) self._fcs = [] self._fc_norms = [] for i in range(self._config_dict['num_fcs']): fc_name = 'mask-scoring-fc_{}'.format(i) self._fcs.append( helper.DenseQuantized( units=self._config_dict['fc_dims'], kernel_initializer=tf.keras.initializers.VarianceScaling( scale=1 / 3.0, mode='fan_out', distribution='uniform' ), kernel_regularizer=self._config_dict['kernel_regularizer'], bias_regularizer=self._config_dict['bias_regularizer'], name=fc_name, activation=helper.NoOpActivation(), ) ) bn_name = 'mask-scoring-fc-bn_{}'.format(i) self._fc_norms.append(bn_op(name=bn_name, **bn_kwargs)) self._classifier = helper.DenseOutputQuantized( units=self._config_dict['num_classes'], kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), bias_initializer=tf.zeros_initializer(), kernel_regularizer=self._config_dict['kernel_regularizer'], bias_regularizer=self._config_dict['bias_regularizer'], name='iou-scores', ) self._resizing_layer = helper.ResizingQuantized( self._config_dict['fc_input_size'][0], self._config_dict['fc_input_size'][1], interpolation='bilinear', ) self._identity_layer = helper.IdentityQuantized(trainable=False) super().build(input_shape) def call(self, 
inputs: tf.Tensor, training: bool = None): """Forward pass of the mask scoring head. Args: inputs: A `tf.Tensor` of the shape [batch_size, height, width, num_classes], representing the segmentation logits. training: A `bool` indicating whether it is in `training` mode. Returns: mask_scores: A `tf.Tensor` of predicted mask scores [batch_size, num_classes]. """ x = tf.stop_gradient(inputs) for conv, bn in zip(self._convs, self._conv_norms): x = conv(x) x = bn(x) x = self._activation_layer(x) x = self._resizing_layer(x) _, h, w, filters = x.get_shape().as_list() x = tf.reshape(x, [-1, h * w * filters]) for fc, bn in zip(self._fcs, self._fc_norms): x = fc(x) x = bn(x) x = self._activation_layer(x) ious = self._classifier(x) ious = self._identity_layer(ious) return ious
38,727
39.55288
87
py
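Two of the quantization tricks in nn_layers.py above can be sanity-checked numerically in a few lines. A minimal sketch using toy tensors only (nothing here comes from the real layers):

import tensorflow as tf

# 1) The hard_sigmoid decomposition used by SqueezeExcitationQuantized:
#    hard_sigmoid(x) = relu6(x + 3) * (1/6), split into the separately
#    quantizable ops add, relu6, and multiply-by-constant.
x = tf.constant([-4.0, -1.0, 0.0, 1.0, 4.0])
decomposed = tf.nn.relu6(x + 3.0) * 0.16667
tf.debugging.assert_near(decomposed, tf.nn.relu6(x + 3.0) / 6.0, atol=1e-4)
print(decomposed.numpy())  # ~[0.0, 0.333, 0.5, 0.667, 1.0]

# 2) The BatchNormalizationWrapper trick: a 1x1 depthwise conv with an
#    all-ones kernel is an identity op, so folding the following BN into it
#    leaves the math unchanged while giving the quantizer a conv to attach to.
feats = tf.random.normal([1, 4, 4, 3])
identity = tf.nn.depthwise_conv2d(
    feats, tf.ones([1, 1, 3, 1]), [1, 1, 1, 1], 'VALID')
tf.debugging.assert_near(feats, identity)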
models
models-master/official/projects/qat/vision/modeling/layers/nn_blocks_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for nn_blocks.""" from typing import Any, Iterable, Tuple # Import libraries from absl.testing import parameterized import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.projects.qat.vision.modeling.layers import nn_blocks def distribution_strategy_combinations() -> Iterable[Tuple[Any, ...]]: """Returns the combinations of end-to-end tests to run.""" return combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy_gpu, ], ) class NNBlocksTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (nn_blocks.BottleneckBlockQuantized, 1, False, 0.0, None), (nn_blocks.BottleneckBlockQuantized, 2, True, 0.2, 0.25), ) def test_bottleneck_block_creation(self, block_fn, strides, use_projection, stochastic_depth_drop_rate, se_ratio): input_size = 128 filter_size = 256 inputs = tf.keras.Input( shape=(input_size, input_size, filter_size * 4), batch_size=1) block = block_fn( filter_size, strides, use_projection=use_projection, se_ratio=se_ratio, stochastic_depth_drop_rate=stochastic_depth_drop_rate) features = block(inputs) self.assertAllEqual( [1, input_size // strides, input_size // strides, filter_size * 4], features.shape.as_list()) @parameterized.parameters( (nn_blocks.InvertedBottleneckBlockQuantized, 1, 1, None, None), (nn_blocks.InvertedBottleneckBlockQuantized, 6, 1, None, None), (nn_blocks.InvertedBottleneckBlockQuantized, 1, 2, None, None), (nn_blocks.InvertedBottleneckBlockQuantized, 1, 1, 0.2, None), (nn_blocks.InvertedBottleneckBlockQuantized, 1, 1, None, 0.2), ) def test_invertedbottleneck_block_creation( self, block_fn, expand_ratio, strides, se_ratio, stochastic_depth_drop_rate): input_size = 128 in_filters = 24 out_filters = 40 inputs = tf.keras.Input( shape=(input_size, input_size, in_filters), batch_size=1) block = block_fn( in_filters=in_filters, out_filters=out_filters, expand_ratio=expand_ratio, strides=strides, se_ratio=se_ratio, stochastic_depth_drop_rate=stochastic_depth_drop_rate, output_intermediate_endpoints=False) features = block(inputs) self.assertAllEqual( [1, input_size // strides, input_size // strides, out_filters], features.shape.as_list()) if __name__ == '__main__': tf.test.main()
3,345
33.854167
77
py
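The block tests above lean on absl's parameterized decorator to sweep block configurations without duplicating test bodies. The pattern in isolation, as a hypothetical toy test (the Conv2D layer and constants are placeholders, not from the repo):

from absl.testing import parameterized
import tensorflow as tf

class ToyBlockTest(parameterized.TestCase, tf.test.TestCase):

  @parameterized.parameters(
      (1, 8),   # (strides, filters): each tuple becomes its own test case.
      (2, 16),
  )
  def test_conv_shape(self, strides, filters):
    inputs = tf.keras.Input(shape=(32, 32, 3), batch_size=1)
    outputs = tf.keras.layers.Conv2D(
        filters, 3, strides=strides, padding='same')(inputs)
    # 'same' padding halves the spatial size when strides == 2.
    self.assertAllEqual(
        [1, 32 // strides, 32 // strides, filters],
        outputs.shape.as_list())

if __name__ == '__main__':
  tf.test.main()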
models
models-master/official/projects/qat/vision/modeling/layers/nn_layers_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for nn_layers.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.projects.qat.vision.modeling.layers import nn_layers class NNLayersTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( ('deeplabv3plus', 1, 128, 128), ('deeplabv3plus', 2, 128, 128), ('deeplabv3', 1, 128, 64), ('deeplabv3', 2, 128, 64), ('deeplabv3plus_sum_to_merge', 1, 64, 128), ('deeplabv3plus_sum_to_merge', 2, 64, 128), ) def test_segmentation_head_creation(self, feature_fusion, upsample_factor, low_level_num_filters, expected_shape): input_size = 128 decoder_output_size = input_size // 2 decoder_output = tf.random.uniform( (2, decoder_output_size, decoder_output_size, 64), dtype=tf.float32) backbone_output = tf.random.uniform((2, input_size, input_size, 32), dtype=tf.float32) segmentation_head = nn_layers.SegmentationHeadQuantized( num_classes=5, level=4, upsample_factor=upsample_factor, low_level=2, low_level_num_filters=low_level_num_filters, feature_fusion=feature_fusion) features = segmentation_head((backbone_output, decoder_output)) self.assertAllEqual([ 2, expected_shape * upsample_factor, expected_shape * upsample_factor, 5 ], features.shape.as_list()) @parameterized.parameters( (None, []), (None, [6, 12, 18]), ([32, 32], [6, 12, 18]), ) def test_spatial_pyramid_pooling_creation(self, pool_kernel_size, dilation_rates): inputs = tf.keras.Input(shape=(64, 64, 128), dtype=tf.float32) layer = nn_layers.SpatialPyramidPoolingQuantized( output_channels=256, dilation_rates=dilation_rates, pool_kernel_size=pool_kernel_size) output = layer(inputs) self.assertAllEqual([None, 64, 64, 256], output.shape) @parameterized.parameters( (3, [6, 12, 18, 24], 128), (3, [6, 12, 18], 128), (3, [6, 12], 256), (4, [], 128), (4, [6, 12, 18], 128), (4, [], 256), ) def test_aspp_creation(self, level, dilation_rates, num_filters): input_size = 128 // 2**level tf.keras.backend.set_image_data_format('channels_last') endpoints = tf.random.uniform( shape=(2, input_size, input_size, 64), dtype=tf.float32) network = nn_layers.ASPPQuantized( level=level, dilation_rates=dilation_rates, num_filters=num_filters) feats = network(endpoints) self.assertAllEqual([2, input_size, input_size, num_filters], feats.shape.as_list()) @parameterized.parameters(False, True) def test_bnorm_wrapper_creation(self, use_sync_bn): inputs = tf.keras.Input(shape=(64, 64, 128), dtype=tf.float32) if use_sync_bn: norm = tf.keras.layers.experimental.SyncBatchNormalization(axis=-1) else: norm = tf.keras.layers.BatchNormalization(axis=-1) layer = nn_layers.BatchNormalizationWrapper(norm) output = layer(inputs) self.assertAllEqual([None, 64, 64, 128], output.shape) @parameterized.parameters( (1, 1, 64, [4, 4]), (2, 1, 64, [4, 4]), (3, 1, 64, [4, 4]), (1, 2, 32, [8, 8]), (2, 2, 32, [8, 8]), (3, 2, 32, [8, 8]), ) def test_mask_scoring_creation( self, num_convs, num_fcs, num_filters, fc_input_size ): inputs = 
tf.keras.Input(shape=(64, 64, 16), dtype=tf.float32) head = nn_layers.MaskScoringQuantized( num_classes=2, num_convs=num_convs, num_filters=num_filters, fc_dims=128, num_fcs=num_fcs, fc_input_size=fc_input_size, use_depthwise_convolution=True, ) scores = head(inputs) self.assertAllEqual(scores.shape.as_list(), [None, 2]) if __name__ == '__main__': tf.test.main()
4,538
32.873134
80
py
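The expected shapes in test_segmentation_head_creation follow directly from the fusion mode: the deeplabv3plus variants resize decoder features up to the 128x128 backbone resolution before fusing, while plain deeplabv3 stays at the 64x64 decoder resolution, and upsample_factor then scales either result. A tiny sketch of that arithmetic (constants mirror the test above; the helper name is made up for illustration):

def head_output_size(input_size, feature_fusion, upsample_factor):
  # Decoder features are half the backbone resolution in the test.
  decoder_size = input_size // 2
  # deeplabv3plus-style fusion resizes decoder features to backbone
  # resolution; plain deeplabv3 keeps the decoder resolution.
  base = input_size if 'deeplabv3plus' in feature_fusion else decoder_size
  return base * upsample_factor

assert head_output_size(128, 'deeplabv3plus', 2) == 256
assert head_output_size(128, 'deeplabv3', 2) == 128
assert head_output_size(128, 'deeplabv3plus_sum_to_merge', 1) == 128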
models
models-master/official/projects/qat/vision/modeling/layers/nn_blocks.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains quantized neural blocks for the QAT.""" from typing import Any, Dict, Optional, Sequence, Tuple, Union # Import libraries from absl import logging import tensorflow as tf import tensorflow_model_optimization as tfmot from official.modeling import tf_utils from official.projects.qat.vision.modeling.layers import nn_layers as qat_nn_layers from official.projects.qat.vision.quantization import configs from official.projects.qat.vision.quantization import helper from official.vision.modeling.layers import nn_layers # This class is copied from modeling.layers.nn_blocks.BottleneckBlock and apply # QAT. @tf.keras.utils.register_keras_serializable(package='Vision') class BottleneckBlockQuantized(tf.keras.layers.Layer): """A quantized standard bottleneck block.""" def __init__(self, filters: int, strides: int, dilation_rate: int = 1, use_projection: bool = False, se_ratio: Optional[float] = None, resnetd_shortcut: bool = False, stochastic_depth_drop_rate: Optional[float] = None, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: tf.keras.regularizers.Regularizer = None, bias_regularizer: tf.keras.regularizers.Regularizer = None, activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, bn_trainable: bool = True, # pytype: disable=annotation-type-mismatch # typed-keras **kwargs): """Initializes a standard bottleneck block with BN after convolutions. Args: filters: An `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. strides: An `int` block stride. If greater than 1, this block will ultimately downsample the input. dilation_rate: An `int` dilation_rate of convolutions. Default to 1. use_projection: A `bool` for whether this block should use a projection shortcut (versus the default identity shortcut). This is usually `True` for the first block of a block group, which may change the number of filters and the resolution. se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer. resnetd_shortcut: A `bool`. If True, apply the resnetd style modification to the shortcut connection. stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for the stochastic depth layer. kernel_initializer: A `str` of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d. Default to None. activation: A `str` name of the activation function. use_sync_bn: A `bool`. If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. bn_trainable: A `bool` that indicates whether batch norm layers should be trainable. Default to True. 
**kwargs: Additional keyword arguments to be passed. """ super(BottleneckBlockQuantized, self).__init__(**kwargs) self._filters = filters self._strides = strides self._dilation_rate = dilation_rate self._use_projection = use_projection self._se_ratio = se_ratio self._resnetd_shortcut = resnetd_shortcut self._use_sync_bn = use_sync_bn self._activation = activation self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._kernel_initializer = kernel_initializer self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer norm_layer = ( tf.keras.layers.experimental.SyncBatchNormalization if use_sync_bn else tf.keras.layers.BatchNormalization) self._norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer) self._norm = helper.BatchNormalizationNoQuantized(norm_layer) if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._bn_trainable = bn_trainable def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]): """Build variables and child layers to prepare for calling.""" if self._use_projection: if self._resnetd_shortcut: self._shortcut0 = tf.keras.layers.AveragePooling2D( pool_size=2, strides=self._strides, padding='same') self._shortcut1 = helper.Conv2DQuantized( filters=self._filters * 4, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=helper.NoOpActivation()) else: self._shortcut = helper.Conv2DQuantized( filters=self._filters * 4, kernel_size=1, strides=self._strides, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=helper.NoOpActivation()) self._norm0 = self._norm_with_quantize( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable) self._conv1 = helper.Conv2DQuantized( filters=self._filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=helper.NoOpActivation()) self._norm1 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable) self._activation1 = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(self._activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) self._conv2 = helper.Conv2DQuantized( filters=self._filters, kernel_size=3, strides=self._strides, dilation_rate=self._dilation_rate, padding='same', use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=helper.NoOpActivation()) self._norm2 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable) self._activation2 = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(self._activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) self._conv3 = helper.Conv2DQuantized( filters=self._filters * 4, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=helper.NoOpActivation()) self._norm3 = 
self._norm_with_quantize( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable) self._activation3 = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(self._activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1: self._squeeze_excitation = qat_nn_layers.SqueezeExcitationQuantized( in_filters=self._filters * 4, out_filters=self._filters * 4, se_ratio=self._se_ratio, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) else: self._squeeze_excitation = None if self._stochastic_depth_drop_rate: self._stochastic_depth = nn_layers.StochasticDepth( self._stochastic_depth_drop_rate) else: self._stochastic_depth = None self._add = tfmot.quantization.keras.QuantizeWrapperV2( tf.keras.layers.Add(), configs.Default8BitQuantizeConfig([], [], True)) super(BottleneckBlockQuantized, self).build(input_shape) def get_config(self) -> Dict[str, Any]: """Get a config of this layer.""" config = { 'filters': self._filters, 'strides': self._strides, 'dilation_rate': self._dilation_rate, 'use_projection': self._use_projection, 'se_ratio': self._se_ratio, 'resnetd_shortcut': self._resnetd_shortcut, 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'bn_trainable': self._bn_trainable } base_config = super(BottleneckBlockQuantized, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call( self, inputs: tf.Tensor, training: Optional[Union[bool, tf.Tensor]] = None) -> tf.Tensor: """Run the BottleneckBlockQuantized logics.""" shortcut = inputs if self._use_projection: if self._resnetd_shortcut: shortcut = self._shortcut0(shortcut) shortcut = self._shortcut1(shortcut) else: shortcut = self._shortcut(shortcut) shortcut = self._norm0(shortcut) x = self._conv1(inputs) x = self._norm1(x) x = self._activation1(x) x = self._conv2(x) x = self._norm2(x) x = self._activation2(x) x = self._conv3(x) x = self._norm3(x) if self._squeeze_excitation: x = self._squeeze_excitation(x) if self._stochastic_depth: x = self._stochastic_depth(x, training=training) x = self._add([x, shortcut]) return self._activation3(x) # This class is copied from modeling.backbones.mobilenet.Conv2DBNBlock and apply # QAT. @tf.keras.utils.register_keras_serializable(package='Vision') class Conv2DBNBlockQuantized(tf.keras.layers.Layer): """A quantized convolution block with batch normalization.""" def __init__( self, filters: int, kernel_size: int = 3, strides: int = 1, use_bias: bool = False, use_explicit_padding: bool = False, activation: str = 'relu6', kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, use_normalization: bool = True, use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, **kwargs): """A convolution block with batch normalization. Args: filters: An `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. 
kernel_size: An `int` specifying the height and width of the 2D convolution window. strides: An `int` of block stride. If greater than 1, this block will ultimately downsample the input. use_bias: If True, use bias in the convolution layer. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. activation: A `str` name of the activation function. kernel_initializer: A `str` for kernel initializer of convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. use_normalization: If True, use batch normalization. use_sync_bn: If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. **kwargs: Additional keyword arguments to be passed. """ super(Conv2DBNBlockQuantized, self).__init__(**kwargs) self._filters = filters self._kernel_size = kernel_size self._strides = strides self._activation = activation self._use_bias = use_bias self._use_explicit_padding = use_explicit_padding self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._use_normalization = use_normalization self._use_sync_bn = use_sync_bn self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon if use_explicit_padding and kernel_size > 1: self._padding = 'valid' else: self._padding = 'same' norm_layer = ( tf.keras.layers.experimental.SyncBatchNormalization if use_sync_bn else tf.keras.layers.BatchNormalization) self._norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer) self._norm = helper.BatchNormalizationNoQuantized(norm_layer) if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 def get_config(self) -> Dict[str, Any]: """Get a config of this layer.""" config = { 'filters': self._filters, 'strides': self._strides, 'kernel_size': self._kernel_size, 'use_bias': self._use_bias, 'use_explicit_padding': self._use_explicit_padding, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'use_normalization': self._use_normalization, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon } base_config = super(Conv2DBNBlockQuantized, self).get_config() return dict(list(base_config.items()) + list(config.items())) def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]): """Build variables and child layers to prepare for calling.""" if self._use_explicit_padding and self._kernel_size > 1: padding_size = nn_layers.get_padding_for_kernel_size(self._kernel_size) self._pad = tf.keras.layers.ZeroPadding2D(padding_size) conv2d_quantized = ( helper.Conv2DQuantized if self._use_normalization else helper.Conv2DOutputQuantized) self._conv0 = conv2d_quantized( filters=self._filters, kernel_size=self._kernel_size, strides=self._strides, padding=self._padding, use_bias=self._use_bias, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=helper.NoOpActivation()) if self._use_normalization: self._norm0 = helper.norm_by_activation(self._activation, 
self._norm_with_quantize,
                                              self._norm)(
          axis=self._bn_axis,
          momentum=self._norm_momentum,
          epsilon=self._norm_epsilon)
    self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
        tf_utils.get_activation(self._activation, use_keras_layer=True),
        configs.Default8BitActivationQuantizeConfig())

    super(Conv2DBNBlockQuantized, self).build(input_shape)

  def call(
      self,
      inputs: tf.Tensor,
      training: Optional[Union[bool, tf.Tensor]] = None) -> tf.Tensor:
    """Run the Conv2DBNBlockQuantized logics."""
    if self._use_explicit_padding and self._kernel_size > 1:
      inputs = self._pad(inputs)
    x = self._conv0(inputs)
    if self._use_normalization:
      x = self._norm0(x)
    return self._activation_layer(x)


@tf.keras.utils.register_keras_serializable(package='Vision')
class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
  """A quantized inverted bottleneck block."""

  def __init__(self,
               in_filters,
               out_filters,
               expand_ratio,
               strides,
               kernel_size=3,
               se_ratio=None,
               stochastic_depth_drop_rate=None,
               kernel_initializer='VarianceScaling',
               kernel_regularizer=None,
               bias_regularizer=None,
               activation='relu',
               se_inner_activation='relu',
               se_gating_activation='sigmoid',
               se_round_down_protect=True,
               expand_se_in_filters=False,
               depthwise_activation=None,
               use_sync_bn=False,
               dilation_rate=1,
               divisible_by=1,
               regularize_depthwise=False,
               use_depthwise=True,
               use_residual=True,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               output_intermediate_endpoints=False,
               **kwargs):
    """Initializes an inverted bottleneck block with BN after convolutions.

    Args:
      in_filters: An `int` number of filters of the input tensor.
      out_filters: An `int` number of filters of the output tensor.
      expand_ratio: An `int` of expand_ratio for an inverted bottleneck block.
      strides: An `int` block stride. If greater than 1, this block will
        ultimately downsample the input.
      kernel_size: An `int` kernel_size of the depthwise conv layer.
      se_ratio: A `float` or None. If not None, se ratio for the squeeze and
        excitation layer.
      stochastic_depth_drop_rate: A `float` or None. If not None, drop rate
        for the stochastic depth layer.
      kernel_initializer: A `str` of kernel_initializer for convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      activation: A `str` name of the activation function.
      se_inner_activation: A `str` name of squeeze-excitation inner activation.
      se_gating_activation: A `str` name of squeeze-excitation gating
        activation.
      se_round_down_protect: A `bool`. If True, prevent rounding down the
        number of squeeze-excitation filters by more than 10%.
      expand_se_in_filters: A `bool` of whether or not to expand in_filter in
        squeeze and excitation layer.
      depthwise_activation: A `str` name of the activation function for
        depthwise only.
      use_sync_bn: A `bool`. If True, use synchronized batch normalization.
      dilation_rate: An `int` that specifies the dilation rate to use for
        dilated convolution. A single `int` sets the same value for all
        spatial dimensions.
      divisible_by: An `int` that ensures all inner dimensions are divisible
        by this number.
      regularize_depthwise: A `bool` of whether or not to apply regularization
        on depthwise.
      use_depthwise: A `bool` of whether to use a depthwise convolution; if
        False, a fused convolution (the expansion convolution carries the
        spatial kernel and stride) is used instead.
      use_residual: A `bool` of whether to include residual connection between
        input and output.
      norm_momentum: A `float` of normalization momentum for the moving
        average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
output_intermediate_endpoints: A `bool` of whether or not output the intermediate endpoints. **kwargs: Additional keyword arguments to be passed. """ super(InvertedBottleneckBlockQuantized, self).__init__(**kwargs) self._in_filters = in_filters self._out_filters = out_filters self._expand_ratio = expand_ratio self._strides = strides self._kernel_size = kernel_size self._se_ratio = se_ratio self._divisible_by = divisible_by self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._dilation_rate = dilation_rate self._use_sync_bn = use_sync_bn self._regularize_depthwise = regularize_depthwise self._use_depthwise = use_depthwise self._use_residual = use_residual self._activation = activation self._se_inner_activation = se_inner_activation self._se_gating_activation = se_gating_activation self._se_round_down_protect = se_round_down_protect self._depthwise_activation = depthwise_activation self._kernel_initializer = kernel_initializer self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._expand_se_in_filters = expand_se_in_filters self._output_intermediate_endpoints = output_intermediate_endpoints norm_layer = ( tf.keras.layers.experimental.SyncBatchNormalization if use_sync_bn else tf.keras.layers.BatchNormalization) self._norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer) self._norm = helper.BatchNormalizationNoQuantized(norm_layer) if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 if not depthwise_activation: self._depthwise_activation = activation if regularize_depthwise: self._depthsize_regularizer = kernel_regularizer else: self._depthsize_regularizer = None def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]): """Build variables and child layers to prepare for calling.""" expand_filters = self._in_filters if self._expand_ratio > 1: # First 1x1 conv for channel expansion. expand_filters = nn_layers.make_divisible( self._in_filters * self._expand_ratio, self._divisible_by) expand_kernel = 1 if self._use_depthwise else self._kernel_size expand_stride = 1 if self._use_depthwise else self._strides self._conv0 = helper.Conv2DQuantized( filters=expand_filters, kernel_size=expand_kernel, strides=expand_stride, padding='same', use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=helper.NoOpActivation()) self._norm0 = helper.norm_by_activation(self._activation, self._norm_with_quantize, self._norm)( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon) self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(self._activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) if self._use_depthwise: # Depthwise conv. 
self._conv1 = helper.DepthwiseConv2DQuantized( kernel_size=(self._kernel_size, self._kernel_size), strides=self._strides, padding='same', depth_multiplier=1, dilation_rate=self._dilation_rate, use_bias=False, depthwise_initializer=self._kernel_initializer, depthwise_regularizer=self._depthsize_regularizer, bias_regularizer=self._bias_regularizer, activation=helper.NoOpActivation()) self._norm1 = helper.norm_by_activation(self._depthwise_activation, self._norm_with_quantize, self._norm)( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon) self._depthwise_activation_layer = ( tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(self._depthwise_activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig())) # Squeeze and excitation. if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1: logging.info('Use Squeeze and excitation.') in_filters = self._in_filters if self._expand_se_in_filters: in_filters = expand_filters self._squeeze_excitation = qat_nn_layers.SqueezeExcitationQuantized( in_filters=in_filters, out_filters=expand_filters, se_ratio=self._se_ratio, divisible_by=self._divisible_by, round_down_protect=self._se_round_down_protect, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._se_inner_activation, gating_activation=self._se_gating_activation) else: self._squeeze_excitation = None # Last 1x1 conv. self._conv2 = helper.Conv2DQuantized( filters=self._out_filters, kernel_size=1, strides=1, padding='same', use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=helper.NoOpActivation()) self._norm2 = self._norm_with_quantize( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon) if self._stochastic_depth_drop_rate: self._stochastic_depth = nn_layers.StochasticDepth( self._stochastic_depth_drop_rate) else: self._stochastic_depth = None self._add = tfmot.quantization.keras.QuantizeWrapperV2( tf.keras.layers.Add(), configs.Default8BitQuantizeConfig([], [], True)) super(InvertedBottleneckBlockQuantized, self).build(input_shape) def get_config(self) -> Dict[str, Any]: """Get a config of this layer.""" config = { 'in_filters': self._in_filters, 'out_filters': self._out_filters, 'expand_ratio': self._expand_ratio, 'strides': self._strides, 'kernel_size': self._kernel_size, 'se_ratio': self._se_ratio, 'divisible_by': self._divisible_by, 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'se_inner_activation': self._se_inner_activation, 'se_gating_activation': self._se_gating_activation, 'se_round_down_protect': self._se_round_down_protect, 'expand_se_in_filters': self._expand_se_in_filters, 'depthwise_activation': self._depthwise_activation, 'dilation_rate': self._dilation_rate, 'use_sync_bn': self._use_sync_bn, 'regularize_depthwise': self._regularize_depthwise, 'use_depthwise': self._use_depthwise, 'use_residual': self._use_residual, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'output_intermediate_endpoints': self._output_intermediate_endpoints } base_config = super(InvertedBottleneckBlockQuantized, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call( 
self, inputs: tf.Tensor, training: Optional[Union[bool, tf.Tensor]] = None ) -> Union[tf.Tensor, Tuple[tf.Tensor, Dict[str, tf.Tensor]]]: """Run the InvertedBottleneckBlockQuantized logics.""" endpoints = {} shortcut = inputs if self._expand_ratio > 1: x = self._conv0(inputs) x = self._norm0(x) x = self._activation_layer(x) else: x = inputs if self._use_depthwise: x = self._conv1(x) x = self._norm1(x) x = self._depthwise_activation_layer(x) if self._output_intermediate_endpoints: endpoints['depthwise'] = x if self._squeeze_excitation: x = self._squeeze_excitation(x) x = self._conv2(x) x = self._norm2(x) if (self._use_residual and self._in_filters == self._out_filters and self._strides == 1): if self._stochastic_depth: x = self._stochastic_depth(x, training=training) x = self._add([x, shortcut]) if self._output_intermediate_endpoints: return x, endpoints return x
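A minimal usage sketch for the quantized blocks defined above. The import path is an assumption (it mirrors where a file like this would sit in the project) and the hyperparameters are illustrative only; with `in_filters == out_filters` and `strides == 1`, the residual add path is exercised.

```python
# Hedged sketch, not part of the original file: build one quantized
# inverted bottleneck block and run a dummy batch through it.
import tensorflow as tf

# Assumed import path; adjust to the actual location of this module.
from official.projects.qat.vision.modeling.layers import nn_blocks

block = nn_blocks.InvertedBottleneckBlockQuantized(
    in_filters=24,
    out_filters=24,
    expand_ratio=6,
    strides=1,
    kernel_size=3,
    se_ratio=0.25,
    activation='relu6')

x = tf.ones([2, 32, 32, 24])  # [batch, height, width, channels]
y = block(x, training=False)
print(y.shape)  # (2, 32, 32, 24); shapes match, so the residual add applies
```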
30,118
40.948468
100
py
models
models-master/official/projects/qat/vision/modeling/heads/dense_prediction_heads.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains definitions of dense prediction heads."""

from typing import List, Mapping, Union, Optional, Any, Dict

# Import libraries

import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot

from official.modeling import tf_utils
from official.projects.qat.vision.quantization import configs
from official.projects.qat.vision.quantization import helper


@tf.keras.utils.register_keras_serializable(package='Vision')
class RetinaNetHeadQuantized(tf.keras.layers.Layer):
  """Creates a RetinaNet quantized head."""

  def __init__(
      self,
      min_level: int,
      max_level: int,
      num_classes: int,
      num_anchors_per_location: int,
      num_convs: int = 4,
      num_filters: int = 256,
      attribute_heads: Optional[List[Dict[str, Any]]] = None,
      use_separable_conv: bool = False,
      activation: str = 'relu',
      use_sync_bn: bool = False,
      norm_momentum: float = 0.99,
      norm_epsilon: float = 0.001,
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      num_params_per_anchor: int = 4,
      share_classification_heads: bool = False,
      share_level_convs: bool = True,
      **kwargs):
    """Initializes a RetinaNet quantized head.

    Args:
      min_level: An `int` number of minimum feature level.
      max_level: An `int` number of maximum feature level.
      num_classes: An `int` number of classes to predict.
      num_anchors_per_location: An `int` number of anchors per pixel
        location.
      num_convs: An `int` number that represents the number of the
        intermediate conv layers before the prediction.
      num_filters: An `int` number that represents the number of filters of
        the intermediate conv layers.
      attribute_heads: If not None, a list that contains a dict for each
        additional attribute head. Each dict consists of 4 key-value pairs:
        `name`, `type` ('regression' or 'classification'), `size` (number of
        predicted values for each instance), and `prediction_tower_name`
        (optional, specifies shared prediction towers).
      use_separable_conv: A `bool` that indicates whether separable
        convolution layers are used.
      activation: A `str` that indicates which activation is used, e.g.
        'relu', 'swish', etc.
      use_sync_bn: A `bool` that indicates whether to use synchronized batch
        normalization across different replicas.
      norm_momentum: A `float` of normalization momentum for the moving
        average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default is None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D.
      num_params_per_anchor: Number of parameters required to specify an
        anchor box. For example, `num_params_per_anchor` would be 4 for
        axis-aligned anchor boxes specified by their y-centers, x-centers,
        heights, and widths.
      share_classification_heads: A `bool` that indicates whether to share
        weights among the main and attribute classification heads. Not used
        in the QAT model.
share_level_convs: An optional bool to enable sharing convs across levels for classnet, boxnet, classifier and box regressor. If True, convs will be shared across all levels. Not used in the QAT model. **kwargs: Additional keyword arguments to be passed. """ del share_classification_heads del share_level_convs super().__init__(**kwargs) self._config_dict = { 'min_level': min_level, 'max_level': max_level, 'num_classes': num_classes, 'num_anchors_per_location': num_anchors_per_location, 'num_convs': num_convs, 'num_filters': num_filters, 'attribute_heads': attribute_heads, 'use_separable_conv': use_separable_conv, 'activation': activation, 'use_sync_bn': use_sync_bn, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer, 'num_params_per_anchor': num_params_per_anchor, } if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]): """Creates the variables of the head.""" if self._config_dict['use_separable_conv']: conv_op = helper.SeparableConv2DQuantized else: conv_op = helper.quantize_wrapped_layer( tf.keras.layers.Conv2D, configs.Default8BitConvQuantizeConfig( ['kernel'], ['activation'], False)) conv_kwargs = { 'filters': self._config_dict['num_filters'], 'kernel_size': 3, 'padding': 'same', 'bias_initializer': tf.zeros_initializer(), 'bias_regularizer': self._config_dict['bias_regularizer'], } if not self._config_dict['use_separable_conv']: conv_kwargs.update({ 'kernel_initializer': tf.keras.initializers.RandomNormal( stddev=0.01), 'kernel_regularizer': self._config_dict['kernel_regularizer'], }) base_bn_op = (tf.keras.layers.experimental.SyncBatchNormalization if self._config_dict['use_sync_bn'] else tf.keras.layers.BatchNormalization) bn_op = helper.norm_by_activation( self._config_dict['activation'], helper.quantize_wrapped_layer( base_bn_op, configs.Default8BitOutputQuantizeConfig()), helper.quantize_wrapped_layer( base_bn_op, configs.NoOpQuantizeConfig())) bn_kwargs = { 'axis': self._bn_axis, 'momentum': self._config_dict['norm_momentum'], 'epsilon': self._config_dict['norm_epsilon'], } # Class net. self._cls_convs = [] self._cls_norms = [] for level in range( self._config_dict['min_level'], self._config_dict['max_level'] + 1): this_level_cls_norms = [] for i in range(self._config_dict['num_convs']): if level == self._config_dict['min_level']: cls_conv_name = 'classnet-conv_{}'.format(i) self._cls_convs.append(conv_op(name=cls_conv_name, **conv_kwargs)) cls_norm_name = 'classnet-conv-norm_{}_{}'.format(level, i) this_level_cls_norms.append(bn_op(name=cls_norm_name, **bn_kwargs)) self._cls_norms.append(this_level_cls_norms) classifier_kwargs = { 'filters': ( self._config_dict['num_classes'] * self._config_dict['num_anchors_per_location']), 'kernel_size': 3, 'padding': 'same', 'bias_initializer': tf.constant_initializer(-np.log((1 - 0.01) / 0.01)), 'bias_regularizer': self._config_dict['bias_regularizer'], } if not self._config_dict['use_separable_conv']: classifier_kwargs.update({ 'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=1e-5), 'kernel_regularizer': self._config_dict['kernel_regularizer'], }) self._classifier = conv_op( name='scores', last_quantize=True, **classifier_kwargs) # Box net. 
self._box_convs = [] self._box_norms = [] for level in range( self._config_dict['min_level'], self._config_dict['max_level'] + 1): this_level_box_norms = [] for i in range(self._config_dict['num_convs']): if level == self._config_dict['min_level']: box_conv_name = 'boxnet-conv_{}'.format(i) self._box_convs.append(conv_op(name=box_conv_name, **conv_kwargs)) box_norm_name = 'boxnet-conv-norm_{}_{}'.format(level, i) this_level_box_norms.append(bn_op(name=box_norm_name, **bn_kwargs)) self._box_norms.append(this_level_box_norms) box_regressor_kwargs = { 'filters': (self._config_dict['num_params_per_anchor'] * self._config_dict['num_anchors_per_location']), 'kernel_size': 3, 'padding': 'same', 'bias_initializer': tf.zeros_initializer(), 'bias_regularizer': self._config_dict['bias_regularizer'], } if not self._config_dict['use_separable_conv']: box_regressor_kwargs.update({ 'kernel_initializer': tf.keras.initializers.RandomNormal( stddev=1e-5), 'kernel_regularizer': self._config_dict['kernel_regularizer'], }) self._box_regressor = conv_op( name='boxes', last_quantize=True, **box_regressor_kwargs) # Attribute learning nets. if self._config_dict['attribute_heads']: self._att_predictors = {} self._att_convs = {} self._att_norms = {} for att_config in self._config_dict['attribute_heads']: att_name = att_config['name'] att_type = att_config['type'] att_size = att_config['size'] att_convs_i = [] att_norms_i = [] # Build conv and norm layers. for level in range(self._config_dict['min_level'], self._config_dict['max_level'] + 1): this_level_att_norms = [] for i in range(self._config_dict['num_convs']): if level == self._config_dict['min_level']: att_conv_name = '{}-conv_{}'.format(att_name, i) att_convs_i.append(conv_op(name=att_conv_name, **conv_kwargs)) att_norm_name = '{}-conv-norm_{}_{}'.format(att_name, level, i) this_level_att_norms.append(bn_op(name=att_norm_name, **bn_kwargs)) att_norms_i.append(this_level_att_norms) self._att_convs[att_name] = att_convs_i self._att_norms[att_name] = att_norms_i # Build the final prediction layer. att_predictor_kwargs = { 'filters': (att_size * self._config_dict['num_anchors_per_location']), 'kernel_size': 3, 'padding': 'same', 'bias_initializer': tf.zeros_initializer(), 'bias_regularizer': self._config_dict['bias_regularizer'], } if att_type == 'regression': att_predictor_kwargs.update( {'bias_initializer': tf.zeros_initializer()}) elif att_type == 'classification': att_predictor_kwargs.update({ 'bias_initializer': tf.constant_initializer(-np.log((1 - 0.01) / 0.01)) }) else: raise ValueError( 'Attribute head type {} not supported.'.format(att_type)) if not self._config_dict['use_separable_conv']: att_predictor_kwargs.update({ 'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=1e-5), 'kernel_regularizer': self._config_dict['kernel_regularizer'], }) self._att_predictors[att_name] = conv_op( name='{}_attributes'.format(att_name), **att_predictor_kwargs) super().build(input_shape) def call(self, features: Mapping[str, tf.Tensor]): """Forward pass of the RetinaNet quantized head. Args: features: A `dict` of `tf.Tensor` where - key: A `str` of the level of the multilevel features. - values: A `tf.Tensor`, the feature map tensors, whose shape is [batch, height_l, width_l, channels]. Returns: scores: A `dict` of `tf.Tensor` which includes scores of the predictions. - key: A `str` of the level of the multilevel predictions. 
- values: A `tf.Tensor` of the class scores predicted from a particular
          feature level, whose shape is
          [batch, height_l, width_l, num_classes * num_anchors_per_location].
      boxes: A `dict` of `tf.Tensor` which includes coordinates of the
        predictions.
      - key: A `str` of the level of the multilevel predictions.
      - values: A `tf.Tensor` of the box coordinates predicted from a
          particular feature level, whose shape is
          [batch, height_l, width_l,
          num_params_per_anchor * num_anchors_per_location].
      attributes: a dict of (attribute_name, attribute_prediction). Each
        `attribute_prediction` is a dict of:
      - key: `str`, the level of the multilevel predictions.
      - values: `Tensor`, the attribute predictions from a particular feature
          level, whose shape is
          [batch, height_l, width_l,
          attribute_size * num_anchors_per_location].
        Can be an empty dictionary if no attribute learning is required.
    """
    scores = {}
    boxes = {}
    if self._config_dict['attribute_heads']:
      attributes = {
          att_config['name']: {}
          for att_config in self._config_dict['attribute_heads']
      }
    else:
      attributes = {}

    for i, level in enumerate(
        range(self._config_dict['min_level'],
              self._config_dict['max_level'] + 1)):
      this_level_features = features[str(level)]

      # class net.
      x = this_level_features
      for conv, norm in zip(self._cls_convs, self._cls_norms[i]):
        x = conv(x)
        x = norm(x)
        x = self._activation(x)
      scores[str(level)] = self._classifier(x)

      # box net.
      x = this_level_features
      for conv, norm in zip(self._box_convs, self._box_norms[i]):
        x = conv(x)
        x = norm(x)
        x = self._activation(x)
      boxes[str(level)] = self._box_regressor(x)

      # attribute nets.
      if self._config_dict['attribute_heads']:
        prediction_tower_output = {}
        for att_config in self._config_dict['attribute_heads']:
          att_name = att_config['name']

          def build_prediction_tower(attribute_name, features, feature_level):
            x = features
            for conv, norm in zip(
                self._att_convs[attribute_name],
                self._att_norms[attribute_name][feature_level]):
              x = conv(x)
              x = norm(x)
              x = self._activation(x)
            return x

          prediction_tower_name = att_config['prediction_tower_name']
          if not prediction_tower_name:
            attributes[att_name][str(level)] = self._att_predictors[att_name](
                build_prediction_tower(att_name, this_level_features, i))
          else:
            if prediction_tower_name not in prediction_tower_output:
              prediction_tower_output[
                  prediction_tower_name] = build_prediction_tower(
                      att_name, this_level_features, i)
            attributes[att_name][str(level)] = self._att_predictors[att_name](
                prediction_tower_output[prediction_tower_name])

    return scores, boxes, attributes

  def get_config(self):
    return self._config_dict

  @classmethod
  def from_config(cls, config):
    return cls(**config)
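A hedged usage sketch for `RetinaNetHeadQuantized`. The levels, channel counts, and spatial sizes below are illustrative, and the import path is assumed from this record's file path; with `attribute_heads=None`, the returned `attributes` dict is empty.

```python
# Hedged sketch, not part of the original file.
import tensorflow as tf

from official.projects.qat.vision.modeling.heads import dense_prediction_heads

head = dense_prediction_heads.RetinaNetHeadQuantized(
    min_level=3,
    max_level=5,
    num_classes=10,
    num_anchors_per_location=9,
    num_convs=2,
    num_filters=64)

# One feature map per level, keyed by the stringified level.
features = {
    str(level): tf.ones([1, 2**(7 - level), 2**(7 - level), 64])
    for level in range(3, 6)
}
scores, boxes, attributes = head(features)
print(scores['3'].shape)  # (1, 16, 16, 90): num_classes * num_anchors_per_location
```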
15,779
40.09375
80
py
models
models-master/official/projects/qat/vision/tasks/retinanet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""RetinaNet task definition."""
import tensorflow as tf

from official.core import task_factory
from official.projects.qat.vision.configs import retinanet as exp_cfg
from official.projects.qat.vision.modeling import factory
from official.vision.tasks import retinanet


@task_factory.register_task_cls(exp_cfg.RetinaNetTask)
class RetinaNetTask(retinanet.RetinaNetTask):
  """A task for RetinaNet object detection with QAT."""

  def build_model(self) -> tf.keras.Model:
    """Builds RetinaNet model with QAT."""
    model = super(RetinaNetTask, self).build_model()
    # Call the model with dummy input to build the head part.
    dummy_input = tf.zeros([1] + self.task_config.model.input_size)
    model(dummy_input, training=True)
    if self.task_config.quantization:
      model = factory.build_qat_retinanet(
          model,
          self.task_config.quantization,
          model_config=self.task_config.model)
    return model
1,551
36.853659
74
py
models
models-master/official/projects/qat/vision/tasks/semantic_segmentation.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Semantic segmentation task definition.""" import tensorflow as tf from official.core import task_factory from official.projects.qat.vision.configs import semantic_segmentation as exp_cfg from official.projects.qat.vision.modeling import factory from official.vision.tasks import semantic_segmentation @task_factory.register_task_cls(exp_cfg.SemanticSegmentationTask) class SemanticSegmentationTask(semantic_segmentation.SemanticSegmentationTask): """A task for semantic segmentation with QAT.""" def build_model(self) -> tf.keras.Model: """Builds semantic segmentation model with QAT.""" model = super().build_model() input_specs = tf.keras.layers.InputSpec(shape=[None] + self.task_config.model.input_size) if self.task_config.quantization: model = factory.build_qat_segmentation_model( model, self.task_config.quantization, input_specs) return model
1,552
40.972973
81
py
models
models-master/official/projects/qat/vision/tasks/image_classification.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image classification task definition.""" import tensorflow as tf from official.core import task_factory from official.projects.qat.vision.configs import image_classification as exp_cfg from official.projects.qat.vision.modeling import factory from official.vision.tasks import image_classification @task_factory.register_task_cls(exp_cfg.ImageClassificationTask) class ImageClassificationTask(image_classification.ImageClassificationTask): """A task for image classification with QAT.""" def build_model(self) -> tf.keras.Model: """Builds classification model with QAT.""" input_specs = tf.keras.layers.InputSpec( shape=[None] + self.task_config.model.input_size) l2_weight_decay = self.task_config.losses.l2_weight_decay # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss. # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2) # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss) l2_regularizer = (tf.keras.regularizers.l2( l2_weight_decay / 2.0) if l2_weight_decay else None) model = super(ImageClassificationTask, self).build_model() if self.task_config.quantization: model = factory.build_qat_classification_model( model, self.task_config.quantization, input_specs=input_specs, model_config=self.task_config.model, l2_regularizer=l2_regularizer) return model
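The three QAT task subclasses above (RetinaNet, semantic segmentation, image classification) share one pattern: build the float model through the parent task, then hand it to the project's `factory.build_qat_*` converter when a quantization config is present. Below is a hedged sketch of driving this through the task registry; the experiment name is an assumption and must actually be registered by this project's configs for the call to succeed.

```python
# Hedged sketch, not part of the original files.
from official.core import exp_factory
from official.core import task_factory

config = exp_factory.get_exp_config('mobilenet_imagenet_qat')  # assumed name
task = task_factory.get_task(config.task)
model = task.build_model()  # float model built, then wrapped for QAT
```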
2,047
39.96
80
py
models
models-master/official/projects/movinet/tools/export_saved_model_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for export_saved_model.""" from absl import flags import tensorflow as tf import tensorflow_hub as hub from official.projects.movinet.tools import export_saved_model FLAGS = flags.FLAGS class ExportSavedModelTest(tf.test.TestCase): def test_movinet_export_a0_base_with_tfhub(self): saved_model_path = self.get_temp_dir() FLAGS.export_path = saved_model_path FLAGS.model_id = 'a0' FLAGS.causal = False FLAGS.num_classes = 600 export_saved_model.main('unused_args') encoder = hub.KerasLayer(saved_model_path, trainable=True) inputs = tf.keras.layers.Input( shape=[None, None, None, 3], dtype=tf.float32) outputs = encoder(dict(image=inputs)) model = tf.keras.Model(inputs, outputs) example_input = tf.ones([1, 8, 172, 172, 3]) outputs = model(example_input) self.assertAllEqual(outputs.shape, [1, 600]) def test_movinet_export_a0_stream_with_tfhub(self): saved_model_path = self.get_temp_dir() FLAGS.export_path = saved_model_path FLAGS.model_id = 'a0' FLAGS.causal = True FLAGS.num_classes = 600 export_saved_model.main('unused_args') encoder = hub.KerasLayer(saved_model_path, trainable=True) image_input = tf.keras.layers.Input( shape=[None, None, None, 3], dtype=tf.float32, name='image') init_states_fn = encoder.resolved_object.signatures['init_states'] state_shapes = { name: ([s if s > 0 else None for s in state.shape], state.dtype) for name, state in init_states_fn(tf.constant([0, 0, 0, 0, 3])).items() } states_input = { name: tf.keras.Input(shape[1:], dtype=dtype, name=name) for name, (shape, dtype) in state_shapes.items() } inputs = {**states_input, 'image': image_input} outputs = encoder(inputs) model = tf.keras.Model(inputs, outputs) example_input = tf.ones([1, 8, 172, 172, 3]) frames = tf.split(example_input, example_input.shape[1], axis=1) init_states = init_states_fn(tf.shape(example_input)) expected_outputs, _ = model({**init_states, 'image': example_input}) states = init_states for frame in frames: outputs, states = model({**states, 'image': frame}) self.assertAllEqual(outputs.shape, [1, 600]) self.assertNotEmpty(states) self.assertAllClose(outputs, expected_outputs, 1e-5, 1e-5) def test_movinet_export_a0_stream_with_tflite(self): saved_model_path = self.get_temp_dir() FLAGS.export_path = saved_model_path FLAGS.model_id = 'a0' FLAGS.causal = True FLAGS.conv_type = '2plus1d' FLAGS.se_type = '2plus3d' FLAGS.activation = 'hard_swish' FLAGS.gating_activation = 'hard_sigmoid' FLAGS.use_positional_encoding = False FLAGS.num_classes = 600 FLAGS.batch_size = 1 FLAGS.num_frames = 1 FLAGS.image_size = 172 FLAGS.bundle_input_init_states_fn = False export_saved_model.main('unused_args') converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) tflite_model = converter.convert() interpreter = tf.lite.Interpreter(model_content=tflite_model) runner = interpreter.get_signature_runner('serving_default') def state_name(name: str) -> str: return name[len('serving_default_'):-len(':0')] init_states = 
{ state_name(x['name']): tf.zeros(x['shape'], dtype=x['dtype']) for x in interpreter.get_input_details() } del init_states['image'] video = tf.ones([1, 8, 172, 172, 3]) clips = tf.split(video, video.shape[1], axis=1) states = init_states for clip in clips: outputs = runner(**states, image=clip) logits = outputs.pop('logits') states = outputs self.assertAllEqual(logits.shape, [1, 600]) self.assertNotEmpty(states) if __name__ == '__main__': tf.test.main()
4,451
28.879195
79
py
models
models-master/official/projects/movinet/tools/export_saved_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""Exports models to tf.saved_model.

Export example:

```shell
python3 export_saved_model.py \
  --export_path=/tmp/movinet/ \
  --model_id=a0 \
  --causal=True \
  --conv_type="3d" \
  --num_classes=600 \
  --use_positional_encoding=False \
  --checkpoint_path=""
```

Export for TF Lite example:

```shell
# Use a single frame (--num_frames=1) for streaming mode, with an input
# resolution (--image_size) of 172.
python3 export_saved_model.py \
  --model_id=a0 \
  --causal=True \
  --conv_type=2plus1d \
  --se_type=2plus3d \
  --activation=hard_swish \
  --gating_activation=hard_sigmoid \
  --use_positional_encoding=False \
  --num_classes=600 \
  --batch_size=1 \
  --num_frames=1 \
  --image_size=172 \
  --bundle_input_init_states_fn=False \
  --checkpoint_path=/path/to/checkpoint \
  --export_path=/tmp/movinet_a0_stream
```

To use an exported saved_model, refer to export_saved_model_test.py.
"""

from typing import Optional, Tuple

from absl import app
from absl import flags
import tensorflow as tf

from official.projects.movinet.modeling import movinet
from official.projects.movinet.modeling import movinet_model

flags.DEFINE_string(
    'export_path', '/tmp/movinet/',
    'Export path to save the saved_model file.')
flags.DEFINE_string(
    'model_id', 'a0', 'MoViNet model name.')
flags.DEFINE_bool(
    'causal', False, 'Run the model in causal mode.')
flags.DEFINE_string(
    'conv_type', '3d',
    '3d, 2plus1d, or 3d_2plus1d. 3d configures the network '
    'to use the default 3D convolution. 2plus1d uses (2+1)D convolution '
    'with Conv2D operations and 2D reshaping (e.g., a 5x3x3 kernel becomes '
    '3x3 followed by 5x1 conv). 3d_2plus1d uses (2+1)D convolution with '
    'Conv3D and no 2D reshaping (e.g., a 5x3x3 kernel becomes 1x3x3 '
    'followed by 5x1x1 conv).')
flags.DEFINE_string(
    'se_type', '3d',
    '3d, 2d, or 2plus3d. 3d uses the default 3D spatiotemporal global '
    'average pooling for squeeze excitation. 2d uses 2D spatial global '
    'average pooling on each frame. 2plus3d concatenates both 3D and 2D '
    'global average pooling.')
flags.DEFINE_string(
    'activation', 'swish',
    'The main activation to use across layers.')
flags.DEFINE_string(
    'classifier_activation', 'swish',
    'The classifier activation to use.')
flags.DEFINE_string(
    'gating_activation', 'sigmoid',
    'The gating activation to use in squeeze-excitation layers.')
flags.DEFINE_bool(
    'use_positional_encoding', False,
    'Whether to use positional encoding (only applied when causal=True).')
flags.DEFINE_integer(
    'num_classes', 600, 'The number of classes for prediction.')
flags.DEFINE_integer(
    'batch_size', None,
    'The batch size of the input. Set to None for dynamic input.')
flags.DEFINE_integer(
    'num_frames', None,
    'The number of frames of the input. Set to None for dynamic input.')
flags.DEFINE_integer(
    'image_size', None,
    'The resolution of the input. Set to None for dynamic input.')
flags.DEFINE_bool(
    'bundle_input_init_states_fn', True,
    'Add init_states as a function signature to the saved model. '
    'This is not necessary if the input shape is static (e.g., for TF Lite).')
flags.DEFINE_string(
    'checkpoint_path', '',
    'Checkpoint path to load. Leave blank for default initialization.')
flags.DEFINE_bool(
    'assert_checkpoint_objects_matched', True,
    'Whether to check the checkpoint objects exactly match those of the '
    'model.',
)

FLAGS = flags.FLAGS


def export_saved_model(
    model: tf.keras.Model,
    input_shape: Tuple[int, int, int, int, int],
    export_path: str = '/tmp/movinet/',
    causal: bool = False,
    bundle_input_init_states_fn: bool = True,
    checkpoint_path: Optional[str] = None,
    assert_checkpoint_objects_matched: bool = True,
) -> None:
  """Exports a MoViNet model to a saved model.

  Args:
    model: the tf.keras.Model to export.
    input_shape: The 5D spatiotemporal input shape of size
      [batch_size, num_frames, image_height, image_width, num_channels].
      Set the field or a shape position in the field to None for dynamic
      input.
    export_path: Export path to save the saved_model file.
    causal: Run the model in causal mode.
    bundle_input_init_states_fn: Add init_states as a function signature to
      the saved model. This is not necessary if the input shape is static
      (e.g., for TF Lite).
    checkpoint_path: Checkpoint path to load. Leave blank to keep the model's
      initialization.
    assert_checkpoint_objects_matched: Whether to check the checkpoint objects
      exactly match those of the model.
  """
  # Use dimensions of 1 except the channels to export faster,
  # since we only really need the last dimension to build and get the output
  # states. These dimensions can be set to `None` once the model is built.
  input_shape_concrete = [1 if s is None else s for s in input_shape]
  model.build(input_shape_concrete)

  # Compile model to generate some internal Keras variables.
  model.compile()

  if checkpoint_path:
    checkpoint = tf.train.Checkpoint(model=model)
    status = checkpoint.restore(checkpoint_path)
    if assert_checkpoint_objects_matched:
      status.assert_existing_objects_matched()

  if causal:
    # Call the model once to get the output states. Call again with `states`
    # input to ensure that the inputs with the `states` argument are built
    # with the full output state shapes.
input_image = tf.ones(input_shape_concrete) _, states = model({ **model.init_states(input_shape_concrete), 'image': input_image}) _ = model({**states, 'image': input_image}) # Create a function to explicitly set the names of the outputs def predict(inputs): outputs, states = model(inputs) return {**states, 'logits': outputs} specs = { name: tf.TensorSpec(spec.shape, name=name, dtype=spec.dtype) for name, spec in model.initial_state_specs( input_shape).items() } specs['image'] = tf.TensorSpec( input_shape, dtype=model.dtype, name='image') predict_fn = tf.function(predict, jit_compile=True) predict_fn = predict_fn.get_concrete_function(specs) init_states_fn = tf.function(model.init_states, jit_compile=True) init_states_fn = init_states_fn.get_concrete_function( tf.TensorSpec([5], dtype=tf.int32)) if bundle_input_init_states_fn: signatures = {'call': predict_fn, 'init_states': init_states_fn} else: signatures = predict_fn tf.keras.models.save_model( model, export_path, signatures=signatures) else: _ = model(tf.ones(input_shape_concrete)) tf.keras.models.save_model(model, export_path) def build_and_export_saved_model( export_path: str = '/tmp/movinet/', model_id: str = 'a0', causal: bool = False, conv_type: str = '3d', se_type: str = '3d', activation: str = 'swish', classifier_activation: str = 'swish', gating_activation: str = 'sigmoid', use_positional_encoding: bool = False, num_classes: int = 600, input_shape: Optional[Tuple[int, int, int, int, int]] = None, bundle_input_init_states_fn: bool = True, checkpoint_path: Optional[str] = None, assert_checkpoint_objects_matched: bool = True, ) -> None: """Builds and exports a MoViNet model to a saved model. Args: export_path: Export path to save the saved_model file. model_id: MoViNet model name. causal: Run the model in causal mode. conv_type: 3d, 2plus1d, or 3d_2plus1d. 3d configures the network to use the default 3D convolution. 2plus1d uses (2+1)D convolution with Conv2D operations and 2D reshaping (e.g., a 5x3x3 kernel becomes 3x3 followed by 5x1 conv). 3d_2plus1d uses (2+1)D convolution with Conv3D and no 2D reshaping (e.g., a 5x3x3 kernel becomes 1x3x3 followed by 5x1x1 conv). se_type: 3d, 2d, or 2plus3d. 3d uses the default 3D spatiotemporal global average pooling for squeeze excitation. 2d uses 2D spatial global average pooling on each frame. 2plus3d concatenates both 3D and 2D global average pooling. activation: The main activation to use across layers. classifier_activation: The classifier activation to use. gating_activation: The gating activation to use in squeeze-excitation layers. use_positional_encoding: Whether to use positional encoding (only applied when causal=True). num_classes: The number of classes for prediction. input_shape: The 5D spatiotemporal input shape of size [batch_size, num_frames, image_height, image_width, num_channels]. Set the field or a shape position in the field to None for dynamic input. bundle_input_init_states_fn: Add init_states as a function signature to the saved model. This is not necessary if the input shape is static (e.g., for TF Lite). checkpoint_path: Checkpoint path to load. Leave blank for default initialization. assert_checkpoint_objects_matched: Whether to check the checkpoint objects exactly match those of the model. 
""" input_specs = tf.keras.layers.InputSpec(shape=input_shape) # Override swish activation implementation to remove custom gradients if activation == 'swish': activation = 'simple_swish' if classifier_activation == 'swish': classifier_activation = 'simple_swish' backbone = movinet.Movinet( model_id=model_id, causal=causal, use_positional_encoding=use_positional_encoding, conv_type=conv_type, se_type=se_type, input_specs=input_specs, activation=activation, gating_activation=gating_activation, use_sync_bn=False, use_external_states=causal) model = movinet_model.MovinetClassifier( backbone, num_classes=num_classes, output_states=causal, input_specs=dict(image=input_specs), activation=classifier_activation) export_saved_model( model=model, input_shape=input_shape, export_path=export_path, causal=causal, bundle_input_init_states_fn=bundle_input_init_states_fn, checkpoint_path=checkpoint_path, assert_checkpoint_objects_matched=assert_checkpoint_objects_matched, ) def main(_) -> None: input_shape = ( FLAGS.batch_size, FLAGS.num_frames, FLAGS.image_size, FLAGS.image_size, 3) build_and_export_saved_model( export_path=FLAGS.export_path, model_id=FLAGS.model_id, causal=FLAGS.causal, conv_type=FLAGS.conv_type, se_type=FLAGS.se_type, activation=FLAGS.activation, classifier_activation=FLAGS.classifier_activation, gating_activation=FLAGS.gating_activation, use_positional_encoding=FLAGS.use_positional_encoding, num_classes=FLAGS.num_classes, input_shape=input_shape, bundle_input_init_states_fn=FLAGS.bundle_input_init_states_fn, checkpoint_path=FLAGS.checkpoint_path, assert_checkpoint_objects_matched=FLAGS.assert_checkpoint_objects_matched, ) print(' ----- Done. Saved Model is saved at {}'.format(FLAGS.export_path)) if __name__ == '__main__': app.run(main)
11,694
36.009494
80
py
models
models-master/official/projects/movinet/tools/quantize_movinet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""Generates example dataset for post-training quantization.

Example command line to run the script:

```shell
python3 quantize_movinet.py \
  --saved_model_dir=${SAVED_MODEL_DIR} \
  --saved_model_with_states_dir=${SAVED_MODEL_WITH_STATES_DIR} \
  --output_dataset_dir=${OUTPUT_DATASET_DIR} \
  --output_tflite=${OUTPUT_TFLITE} \
  --quantization_mode='int_float_fallback' \
  --save_dataset_to_tfrecords=True
```
"""

import functools
from typing import Any, Callable, Mapping, Optional

from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_hub as hub

from official.vision.configs import video_classification as video_classification_configs
from official.vision.tasks import video_classification

tf.enable_v2_behavior()

FLAGS = flags.FLAGS

flags.DEFINE_string(
    'saved_model_dir', None, 'The saved_model directory.')
flags.DEFINE_string(
    'saved_model_with_states_dir', None,
    'The directory to the saved_model with state signature. '
    'The saved_model_with_states is needed in order to get the initial state '
    'shape and dtype while saved_model is used for the quantization.')
flags.DEFINE_string(
    'output_tflite', '/tmp/output.tflite', 'The output tflite file path.')
flags.DEFINE_integer(
    'temporal_stride', 5, 'Temporal stride used to generate input videos.')
flags.DEFINE_integer(
    'num_frames', 50, 'Number of frames of the input videos.')
flags.DEFINE_integer(
    'image_size', 172, 'Frame size of the input videos.')
flags.DEFINE_string(
    'quantization_mode', None,
    'The quantization mode. Can be one of "float16", "int8", '
    '"int_float_fallback" or None.')
flags.DEFINE_integer(
    'num_calibration_videos', 100,
    'Number of videos to run to generate example datasets.')
flags.DEFINE_integer(
    'num_samples_per_video', 3,
    'Number of samples drawn from a single video.')
flags.DEFINE_boolean(
    'save_dataset_to_tfrecords', False,
    'Whether to save representative dataset to the disk.')
flags.DEFINE_string(
    'output_dataset_dir', '/tmp/representative_dataset/',
    'The directory to store exported tfrecords.')
flags.DEFINE_integer(
    'max_saved_files', 100, 'The maximum number of tfrecord files to save.')


def _bytes_feature(value):
  """Returns a bytes_list from a string / byte."""
  if isinstance(value, type(tf.constant(0))):
    value = value.numpy()
    # BytesList won't unpack string from an EagerTensor.
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def _float_feature(value):
  """Returns a float_list from a float / double."""
  return tf.train.Feature(float_list=tf.train.FloatList(value=value))


def _int64_feature(value):
  """Returns an int64_list from a bool / enum / int / uint."""
  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def _build_tf_example(feature):
  return tf.train.Example(
      features=tf.train.Features(feature=feature)).SerializeToString()


def save_to_tfrecord(input_frame: tf.Tensor,
                     input_states: Mapping[str, tf.Tensor],
                     frame_index: int,
                     predictions: tf.Tensor,
                     output_states: Mapping[str, tf.Tensor],
                     groundtruth_label_id: tf.Tensor,
                     output_dataset_dir: str,
                     file_index: int):
  """Save results to tfrecord."""
  features = {}
  features['frame_id'] = _int64_feature([frame_index])
  features['groundtruth_label'] = _int64_feature(
      groundtruth_label_id.numpy().flatten().tolist())
  features['predictions'] = _float_feature(
      predictions.numpy().flatten().tolist())

  image_string = tf.io.encode_png(
      tf.squeeze(tf.cast(input_frame * 255., tf.uint8), axis=[0, 1]))
  features['image'] = _bytes_feature(image_string.numpy())

  # Input/Output states at time T
  for k, v in output_states.items():
    dtype = v[0].dtype
    if dtype == tf.int32:
      features['input/' + k] = _int64_feature(
          input_states[k].numpy().flatten().tolist())
      features['output/' + k] = _int64_feature(
          output_states[k].numpy().flatten().tolist())
    elif dtype == tf.float32:
      features['input/' + k] = _float_feature(
          input_states[k].numpy().flatten().tolist())
      features['output/' + k] = _float_feature(
          output_states[k].numpy().flatten().tolist())
    else:
      raise ValueError(f'Unrecognized dtype: {dtype}')

  tfe = _build_tf_example(features)

  record_file = '{}/movinet_stream_{:06d}.tfrecords'.format(
      output_dataset_dir, file_index)
  logging.info('Saving to %s.', record_file)
  with tf.io.TFRecordWriter(record_file) as writer:
    writer.write(tfe)


def get_dataset() -> tf.data.Dataset:
  """Gets dataset source."""
  config = video_classification_configs.video_classification_kinetics600()

  temporal_stride = FLAGS.temporal_stride
  num_frames = FLAGS.num_frames
  image_size = FLAGS.image_size

  feature_shape = (num_frames, image_size, image_size, 3)
  config.task.validation_data.global_batch_size = 1
  config.task.validation_data.feature_shape = feature_shape
  config.task.validation_data.temporal_stride = temporal_stride
  config.task.train_data.min_image_size = int(1.125 * image_size)
  config.task.validation_data.dtype = 'float32'
  config.task.validation_data.drop_remainder = False

  task = video_classification.VideoClassificationTask(config.task)
  valid_dataset = task.build_inputs(config.task.validation_data)
  valid_dataset = valid_dataset.map(lambda x, y: (x['image'], y))
  valid_dataset = valid_dataset.prefetch(32)
  return valid_dataset


def stateful_representative_dataset_generator(
    model: tf.keras.Model,
    dataset_iter: Any,
    init_states: Mapping[str, tf.Tensor],
    save_dataset_to_tfrecords: bool = False,
    max_saved_files: int = 100,
    output_dataset_dir: Optional[str] = None,
    num_samples_per_video: int = 3,
    num_calibration_videos: int = 100):
  """Generates sample input data with states.

  Args:
    model: the inference keras model.
    dataset_iter: the dataset source.
    init_states: the initial states for the model.
    save_dataset_to_tfrecords: whether to save the representative dataset to
      tfrecords on disk.
    max_saved_files: the max number of saved tfrecord files.
    output_dataset_dir: the directory to store the saved tfrecords.
    num_samples_per_video: number of randomly sampled frames per video.
    num_calibration_videos: number of calibration videos to run.

  Yields:
    A dictionary of model inputs.
  """
  counter = 0
  for i in range(num_calibration_videos):
    if i % 100 == 0:
      logging.info('Reading representative dataset id %d.', i)

    example_input, example_label = next(dataset_iter)
    groundtruth_label_id = tf.argmax(example_label, axis=-1)

    input_states = init_states
    # split video into frames along the temporal dimension.
    frames = tf.split(example_input, example_input.shape[1], axis=1)
    random_indices = np.random.randint(
        low=1, high=len(frames), size=num_samples_per_video)
    # always include the first frame
    random_indices[0] = 0
    random_indices = set(random_indices)
    for frame_index, frame in enumerate(frames):
      predictions, output_states = model({'image': frame, **input_states})
      if frame_index in random_indices:
        if save_dataset_to_tfrecords and counter < max_saved_files:
          save_to_tfrecord(
              input_frame=frame,
              input_states=input_states,
              frame_index=frame_index,
              predictions=predictions,
              output_states=output_states,
              groundtruth_label_id=groundtruth_label_id,
              output_dataset_dir=output_dataset_dir,
              file_index=counter)
        yield {'image': frame, **input_states}
        counter += 1
      # update states for the next inference step
      input_states = output_states


def get_tflite_converter(
    saved_model_dir: str,
    quantization_mode: str,
    representative_dataset: Optional[Callable[..., Any]] = None
) -> tf.lite.TFLiteConverter:
  """Gets tflite converter."""
  converter = tf.lite.TFLiteConverter.from_saved_model(
      saved_model_dir=saved_model_dir)
  converter.optimizations = [tf.lite.Optimize.DEFAULT]
  if quantization_mode == 'float16':
    logging.info('Using float16 quantization.')
    converter.target_spec.supported_types = [tf.float16]
  elif quantization_mode == 'int8':
    logging.info('Using full integer quantization.')
    converter.representative_dataset = representative_dataset
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = tf.int8
    converter.inference_output_type = tf.int8
  elif quantization_mode == 'int_float_fallback':
    logging.info('Using integer quantization with floating-point fallback.')
    converter.representative_dataset = representative_dataset
  else:
    logging.info('Using dynamic range quantization.')
  return converter


def quantize_movinet(dataset_fn):
  """Quantizes Movinet."""
  valid_dataset = dataset_fn()
  dataset_iter = iter(valid_dataset)

  # Load model
  encoder = hub.KerasLayer(FLAGS.saved_model_with_states_dir, trainable=False)

  inputs = tf.keras.layers.Input(
      shape=[1, FLAGS.image_size, FLAGS.image_size, 3],
      dtype=tf.float32,
      name='image')

  # Define the state inputs, which is a dict that maps state names to tensors.
  init_states_fn = encoder.resolved_object.signatures['init_states']
  state_shapes = {
      name: ([s if s > 0 else None for s in state.shape], state.dtype)
      for name, state in init_states_fn(
          tf.constant([1, 1, FLAGS.image_size, FLAGS.image_size, 3])).items()
  }
  states_input = {
      name: tf.keras.Input(shape[1:], dtype=dtype, name=name)
      for name, (shape, dtype) in state_shapes.items()
  }

  # The inputs to the model are the states and the video
  inputs = {**states_input, 'image': inputs}
  outputs = encoder(inputs)
  model = tf.keras.Model(inputs, outputs, name='movinet_stream')

  input_shape = tf.constant(
      [1, FLAGS.num_frames, FLAGS.image_size, FLAGS.image_size, 3])
  init_states = init_states_fn(input_shape)

  # Configure the representative dataset function.
  representative_dataset = functools.partial(
      stateful_representative_dataset_generator,
      model=model,
      dataset_iter=dataset_iter,
      init_states=init_states,
      save_dataset_to_tfrecords=FLAGS.save_dataset_to_tfrecords,
      max_saved_files=FLAGS.max_saved_files,
      output_dataset_dir=FLAGS.output_dataset_dir,
      num_samples_per_video=FLAGS.num_samples_per_video,
      num_calibration_videos=FLAGS.num_calibration_videos)

  converter = get_tflite_converter(
      saved_model_dir=FLAGS.saved_model_dir,
      quantization_mode=FLAGS.quantization_mode,
      representative_dataset=representative_dataset)

  logging.info('Converting...')
  tflite_buffer = converter.convert()
  return tflite_buffer


def main(_):
  tflite_buffer = quantize_movinet(dataset_fn=get_dataset)
  with open(FLAGS.output_tflite, 'wb') as f:
    f.write(tflite_buffer)
  logging.info('tflite model written to %s', FLAGS.output_tflite)


if __name__ == '__main__':
  flags.mark_flag_as_required('saved_model_dir')
  flags.mark_flag_as_required('saved_model_with_states_dir')
  app.run(main)
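When `save_dataset_to_tfrecords` is enabled, `save_to_tfrecord` above writes one `tf.train.Example` per sampled frame. A hedged sketch of reading one back; only the fixed-name features are parsed here, since the per-state `input/...` and `output/...` keys depend on the model, and the path is a placeholder matching the default `output_dataset_dir`:

```python
# Hedged sketch, not part of the original file.
import tensorflow as tf

path = '/tmp/representative_dataset/movinet_stream_000000.tfrecords'
for raw in tf.data.TFRecordDataset(path).take(1):
  example = tf.train.Example.FromString(raw.numpy())
  feature = example.features.feature
  frame_id = feature['frame_id'].int64_list.value[0]
  label = feature['groundtruth_label'].int64_list.value[0]
  image = tf.io.decode_png(feature['image'].bytes_list.value[0])
  print(frame_id, label, image.shape)
```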
11,994
35.129518
88
py
models
models-master/official/projects/movinet/modeling/movinet_layers.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains common building blocks for MoViNets.

Reference: https://arxiv.org/pdf/2103.11511.pdf
"""

from typing import Any, Mapping, Optional, Sequence, Tuple, Union

import tensorflow as tf

from official.modeling import tf_utils
from official.vision.modeling.layers import nn_layers

# Default kernel weight decay that may be overridden
KERNEL_WEIGHT_DECAY = 1.5e-5


def normalize_tuple(value: Union[int, Tuple[int, ...]], size: int, name: str):
  """Transforms a single integer or iterable of integers into an integer tuple.

  Arguments:
    value: The value to validate and convert. Could be an int, or any iterable
      of ints.
    size: The size of the tuple to be returned.
    name: The name of the argument being validated, e.g. "strides" or
      "kernel_size". This is only used to format error messages.

  Returns:
    A tuple of `size` integers.

  Raises:
    ValueError: If something else than an int/long or iterable thereof was
      passed.
  """
  if isinstance(value, int):
    return (value,) * size
  else:
    try:
      value_tuple = tuple(value)
    except TypeError:
      raise ValueError('The `' + name + '` argument must be a tuple of ' +
                       str(size) + ' integers. Received: ' + str(value))
    if len(value_tuple) != size:
      raise ValueError('The `' + name + '` argument must be a tuple of ' +
                       str(size) + ' integers. Received: ' + str(value))
    for single_value in value_tuple:
      try:
        int(single_value)
      except (ValueError, TypeError):
        raise ValueError('The `' + name + '` argument must be a tuple of ' +
                         str(size) + ' integers. Received: ' + str(value) + ' '
                         'including element ' + str(single_value) + ' of type' +
                         ' ' + str(type(single_value)))
    return value_tuple


@tf.keras.utils.register_keras_serializable(package='Vision')
class Squeeze3D(tf.keras.layers.Layer):
  """Squeeze3D layer to remove singular dimensions."""

  def call(self, inputs):
    """Calls the layer with the given inputs."""
    return tf.squeeze(inputs, axis=(1, 2, 3))


@tf.keras.utils.register_keras_serializable(package='Vision')
class MobileConv2D(tf.keras.layers.Layer):
  """Conv2D layer with extra options to support mobile devices.

  Reshapes 5D video tensor inputs to 4D, allowing Conv2D to run across
  dimensions (2, 3) or (3, 4). Reshapes tensors back to 5D when returning the
  output.
  """

  def __init__(
      self,
      filters: int,
      kernel_size: Union[int, Sequence[int]],
      strides: Union[int, Sequence[int]] = (1, 1),
      padding: str = 'valid',
      data_format: Optional[str] = None,
      dilation_rate: Union[int, Sequence[int]] = (1, 1),
      groups: int = 1,
      use_bias: bool = True,
      kernel_initializer: str = 'glorot_uniform',
      bias_initializer: str = 'zeros',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      activity_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      kernel_constraint: Optional[tf.keras.constraints.Constraint] = None,
      bias_constraint: Optional[tf.keras.constraints.Constraint] = None,
      use_depthwise: bool = False,
      use_temporal: bool = False,
      use_buffered_input: bool = False,  # pytype: disable=annotation-type-mismatch  # typed-keras
      batch_norm_op: Optional[Any] = None,
      activation_op: Optional[Any] = None,
      **kwargs):  # pylint: disable=g-doc-args
    """Initializes mobile conv2d.

    For the majority of arguments, see tf.keras.layers.Conv2D.

    Args:
      use_depthwise: if True, use DepthwiseConv2D instead of Conv2D
      use_temporal: if True, apply Conv2D starting from the temporal dimension
        instead of the spatial dimensions.
      use_buffered_input: if True, the input is expected to be padded
        beforehand. In effect, calling this layer will use 'valid' padding on
        the temporal dimension to simulate 'causal' padding.
      batch_norm_op: A callable object of batch norm layer. If None, no batch
        norm will be applied after the convolution.
      activation_op: A callable object of activation layer. If None, no
        activation will be applied after the convolution.
      **kwargs: keyword arguments to be passed to this layer.

    Returns:
      An output tensor of the MobileConv2D operation.
    """
    super(MobileConv2D, self).__init__(**kwargs)
    self._filters = filters
    self._kernel_size = kernel_size
    self._strides = strides
    self._padding = padding
    self._data_format = data_format
    self._dilation_rate = dilation_rate
    self._groups = groups
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._activity_regularizer = activity_regularizer
    self._kernel_constraint = kernel_constraint
    self._bias_constraint = bias_constraint
    self._use_depthwise = use_depthwise
    self._use_temporal = use_temporal
    self._use_buffered_input = use_buffered_input
    self._batch_norm_op = batch_norm_op
    self._activation_op = activation_op

    kernel_size = normalize_tuple(kernel_size, 2, 'kernel_size')

    if self._use_temporal and kernel_size[1] > 1:
      raise ValueError('Temporal conv with spatial kernel is not supported.')

    if use_depthwise:
      self._conv = nn_layers.DepthwiseConv2D(
          kernel_size=kernel_size,
          strides=strides,
          padding=padding,
          depth_multiplier=1,
          data_format=data_format,
          dilation_rate=dilation_rate,
          use_bias=use_bias,
          depthwise_initializer=kernel_initializer,
          bias_initializer=bias_initializer,
          depthwise_regularizer=kernel_regularizer,
          bias_regularizer=bias_regularizer,
          activity_regularizer=activity_regularizer,
          depthwise_constraint=kernel_constraint,
          bias_constraint=bias_constraint,
          use_buffered_input=use_buffered_input)
    else:
      self._conv = nn_layers.Conv2D(
          filters=filters,
          kernel_size=kernel_size,
          strides=strides,
          padding=padding,
          data_format=data_format,
          dilation_rate=dilation_rate,
          groups=groups,
          use_bias=use_bias,
          kernel_initializer=kernel_initializer,
          bias_initializer=bias_initializer,
          kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, use_buffered_input=use_buffered_input) def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'filters': self._filters, 'kernel_size': self._kernel_size, 'strides': self._strides, 'padding': self._padding, 'data_format': self._data_format, 'dilation_rate': self._dilation_rate, 'groups': self._groups, 'use_bias': self._use_bias, 'kernel_initializer': self._kernel_initializer, 'bias_initializer': self._bias_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activity_regularizer': self._activity_regularizer, 'kernel_constraint': self._kernel_constraint, 'bias_constraint': self._bias_constraint, 'use_depthwise': self._use_depthwise, 'use_temporal': self._use_temporal, 'use_buffered_input': self._use_buffered_input, } base_config = super(MobileConv2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): """Calls the layer with the given inputs.""" if self._use_temporal: input_shape = [ tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2] * tf.shape(inputs)[3], inputs.shape[4]] else: input_shape = [ tf.shape(inputs)[0] * tf.shape(inputs)[1], tf.shape(inputs)[2], tf.shape(inputs)[3], inputs.shape[4]] x = tf.reshape(inputs, input_shape) x = self._conv(x) if self._batch_norm_op is not None: x = self._batch_norm_op(x) if self._activation_op is not None: x = self._activation_op(x) if self._use_temporal: output_shape = [ tf.shape(x)[0], tf.shape(x)[1], tf.shape(inputs)[2], tf.shape(inputs)[3], x.shape[3]] else: output_shape = [ tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(x)[1], tf.shape(x)[2], x.shape[3]] x = tf.reshape(x, output_shape) return x @tf.keras.utils.register_keras_serializable(package='Vision') class ConvBlock(tf.keras.layers.Layer): """A Conv followed by optional BatchNorm and Activation.""" def __init__( self, filters: int, kernel_size: Union[int, Sequence[int]], strides: Union[int, Sequence[int]] = 1, depthwise: bool = False, causal: bool = False, use_bias: bool = False, kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras.regularizers.L2(KERNEL_WEIGHT_DECAY), use_batch_norm: bool = True, batch_norm_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization, batch_norm_momentum: float = 0.99, batch_norm_epsilon: float = 1e-3, use_sync_bn: bool = False, activation: Optional[Any] = None, conv_type: str = '3d', use_buffered_input: bool = False, # pytype: disable=annotation-type-mismatch # typed-keras **kwargs): """Initializes a conv block. Args: filters: filters for the conv operation. kernel_size: kernel size for the conv operation. strides: strides for the conv operation. depthwise: if True, use DepthwiseConv2D instead of Conv2D causal: if True, use causal mode for the conv operation. use_bias: use bias for the conv operation. kernel_initializer: kernel initializer for the conv operation. kernel_regularizer: kernel regularizer for the conv operation. use_batch_norm: if True, apply batch norm after the conv operation. batch_norm_layer: class to use for batch norm, if applied. batch_norm_momentum: momentum of the batch norm operation, if applied. batch_norm_epsilon: epsilon of the batch norm operation, if applied. use_sync_bn: if True, use synchronized batch normalization. 
activation: activation after the conv and batch norm operations. conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D ops. '2plus1d' split any 3D ops into two sequential 2D ops with their own batch norm and activation. '3d_2plus1d' is like '2plus1d', but uses two sequential 3D ops instead. use_buffered_input: if True, the input is expected to be padded beforehand. In effect, calling this layer will use 'valid' padding on the temporal dimension to simulate 'causal' padding. **kwargs: keyword arguments to be passed to this layer. Returns: A output tensor of the ConvBlock operation. """ super(ConvBlock, self).__init__(**kwargs) kernel_size = normalize_tuple(kernel_size, 3, 'kernel_size') strides = normalize_tuple(strides, 3, 'strides') self._filters = filters self._kernel_size = kernel_size self._strides = strides self._depthwise = depthwise self._causal = causal self._use_bias = use_bias self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._use_batch_norm = use_batch_norm self._batch_norm_layer = batch_norm_layer self._batch_norm_momentum = batch_norm_momentum self._batch_norm_epsilon = batch_norm_epsilon self._use_sync_bn = use_sync_bn self._activation = activation self._conv_type = conv_type self._use_buffered_input = use_buffered_input if activation is not None: self._activation_layer = tf_utils.get_activation( activation, use_keras_layer=True) else: self._activation_layer = None self._groups = None def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'filters': self._filters, 'kernel_size': self._kernel_size, 'strides': self._strides, 'depthwise': self._depthwise, 'causal': self._causal, 'use_bias': self._use_bias, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'use_batch_norm': self._use_batch_norm, 'batch_norm_momentum': self._batch_norm_momentum, 'batch_norm_epsilon': self._batch_norm_epsilon, 'use_sync_bn': self._use_sync_bn, 'activation': self._activation, 'conv_type': self._conv_type, 'use_buffered_input': self._use_buffered_input, } base_config = super(ConvBlock, self).get_config() return dict(list(base_config.items()) + list(config.items())) def build(self, input_shape): """Builds the layer with the given input shape.""" padding = 'causal' if self._causal else 'same' self._groups = input_shape[-1] if self._depthwise else 1 self._batch_norm = None self._batch_norm_temporal = None if self._use_batch_norm: self._batch_norm = self._batch_norm_layer( momentum=self._batch_norm_momentum, epsilon=self._batch_norm_epsilon, synchronized=self._use_sync_bn, name='bn') if self._conv_type != '3d' and self._kernel_size[0] > 1: self._batch_norm_temporal = self._batch_norm_layer( momentum=self._batch_norm_momentum, epsilon=self._batch_norm_epsilon, synchronized=self._use_sync_bn, name='bn_temporal') self._conv_temporal = None if self._conv_type == '3d_2plus1d' and self._kernel_size[0] > 1: self._conv = nn_layers.Conv3D( self._filters, (1, self._kernel_size[1], self._kernel_size[2]), strides=(1, self._strides[1], self._strides[2]), padding='same', groups=self._groups, use_bias=self._use_bias, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_buffered_input=False, name='conv3d') self._conv_temporal = nn_layers.Conv3D( self._filters, (self._kernel_size[0], 1, 1), strides=(self._strides[0], 1, 1), padding=padding, groups=self._groups, use_bias=self._use_bias, 
kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_buffered_input=self._use_buffered_input, name='conv3d_temporal') elif self._conv_type == '2plus1d': self._conv = MobileConv2D( self._filters, (self._kernel_size[1], self._kernel_size[2]), strides=(self._strides[1], self._strides[2]), padding='same', use_depthwise=self._depthwise, groups=self._groups, use_bias=self._use_bias, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_buffered_input=False, batch_norm_op=self._batch_norm, activation_op=self._activation_layer, name='conv2d') if self._kernel_size[0] > 1: self._conv_temporal = MobileConv2D( self._filters, (self._kernel_size[0], 1), strides=(self._strides[0], 1), padding=padding, use_temporal=True, use_depthwise=self._depthwise, groups=self._groups, use_bias=self._use_bias, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_buffered_input=self._use_buffered_input, batch_norm_op=self._batch_norm_temporal, activation_op=self._activation_layer, name='conv2d_temporal') else: self._conv = nn_layers.Conv3D( self._filters, self._kernel_size, strides=self._strides, padding=padding, groups=self._groups, use_bias=self._use_bias, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_buffered_input=self._use_buffered_input, name='conv3d') super(ConvBlock, self).build(input_shape) def call(self, inputs): """Calls the layer with the given inputs.""" x = inputs # bn_op and activation_op are folded into the '2plus1d' conv layer so that # we do not explicitly call them here. # TODO(lzyuan): clean the conv layers api once the models are re-trained. x = self._conv(x) if self._batch_norm is not None and self._conv_type != '2plus1d': x = self._batch_norm(x) if self._activation_layer is not None and self._conv_type != '2plus1d': x = self._activation_layer(x) if self._conv_temporal is not None: x = self._conv_temporal(x) if self._batch_norm_temporal is not None and self._conv_type != '2plus1d': x = self._batch_norm_temporal(x) if self._activation_layer is not None and self._conv_type != '2plus1d': x = self._activation_layer(x) return x @tf.keras.utils.register_keras_serializable(package='Vision') class StreamBuffer(tf.keras.layers.Layer): """Stream buffer wrapper which caches activations of previous frames.""" def __init__(self, buffer_size: int, state_prefix: Optional[str] = None, **kwargs): """Initializes a stream buffer. Args: buffer_size: the number of input frames to cache. state_prefix: a prefix string to identify states. **kwargs: keyword arguments to be passed to this layer. Returns: A output tensor of the StreamBuffer operation. """ super(StreamBuffer, self).__init__(**kwargs) state_prefix = state_prefix if state_prefix is not None else '' self._state_prefix = state_prefix self._state_name = f'{state_prefix}_stream_buffer' self._buffer_size = buffer_size def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'buffer_size': self._buffer_size, 'state_prefix': self._state_prefix, } base_config = super(StreamBuffer, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call( self, inputs: tf.Tensor, states: Optional[nn_layers.States] = None, ) -> Tuple[Any, nn_layers.States]: """Calls the layer with the given inputs. Args: inputs: the input tensor. 
states: a dict of states such that, if any of the keys match for this layer, will overwrite the contents of the buffer(s). Expected keys include `state_prefix + '_stream_buffer'`. Returns: the output tensor and states """ states = dict(states) if states is not None else {} buffer = states.get(self._state_name, None) # Create the buffer if it does not exist in the states. # Output buffer shape: # [batch_size, buffer_size, input_height, input_width, num_channels] if buffer is None: shape = tf.shape(inputs) buffer = tf.zeros( [shape[0], self._buffer_size, shape[2], shape[3], shape[4]], dtype=inputs.dtype) # tf.pad has limited support for tf lite, so use tf.concat instead. full_inputs = tf.concat([buffer, inputs], axis=1) # Cache the last b frames of the input where b is the buffer size and f # is the number of input frames. If b > f, then we will cache the last b - f # frames from the previous buffer concatenated with the current f input # frames. new_buffer = full_inputs[:, -self._buffer_size:] states[self._state_name] = new_buffer return full_inputs, states @tf.keras.utils.register_keras_serializable(package='Vision') class StreamConvBlock(ConvBlock): """ConvBlock with StreamBuffer.""" def __init__( self, filters: int, kernel_size: Union[int, Sequence[int]], strides: Union[int, Sequence[int]] = 1, depthwise: bool = False, causal: bool = False, use_bias: bool = False, kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras .regularizers.L2(KERNEL_WEIGHT_DECAY), use_batch_norm: bool = True, batch_norm_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization, batch_norm_momentum: float = 0.99, batch_norm_epsilon: float = 1e-3, use_sync_bn: bool = False, activation: Optional[Any] = None, conv_type: str = '3d', state_prefix: Optional[str] = None, # pytype: disable=annotation-type-mismatch # typed-keras **kwargs): """Initializes a stream conv block. Args: filters: filters for the conv operation. kernel_size: kernel size for the conv operation. strides: strides for the conv operation. depthwise: if True, use DepthwiseConv2D instead of Conv2D causal: if True, use causal mode for the conv operation. use_bias: use bias for the conv operation. kernel_initializer: kernel initializer for the conv operation. kernel_regularizer: kernel regularizer for the conv operation. use_batch_norm: if True, apply batch norm after the conv operation. batch_norm_layer: class to use for batch norm, if applied. batch_norm_momentum: momentum of the batch norm operation, if applied. batch_norm_epsilon: epsilon of the batch norm operation, if applied. use_sync_bn: if True, use synchronized batch normalization. activation: activation after the conv and batch norm operations. conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D ops. '2plus1d' split any 3D ops into two sequential 2D ops with their own batch norm and activation. '3d_2plus1d' is like '2plus1d', but uses two sequential 3D ops instead. state_prefix: a prefix string to identify states. **kwargs: keyword arguments to be passed to this layer. Returns: A output tensor of the StreamConvBlock operation. 
""" kernel_size = normalize_tuple(kernel_size, 3, 'kernel_size') buffer_size = kernel_size[0] - 1 use_buffer = buffer_size > 0 and causal self._state_prefix = state_prefix super(StreamConvBlock, self).__init__( filters, kernel_size, strides=strides, depthwise=depthwise, causal=causal, use_bias=use_bias, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, use_batch_norm=use_batch_norm, batch_norm_layer=batch_norm_layer, batch_norm_momentum=batch_norm_momentum, batch_norm_epsilon=batch_norm_epsilon, use_sync_bn=use_sync_bn, activation=activation, conv_type=conv_type, use_buffered_input=use_buffer, **kwargs) self._stream_buffer = None if use_buffer: self._stream_buffer = StreamBuffer( buffer_size=buffer_size, state_prefix=state_prefix) def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = {'state_prefix': self._state_prefix} base_config = super(StreamConvBlock, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, states: Optional[nn_layers.States] = None ) -> Tuple[tf.Tensor, nn_layers.States]: """Calls the layer with the given inputs. Args: inputs: the input tensor. states: a dict of states such that, if any of the keys match for this layer, will overwrite the contents of the buffer(s). Returns: the output tensor and states """ states = dict(states) if states is not None else {} x = inputs # If we have no separate temporal conv, use the buffer before the 3D conv. if self._conv_temporal is None and self._stream_buffer is not None: x, states = self._stream_buffer(x, states=states) # bn_op and activation_op are folded into the '2plus1d' conv layer so that # we do not explicitly call them here. # TODO(lzyuan): clean the conv layers api once the models are re-trained. x = self._conv(x) if self._batch_norm is not None and self._conv_type != '2plus1d': x = self._batch_norm(x) if self._activation_layer is not None and self._conv_type != '2plus1d': x = self._activation_layer(x) if self._conv_temporal is not None: if self._stream_buffer is not None: # If we have a separate temporal conv, use the buffer before the # 1D conv instead (otherwise, we may waste computation on the 2D conv). x, states = self._stream_buffer(x, states=states) x = self._conv_temporal(x) if self._batch_norm_temporal is not None and self._conv_type != '2plus1d': x = self._batch_norm_temporal(x) if self._activation_layer is not None and self._conv_type != '2plus1d': x = self._activation_layer(x) return x, states @tf.keras.utils.register_keras_serializable(package='Vision') class StreamSqueezeExcitation(tf.keras.layers.Layer): """Squeeze and excitation layer with causal mode. Reference: https://arxiv.org/pdf/1709.01507.pdf """ def __init__( self, hidden_filters: int, se_type: str = '3d', activation: nn_layers.Activation = 'swish', gating_activation: nn_layers.Activation = 'sigmoid', causal: bool = False, conv_type: str = '3d', kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras .regularizers.L2(KERNEL_WEIGHT_DECAY), use_positional_encoding: bool = False, state_prefix: Optional[str] = None, # pytype: disable=annotation-type-mismatch # typed-keras **kwargs): """Implementation for squeeze and excitation. Args: hidden_filters: The hidden filters of squeeze excite. se_type: '3d', '2d', or '2plus3d'. '3d' uses the default 3D spatiotemporal global average pooling for squeeze excitation. 
'2d' uses 2D spatial global average pooling on each frame. '2plus3d' concatenates both 3D and 2D global average pooling. activation: name of the activation function. gating_activation: name of the activation function for gating. causal: if True, use causal mode in the global average pool. conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D ops. '2plus1d' split any 3D ops into two sequential 2D ops with their own batch norm and activation. '3d_2plus1d' is like '2plus1d', but uses two sequential 3D ops instead. kernel_initializer: kernel initializer for the conv operations. kernel_regularizer: kernel regularizer for the conv operation. use_positional_encoding: add a positional encoding after the (cumulative) global average pooling layer. state_prefix: a prefix string to identify states. **kwargs: keyword arguments to be passed to this layer. """ super(StreamSqueezeExcitation, self).__init__(**kwargs) self._hidden_filters = hidden_filters self._se_type = se_type self._activation = activation self._gating_activation = gating_activation self._causal = causal self._conv_type = conv_type self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._use_positional_encoding = use_positional_encoding self._state_prefix = state_prefix self._spatiotemporal_pool = nn_layers.GlobalAveragePool3D( keepdims=True, causal=causal, state_prefix=state_prefix) self._spatial_pool = nn_layers.SpatialAveragePool3D(keepdims=True) self._pos_encoding = None if use_positional_encoding: self._pos_encoding = nn_layers.PositionalEncoding( initializer='zeros', state_prefix=state_prefix) def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'hidden_filters': self._hidden_filters, 'se_type': self._se_type, 'activation': self._activation, 'gating_activation': self._gating_activation, 'causal': self._causal, 'conv_type': self._conv_type, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'use_positional_encoding': self._use_positional_encoding, 'state_prefix': self._state_prefix, } base_config = super(StreamSqueezeExcitation, self).get_config() return dict(list(base_config.items()) + list(config.items())) def build(self, input_shape): """Builds the layer with the given input shape.""" self._se_reduce = ConvBlock( filters=self._hidden_filters, kernel_size=1, causal=self._causal, use_bias=True, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_batch_norm=False, activation=self._activation, conv_type=self._conv_type, name='se_reduce') self._se_expand = ConvBlock( filters=input_shape[-1], kernel_size=1, causal=self._causal, use_bias=True, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_batch_norm=False, activation=self._gating_activation, conv_type=self._conv_type, name='se_expand') super(StreamSqueezeExcitation, self).build(input_shape) def call(self, inputs: tf.Tensor, states: Optional[nn_layers.States] = None ) -> Tuple[tf.Tensor, nn_layers.States]: """Calls the layer with the given inputs. Args: inputs: the input tensor. states: a dict of states such that, if any of the keys match for this layer, will overwrite the contents of the buffer(s). 
Returns: the output tensor and states """ states = dict(states) if states is not None else {} if self._se_type == '3d': x, states = self._spatiotemporal_pool( inputs, states=states, output_states=True) elif self._se_type == '2d': x = self._spatial_pool(inputs) elif self._se_type == '2plus3d': x_space = self._spatial_pool(inputs) x, states = self._spatiotemporal_pool( x_space, states=states, output_states=True) if not self._causal: x = tf.tile(x, [1, tf.shape(inputs)[1], 1, 1, 1]) x = tf.concat([x, x_space], axis=-1) else: raise ValueError('Unknown Squeeze Excitation type {}'.format( self._se_type)) if self._pos_encoding is not None: x, states = self._pos_encoding(x, states=states) x = self._se_reduce(x) x = self._se_expand(x) return x * inputs, states @tf.keras.utils.register_keras_serializable(package='Vision') class MobileBottleneck(tf.keras.layers.Layer): """A depthwise inverted bottleneck block. Uses dependency injection to allow flexible definition of different layers within this block. """ def __init__(self, expansion_layer: tf.keras.layers.Layer, feature_layer: tf.keras.layers.Layer, projection_layer: tf.keras.layers.Layer, attention_layer: Optional[tf.keras.layers.Layer] = None, skip_layer: Optional[tf.keras.layers.Layer] = None, stochastic_depth_drop_rate: Optional[float] = None, **kwargs): """Implementation for mobile bottleneck. Args: expansion_layer: initial layer used for pointwise expansion. feature_layer: main layer used for computing 3D features. projection_layer: layer used for pointwise projection. attention_layer: optional layer used for attention-like operations (e.g., squeeze excite). skip_layer: optional skip layer used to project the input before summing with the output for the residual connection. stochastic_depth_drop_rate: optional drop rate for stochastic depth. **kwargs: keyword arguments to be passed to this layer. """ super(MobileBottleneck, self).__init__(**kwargs) self._projection_layer = projection_layer self._attention_layer = attention_layer self._skip_layer = skip_layer self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._identity = tf.keras.layers.Activation(tf.identity) self._rezero = nn_layers.Scale(initializer='zeros', name='rezero') if stochastic_depth_drop_rate: self._stochastic_depth = nn_layers.StochasticDepth( stochastic_depth_drop_rate, name='stochastic_depth') else: self._stochastic_depth = None self._feature_layer = feature_layer self._expansion_layer = expansion_layer def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, } base_config = super(MobileBottleneck, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, states: Optional[nn_layers.States] = None ) -> Tuple[tf.Tensor, nn_layers.States]: """Calls the layer with the given inputs. Args: inputs: the input tensor. states: a dict of states such that, if any of the keys match for this layer, will overwrite the contents of the buffer(s). Returns: the output tensor and states """ states = dict(states) if states is not None else {} x = self._expansion_layer(inputs) x, states = self._feature_layer(x, states=states) if self._attention_layer is not None: x, states = self._attention_layer(x, states=states) x = self._projection_layer(x) # Add identity so that the ops are ordered as written. This is useful for, # e.g., quantization. 
x = self._identity(x) x = self._rezero(x) if self._stochastic_depth is not None: x = self._stochastic_depth(x) if self._skip_layer is not None: skip = self._skip_layer(inputs) else: skip = inputs return x + skip, states @tf.keras.utils.register_keras_serializable(package='Vision') class SkipBlock(tf.keras.layers.Layer): """Skip block for bottleneck blocks.""" def __init__( self, out_filters: int, downsample: bool = False, conv_type: str = '3d', kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras.regularizers.L2(KERNEL_WEIGHT_DECAY), batch_norm_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization, batch_norm_momentum: float = 0.99, batch_norm_epsilon: float = 1e-3, # pytype: disable=annotation-type-mismatch # typed-keras use_sync_bn: bool = False, **kwargs): """Implementation for skip block. Args: out_filters: the number of projected output filters. downsample: if True, downsamples the input by a factor of 2 by applying average pooling with a 3x3 kernel size on the spatial dimensions. conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D ops. '2plus1d' split any 3D ops into two sequential 2D ops with their own batch norm and activation. '3d_2plus1d' is like '2plus1d', but uses two sequential 3D ops instead. kernel_initializer: kernel initializer for the conv operations. kernel_regularizer: kernel regularizer for the conv projection. batch_norm_layer: class to use for batch norm. batch_norm_momentum: momentum of the batch norm operation. batch_norm_epsilon: epsilon of the batch norm operation. use_sync_bn: if True, use synchronized batch normalization. **kwargs: keyword arguments to be passed to this layer. """ super(SkipBlock, self).__init__(**kwargs) self._out_filters = out_filters self._downsample = downsample self._conv_type = conv_type self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._batch_norm_layer = batch_norm_layer self._batch_norm_momentum = batch_norm_momentum self._batch_norm_epsilon = batch_norm_epsilon self._use_sync_bn = use_sync_bn self._projection = ConvBlock( filters=self._out_filters, kernel_size=1, conv_type=conv_type, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, use_batch_norm=True, batch_norm_layer=self._batch_norm_layer, batch_norm_momentum=self._batch_norm_momentum, batch_norm_epsilon=self._batch_norm_epsilon, use_sync_bn=self._use_sync_bn, name='skip_project') if downsample: if self._conv_type == '2plus1d': self._pool = tf.keras.layers.AveragePooling2D( pool_size=(3, 3), strides=(2, 2), padding='same', name='skip_pool') else: self._pool = tf.keras.layers.AveragePooling3D( pool_size=(1, 3, 3), strides=(1, 2, 2), padding='same', name='skip_pool') else: self._pool = None def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'out_filters': self._out_filters, 'downsample': self._downsample, 'conv_type': self._conv_type, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'batch_norm_momentum': self._batch_norm_momentum, 'batch_norm_epsilon': self._batch_norm_epsilon, 'use_sync_bn': self._use_sync_bn } base_config = super(SkipBlock, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): """Calls the layer with the given inputs.""" x = inputs if self._pool is not None: if self._conv_type == '2plus1d': x = tf.reshape(x, [-1, 
tf.shape(x)[2], tf.shape(x)[3], x.shape[4]]) x = self._pool(x) if self._conv_type == '2plus1d': x = tf.reshape( x, [tf.shape(inputs)[0], -1, tf.shape(x)[1], tf.shape(x)[2], x.shape[3]]) return self._projection(x) @tf.keras.utils.register_keras_serializable(package='Vision') class MovinetBlock(tf.keras.layers.Layer): """A basic block for MoViNets. Applies a mobile inverted bottleneck with pointwise expansion, 3D depthwise convolution, 3D squeeze excite, pointwise projection, and residual connection. """ def __init__( self, out_filters: int, expand_filters: int, kernel_size: Union[int, Sequence[int]] = (3, 3, 3), strides: Union[int, Sequence[int]] = (1, 1, 1), causal: bool = False, activation: nn_layers.Activation = 'swish', gating_activation: nn_layers.Activation = 'sigmoid', se_ratio: float = 0.25, stochastic_depth_drop_rate: float = 0., conv_type: str = '3d', se_type: str = '3d', use_positional_encoding: bool = False, kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras .regularizers.L2(KERNEL_WEIGHT_DECAY), batch_norm_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization, batch_norm_momentum: float = 0.99, batch_norm_epsilon: float = 1e-3, use_sync_bn: bool = False, state_prefix: Optional[str] = None, # pytype: disable=annotation-type-mismatch # typed-keras **kwargs): """Implementation for MoViNet block. Args: out_filters: number of output filters for the final projection. expand_filters: number of expansion filters after the input. kernel_size: kernel size of the main depthwise convolution. strides: strides of the main depthwise convolution. causal: if True, run the temporal convolutions in causal mode. activation: activation to use across all conv operations. gating_activation: gating activation to use in squeeze excitation layers. se_ratio: squeeze excite filters ratio. stochastic_depth_drop_rate: optional drop rate for stochastic depth. conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D ops. '2plus1d' split any 3D ops into two sequential 2D ops with their own batch norm and activation. '3d_2plus1d' is like '2plus1d', but uses two sequential 3D ops instead. se_type: '3d', '2d', or '2plus3d'. '3d' uses the default 3D spatiotemporal global average pooling for squeeze excitation. '2d' uses 2D spatial global average pooling on each frame. '2plus3d' concatenates both 3D and 2D global average pooling. use_positional_encoding: add a positional encoding after the (cumulative) global average pooling layer in the squeeze excite layer. kernel_initializer: kernel initializer for the conv operations. kernel_regularizer: kernel regularizer for the conv operations. batch_norm_layer: class to use for batch norm. batch_norm_momentum: momentum of the batch norm operation. batch_norm_epsilon: epsilon of the batch norm operation. use_sync_bn: if True, use synchronized batch normalization. state_prefix: a prefix string to identify states. **kwargs: keyword arguments to be passed to this layer. 
""" super(MovinetBlock, self).__init__(**kwargs) self._kernel_size = normalize_tuple(kernel_size, 3, 'kernel_size') self._strides = normalize_tuple(strides, 3, 'strides') # Use a multiplier of 2 if concatenating multiple features se_multiplier = 2 if se_type == '2plus3d' else 1 se_hidden_filters = nn_layers.make_divisible( se_ratio * expand_filters * se_multiplier, divisor=8) self._out_filters = out_filters self._expand_filters = expand_filters self._causal = causal self._activation = activation self._gating_activation = gating_activation self._se_ratio = se_ratio self._downsample = any(s > 1 for s in self._strides) self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._conv_type = conv_type self._se_type = se_type self._use_positional_encoding = use_positional_encoding self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._batch_norm_layer = batch_norm_layer self._batch_norm_momentum = batch_norm_momentum self._batch_norm_epsilon = batch_norm_epsilon self._use_sync_bn = use_sync_bn self._state_prefix = state_prefix self._expansion = ConvBlock( expand_filters, (1, 1, 1), activation=activation, conv_type=conv_type, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, use_batch_norm=True, batch_norm_layer=self._batch_norm_layer, batch_norm_momentum=self._batch_norm_momentum, batch_norm_epsilon=self._batch_norm_epsilon, use_sync_bn=self._use_sync_bn, name='expansion') self._feature = StreamConvBlock( expand_filters, self._kernel_size, strides=self._strides, depthwise=True, causal=self._causal, activation=activation, conv_type=conv_type, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, use_batch_norm=True, batch_norm_layer=self._batch_norm_layer, batch_norm_momentum=self._batch_norm_momentum, batch_norm_epsilon=self._batch_norm_epsilon, use_sync_bn=self._use_sync_bn, state_prefix=state_prefix, name='feature') self._projection = ConvBlock( out_filters, (1, 1, 1), activation=None, conv_type=conv_type, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, use_batch_norm=True, batch_norm_layer=self._batch_norm_layer, batch_norm_momentum=self._batch_norm_momentum, batch_norm_epsilon=self._batch_norm_epsilon, use_sync_bn=self._use_sync_bn, name='projection') self._attention = None if se_type != 'none': self._attention = StreamSqueezeExcitation( se_hidden_filters, se_type=se_type, activation=activation, gating_activation=gating_activation, causal=self._causal, conv_type=conv_type, use_positional_encoding=use_positional_encoding, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, state_prefix=state_prefix, name='se') def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'out_filters': self._out_filters, 'expand_filters': self._expand_filters, 'kernel_size': self._kernel_size, 'strides': self._strides, 'causal': self._causal, 'activation': self._activation, 'gating_activation': self._gating_activation, 'se_ratio': self._se_ratio, 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'conv_type': self._conv_type, 'se_type': self._se_type, 'use_positional_encoding': self._use_positional_encoding, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'batch_norm_momentum': self._batch_norm_momentum, 'batch_norm_epsilon': self._batch_norm_epsilon, 'use_sync_bn': self._use_sync_bn, 'state_prefix': self._state_prefix, } base_config = 
super(MovinetBlock, self).get_config() return dict(list(base_config.items()) + list(config.items())) def build(self, input_shape): """Builds the layer with the given input shape.""" if input_shape[-1] == self._out_filters and not self._downsample: self._skip = None else: self._skip = SkipBlock( self._out_filters, downsample=self._downsample, conv_type=self._conv_type, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, name='skip') self._mobile_bottleneck = MobileBottleneck( self._expansion, self._feature, self._projection, attention_layer=self._attention, skip_layer=self._skip, stochastic_depth_drop_rate=self._stochastic_depth_drop_rate, name='bneck') super(MovinetBlock, self).build(input_shape) def call(self, inputs: tf.Tensor, states: Optional[nn_layers.States] = None ) -> Tuple[tf.Tensor, nn_layers.States]: """Calls the layer with the given inputs. Args: inputs: the input tensor. states: a dict of states such that, if any of the keys match for this layer, will overwrite the contents of the buffer(s). Returns: the output tensor and states """ states = dict(states) if states is not None else {} return self._mobile_bottleneck(inputs, states=states) @tf.keras.utils.register_keras_serializable(package='Vision') class Stem(tf.keras.layers.Layer): """Stem layer for video networks. Applies an initial convolution block operation. """ def __init__( self, out_filters: int, kernel_size: Union[int, Sequence[int]], strides: Union[int, Sequence[int]] = (1, 1, 1), causal: bool = False, conv_type: str = '3d', activation: nn_layers.Activation = 'swish', kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras .regularizers.L2(KERNEL_WEIGHT_DECAY), batch_norm_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization, batch_norm_momentum: float = 0.99, batch_norm_epsilon: float = 1e-3, use_sync_bn: bool = False, state_prefix: Optional[str] = None, # pytype: disable=annotation-type-mismatch # typed-keras **kwargs): """Implementation for video model stem. Args: out_filters: number of output filters. kernel_size: kernel size of the convolution. strides: strides of the convolution. causal: if True, run the temporal convolutions in causal mode. conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D ops. '2plus1d' split any 3D ops into two sequential 2D ops with their own batch norm and activation. '3d_2plus1d' is like '2plus1d', but uses two sequential 3D ops instead. activation: the input activation name. kernel_initializer: kernel initializer for the conv operations. kernel_regularizer: kernel regularizer for the conv operations. batch_norm_layer: class to use for batch norm. batch_norm_momentum: momentum of the batch norm operation. batch_norm_epsilon: epsilon of the batch norm operation. use_sync_bn: if True, use synchronized batch normalization. state_prefix: a prefix string to identify states. **kwargs: keyword arguments to be passed to this layer. 
""" super(Stem, self).__init__(**kwargs) self._out_filters = out_filters self._kernel_size = normalize_tuple(kernel_size, 3, 'kernel_size') self._strides = normalize_tuple(strides, 3, 'strides') self._causal = causal self._conv_type = conv_type self._activation = activation self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._batch_norm_layer = batch_norm_layer self._batch_norm_momentum = batch_norm_momentum self._batch_norm_epsilon = batch_norm_epsilon self._use_sync_bn = use_sync_bn self._state_prefix = state_prefix self._stem = StreamConvBlock( filters=self._out_filters, kernel_size=self._kernel_size, strides=self._strides, causal=self._causal, activation=self._activation, conv_type=self._conv_type, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_batch_norm=True, batch_norm_layer=self._batch_norm_layer, batch_norm_momentum=self._batch_norm_momentum, batch_norm_epsilon=self._batch_norm_epsilon, use_sync_bn=self._use_sync_bn, state_prefix=self._state_prefix, name='stem') def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'out_filters': self._out_filters, 'kernel_size': self._kernel_size, 'strides': self._strides, 'causal': self._causal, 'activation': self._activation, 'conv_type': self._conv_type, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'batch_norm_momentum': self._batch_norm_momentum, 'batch_norm_epsilon': self._batch_norm_epsilon, 'use_sync_bn': self._use_sync_bn, 'state_prefix': self._state_prefix, } base_config = super(Stem, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, states: Optional[nn_layers.States] = None ) -> Tuple[tf.Tensor, nn_layers.States]: """Calls the layer with the given inputs. Args: inputs: the input tensor. states: a dict of states such that, if any of the keys match for this layer, will overwrite the contents of the buffer(s). Returns: the output tensor and states """ states = dict(states) if states is not None else {} return self._stem(inputs, states=states) @tf.keras.utils.register_keras_serializable(package='Vision') class Head(tf.keras.layers.Layer): """Head layer for video networks. Applies pointwise projection and global pooling. """ def __init__( self, project_filters: int, conv_type: str = '3d', activation: nn_layers.Activation = 'swish', kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras .regularizers.L2(KERNEL_WEIGHT_DECAY), batch_norm_layer: tf.keras.layers.Layer = tf.keras.layers.BatchNormalization, batch_norm_momentum: float = 0.99, batch_norm_epsilon: float = 1e-3, use_sync_bn: bool = False, average_pooling_type: str = '3d', state_prefix: Optional[str] = None, # pytype: disable=annotation-type-mismatch # typed-keras **kwargs): """Implementation for video model head. Args: project_filters: number of pointwise projection filters. conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D ops. '2plus1d' split any 3D ops into two sequential 2D ops with their own batch norm and activation. '3d_2plus1d' is like '2plus1d', but uses two sequential 3D ops instead. activation: the input activation name. kernel_initializer: kernel initializer for the conv operations. kernel_regularizer: kernel regularizer for the conv operations. batch_norm_layer: class to use for batch norm. 
batch_norm_momentum: momentum of the batch norm operation. batch_norm_epsilon: epsilon of the batch norm operation. use_sync_bn: if True, use synchronized batch normalization. average_pooling_type: The average pooling type. Currently supporting ['3d', '2d', 'none']. state_prefix: a prefix string to identify states. **kwargs: keyword arguments to be passed to this layer. """ super(Head, self).__init__(**kwargs) self._project_filters = project_filters self._conv_type = conv_type self._activation = activation self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._batch_norm_layer = batch_norm_layer self._batch_norm_momentum = batch_norm_momentum self._batch_norm_epsilon = batch_norm_epsilon self._use_sync_bn = use_sync_bn self._state_prefix = state_prefix self._project = ConvBlock( filters=project_filters, kernel_size=1, activation=activation, conv_type=conv_type, kernel_regularizer=kernel_regularizer, use_batch_norm=True, batch_norm_layer=self._batch_norm_layer, batch_norm_momentum=self._batch_norm_momentum, batch_norm_epsilon=self._batch_norm_epsilon, use_sync_bn=self._use_sync_bn, name='project') if average_pooling_type.lower() == '3d': self._pool = nn_layers.GlobalAveragePool3D( keepdims=True, causal=False, state_prefix=state_prefix) elif average_pooling_type.lower() == '2d': self._pool = nn_layers.SpatialAveragePool3D(keepdims=True) elif average_pooling_type == 'none': self._pool = None else: raise ValueError( '%s average_pooling_type is not supported.' % average_pooling_type) def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'project_filters': self._project_filters, 'conv_type': self._conv_type, 'activation': self._activation, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'batch_norm_momentum': self._batch_norm_momentum, 'batch_norm_epsilon': self._batch_norm_epsilon, 'use_sync_bn': self._use_sync_bn, 'state_prefix': self._state_prefix, } base_config = super(Head, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call( self, inputs: Union[tf.Tensor, Mapping[str, tf.Tensor]], states: Optional[nn_layers.States] = None, ) -> Tuple[tf.Tensor, nn_layers.States]: """Calls the layer with the given inputs. Args: inputs: the input tensor or dict of endpoints. states: a dict of states such that, if any of the keys match for this layer, will overwrite the contents of the buffer(s). Returns: the output tensor and states """ states = dict(states) if states is not None else {} x = self._project(inputs) if self._pool is not None: outputs = self._pool(x, states=states, output_states=True) else: outputs = (x, states) return outputs @tf.keras.utils.register_keras_serializable(package='Vision') class ClassifierHead(tf.keras.layers.Layer): """Head layer for video networks. Applies dense projection, dropout, and classifier projection. 
Expects input to be pooled vector with shape [batch_size, 1, 1, 1, num_channels] """ def __init__( self, head_filters: int, num_classes: int, dropout_rate: float = 0., conv_type: str = '3d', activation: nn_layers.Activation = 'swish', output_activation: Optional[nn_layers.Activation] = None, max_pool_predictions: bool = False, kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras.regularizers.L2(KERNEL_WEIGHT_DECAY), # pytype: disable=annotation-type-mismatch # typed-keras **kwargs): """Implementation for video model classifier head. Args: head_filters: number of dense head projection filters. num_classes: number of output classes for the final logits. dropout_rate: the dropout rate applied to the head projection. conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D ops. '2plus1d' split any 3D ops into two sequential 2D ops with their own batch norm and activation. '3d_2plus1d' is like '2plus1d', but uses two sequential 3D ops instead. activation: the input activation name. output_activation: optional final activation (e.g., 'softmax'). max_pool_predictions: apply temporal softmax pooling to predictions. Intended for multi-label prediction, where multiple labels are distributed across the video. Currently only supports single clips. kernel_initializer: kernel initializer for the conv operations. kernel_regularizer: kernel regularizer for the conv operations. **kwargs: keyword arguments to be passed to this layer. """ super(ClassifierHead, self).__init__(**kwargs) self._head_filters = head_filters self._num_classes = num_classes self._dropout_rate = dropout_rate self._conv_type = conv_type self._activation = activation self._output_activation = output_activation self._max_pool_predictions = max_pool_predictions self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._dropout = tf.keras.layers.Dropout(dropout_rate) self._head = ConvBlock( filters=head_filters, kernel_size=1, activation=activation, use_bias=True, use_batch_norm=False, conv_type=conv_type, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, name='head') self._classifier = ConvBlock( filters=num_classes, kernel_size=1, kernel_initializer=tf.keras.initializers.random_normal(stddev=0.01), kernel_regularizer=None, use_bias=True, use_batch_norm=False, conv_type=conv_type, name='classifier') self._max_pool = nn_layers.TemporalSoftmaxPool() self._squeeze = Squeeze3D() output_activation = output_activation if output_activation else 'linear' self._cast = tf.keras.layers.Activation( output_activation, dtype='float32', name='cast') def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'head_filters': self._head_filters, 'num_classes': self._num_classes, 'dropout_rate': self._dropout_rate, 'conv_type': self._conv_type, 'activation': self._activation, 'output_activation': self._output_activation, 'max_pool_predictions': self._max_pool_predictions, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, } base_config = super(ClassifierHead, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor) -> tf.Tensor: """Calls the layer with the given inputs.""" # Input Shape: [batch_size, 1, 1, 1, input_channels] x = inputs x = self._head(x) if self._dropout_rate and self._dropout_rate > 0: x = self._dropout(x) x = 
self._classifier(x) if self._max_pool_predictions: x = self._max_pool(x) x = self._squeeze(x) x = self._cast(x) return x
62,639
37.979465
111
py
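The streaming layers in movinet_layers.py above are designed so that running a clip in one shot and running it chunk by chunk (carrying the returned states between calls) produce the same activations; this is what makes causal MoViNets usable for online video. A minimal self-check sketch, assuming only the movinet_layers module shown above (the 'demo' prefix and all shapes are arbitrary choices, not from the repo):

import tensorflow as tf
from official.projects.movinet.modeling import movinet_layers

block = movinet_layers.StreamConvBlock(
    filters=8, kernel_size=(3, 3, 3), causal=True, state_prefix='demo')

clip = tf.random.normal([1, 4, 16, 16, 3])

# Process all four frames at once (states default to zero-filled buffers).
full_out, _ = block(clip)

# Process the same frames in two chunks, carrying the states across calls.
first, states = block(clip[:, :2])
second, _ = block(clip[:, 2:], states=states)
chunked_out = tf.concat([first, second], axis=1)

# Causal convolution plus the stream buffer should make the two paths match.
tf.debugging.assert_near(full_out, chunked_out, atol=1e-5)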
models
models-master/official/projects/movinet/modeling/movinet_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Build Movinet for video classification. Reference: https://arxiv.org/pdf/2103.11511.pdf """ from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Union from absl import logging import tensorflow as tf from official.projects.movinet.configs import movinet as cfg from official.projects.movinet.modeling import movinet_layers from official.vision.modeling import backbones from official.vision.modeling import factory_3d as model_factory @tf.keras.utils.register_keras_serializable(package='Vision') class MovinetClassifier(tf.keras.Model): """A video classification class builder.""" def __init__( self, backbone: tf.keras.Model, num_classes: int, input_specs: Optional[Mapping[str, tf.keras.layers.InputSpec]] = None, activation: str = 'swish', dropout_rate: float = 0.0, kernel_initializer: str = 'HeNormal', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, output_states: bool = False, **kwargs): """Movinet initialization function. Args: backbone: A 3d backbone network. num_classes: Number of classes in classification task. input_specs: Specs of the input tensor. activation: name of the main activation function. dropout_rate: Rate for dropout regularization. kernel_initializer: Kernel initializer for the final dense layer. kernel_regularizer: Kernel regularizer. bias_regularizer: Bias regularizer. output_states: if True, output intermediate states that can be used to run the model in streaming mode. Inputting the output states of the previous input clip with the current input clip will utilize a stream buffer for streaming video. **kwargs: Keyword arguments to be passed. """ if not input_specs: input_specs = { 'image': tf.keras.layers.InputSpec(shape=[None, None, None, None, 3]) } self._num_classes = num_classes self._input_specs = input_specs self._activation = activation self._dropout_rate = dropout_rate self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._output_states = output_states state_specs = None if backbone.use_external_states: state_specs = backbone.initial_state_specs( input_shape=input_specs['image'].shape) inputs, outputs = self._build_network( backbone, input_specs, state_specs=state_specs) super(MovinetClassifier, self).__init__( inputs=inputs, outputs=outputs, **kwargs) # Move backbone after super() call so Keras is happy self._backbone = backbone def _build_backbone( self, backbone: tf.keras.Model, input_specs: Mapping[str, tf.keras.layers.InputSpec], state_specs: Optional[Mapping[str, tf.keras.layers.InputSpec]] = None, ) -> Tuple[Mapping[str, Any], Any, Any]: """Builds the backbone network and gets states and endpoints. Args: backbone: the model backbone. input_specs: the model input spec to use. 
state_specs: a dict of states such that, if any of the keys match for a layer, will overwrite the contents of the buffer(s). Returns: inputs: a dict of input specs. endpoints: a dict of model endpoints. states: a dict of model states. """ state_specs = state_specs if state_specs is not None else {} states = { name: tf.keras.Input(shape=spec.shape[1:], dtype=spec.dtype, name=name) for name, spec in state_specs.items() } image = tf.keras.Input(shape=input_specs['image'].shape[1:], name='image') inputs = {**states, 'image': image} if backbone.use_external_states: before_states = states endpoints, states = backbone(inputs) after_states = states new_states = set(after_states) - set(before_states) if new_states: raise ValueError( 'Expected input and output states to be the same. Got extra states ' '{}, expected {}'.format(new_states, set(before_states))) mismatched_shapes = {} for name in after_states: before_shape = before_states[name].shape after_shape = after_states[name].shape if len(before_shape) != len(after_shape): mismatched_shapes[name] = (before_shape, after_shape) continue for before, after in zip(before_shape, after_shape): if before is not None and after is not None and before != after: mismatched_shapes[name] = (before_shape, after_shape) break if mismatched_shapes: raise ValueError( 'Got mismatched input and output state shapes: {}'.format( mismatched_shapes)) else: endpoints, states = backbone(inputs) return inputs, endpoints, states def _build_network( self, backbone: tf.keras.Model, input_specs: Mapping[str, tf.keras.layers.InputSpec], state_specs: Optional[Mapping[str, tf.keras.layers.InputSpec]] = None, ) -> Tuple[Mapping[str, tf.keras.Input], Union[Tuple[Mapping[ # pytype: disable=invalid-annotation # typed-keras str, tf.Tensor], Mapping[str, tf.Tensor]], Mapping[str, tf.Tensor]]]: """Builds the model network. Args: backbone: the model backbone. input_specs: the model input spec to use. state_specs: a dict of states such that, if any of the keys match for a layer, will overwrite the contents of the buffer(s). Returns: Inputs and outputs as a tuple. Inputs are expected to be a dict with base input and states. Outputs are expected to be a dict of endpoints and (optionally) output states. 
""" inputs, endpoints, states = self._build_backbone( backbone=backbone, input_specs=input_specs, state_specs=state_specs) x = endpoints['head'] x = movinet_layers.ClassifierHead( head_filters=backbone.head_filters, num_classes=self._num_classes, dropout_rate=self._dropout_rate, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, conv_type=backbone.conv_type, activation=self._activation)( x) outputs = (x, states) if self._output_states else x return inputs, outputs def initial_state_specs( self, input_shape: Sequence[int]) -> Dict[str, tf.keras.layers.InputSpec]: return self._backbone.initial_state_specs(input_shape=input_shape) @tf.function def init_states(self, input_shape: Sequence[int]) -> Dict[str, tf.Tensor]: """Returns initial states for the first call in steaming mode.""" return self._backbone.init_states(input_shape) @property def checkpoint_items(self) -> Dict[str, Any]: """Returns a dictionary of items to be additionally checkpointed.""" return dict(backbone=self.backbone) @property def backbone(self) -> tf.keras.Model: """Returns the backbone of the model.""" return self._backbone def get_config(self): config = { 'backbone': self._backbone, 'activation': self._activation, 'num_classes': self._num_classes, 'input_specs': self._input_specs, 'dropout_rate': self._dropout_rate, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'output_states': self._output_states, } return config @classmethod def from_config(cls, config, custom_objects=None): # Each InputSpec may need to be deserialized # This handles the case where we want to load a saved_model loaded with # `tf.keras.models.load_model` if config['input_specs']: for name in config['input_specs']: if isinstance(config['input_specs'][name], dict): config['input_specs'][name] = tf.keras.layers.deserialize( config['input_specs'][name]) return cls(**config) @model_factory.register_model_builder('movinet') def build_movinet_model( input_specs: Mapping[str, tf.keras.layers.InputSpec], model_config: cfg.MovinetModel, num_classes: int, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None): """Builds movinet model.""" logging.info('Building movinet model with num classes: %s', num_classes) if l2_regularizer is not None: logging.info('Building movinet model with regularizer: %s', l2_regularizer.get_config()) input_specs_dict = {'image': input_specs} backbone = backbones.factory.build_backbone( input_specs=input_specs, backbone_config=model_config.backbone, norm_activation_config=model_config.norm_activation, l2_regularizer=l2_regularizer) model = MovinetClassifier( backbone, num_classes=num_classes, kernel_regularizer=l2_regularizer, input_specs=input_specs_dict, activation=model_config.activation, dropout_rate=model_config.dropout_rate, output_states=model_config.output_states) return model
9,800
36.551724
116
py
models
models-master/official/projects/movinet/modeling/movinet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of Mobile Video Networks. Reference: https://arxiv.org/pdf/2103.11511.pdf """ import dataclasses import math from typing import Dict, Mapping, Optional, Sequence, Tuple, Union from absl import logging import tensorflow as tf from official.modeling import hyperparams from official.projects.movinet.modeling import movinet_layers from official.vision.modeling.backbones import factory # Defines a set of kernel sizes and stride sizes to simplify and shorten # architecture definitions for configs below. KernelSize = Tuple[int, int, int] # K(ab) represents a 3D kernel of size (a, b, b) K13: KernelSize = (1, 3, 3) K15: KernelSize = (1, 5, 5) K33: KernelSize = (3, 3, 3) K53: KernelSize = (5, 3, 3) # S(ab) represents a 3D stride of size (a, b, b) S11: KernelSize = (1, 1, 1) S12: KernelSize = (1, 2, 2) S22: KernelSize = (2, 2, 2) S21: KernelSize = (2, 1, 1) # Type for a state container (map) TensorMap = Mapping[str, tf.Tensor] @dataclasses.dataclass class BlockSpec: """Configuration of a block.""" @dataclasses.dataclass class StemSpec(BlockSpec): """Configuration of a Movinet stem.""" filters: int = 0 kernel_size: KernelSize = (0, 0, 0) strides: KernelSize = (0, 0, 0) @dataclasses.dataclass class MovinetBlockSpec(BlockSpec): """Configuration of a Movinet block.""" base_filters: int = 0 expand_filters: Sequence[int] = () kernel_sizes: Sequence[KernelSize] = () strides: Sequence[KernelSize] = () @dataclasses.dataclass class HeadSpec(BlockSpec): """Configuration of a Movinet head.""" project_filters: int = 0 head_filters: int = 0 # Block specs specify the architecture of each model BLOCK_SPECS = { 'a0': ( StemSpec(filters=8, kernel_size=K13, strides=S12), MovinetBlockSpec( base_filters=8, expand_filters=(24,), kernel_sizes=(K15,), strides=(S12,)), MovinetBlockSpec( base_filters=32, expand_filters=(80, 80, 80), kernel_sizes=(K33, K33, K33), strides=(S12, S11, S11)), MovinetBlockSpec( base_filters=56, expand_filters=(184, 112, 184), kernel_sizes=(K53, K33, K33), strides=(S12, S11, S11)), MovinetBlockSpec( base_filters=56, expand_filters=(184, 184, 184, 184), kernel_sizes=(K53, K33, K33, K33), strides=(S11, S11, S11, S11)), MovinetBlockSpec( base_filters=104, expand_filters=(384, 280, 280, 344), kernel_sizes=(K53, K15, K15, K15), strides=(S12, S11, S11, S11)), HeadSpec(project_filters=480, head_filters=2048), ), 'a1': ( StemSpec(filters=16, kernel_size=K13, strides=S12), MovinetBlockSpec( base_filters=16, expand_filters=(40, 40), kernel_sizes=(K15, K33), strides=(S12, S11)), MovinetBlockSpec( base_filters=40, expand_filters=(96, 120, 96, 96), kernel_sizes=(K33, K33, K33, K33), strides=(S12, S11, S11, S11)), MovinetBlockSpec( base_filters=64, expand_filters=(216, 128, 216, 168, 216), kernel_sizes=(K53, K33, K33, K33, K33), strides=(S12, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=64, expand_filters=(216, 216, 216, 128, 128, 216), kernel_sizes=(K53, K33, K33, K33, K15, K33),
strides=(S11, S11, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=136, expand_filters=(456, 360, 360, 360, 456, 456, 544), kernel_sizes=(K53, K15, K15, K15, K15, K33, K13), strides=(S12, S11, S11, S11, S11, S11, S11)), HeadSpec(project_filters=600, head_filters=2048), ), 'a2': ( StemSpec(filters=16, kernel_size=K13, strides=S12), MovinetBlockSpec( base_filters=16, expand_filters=(40, 40, 64), kernel_sizes=(K15, K33, K33), strides=(S12, S11, S11)), MovinetBlockSpec( base_filters=40, expand_filters=(96, 120, 96, 96, 120), kernel_sizes=(K33, K33, K33, K33, K33), strides=(S12, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=72, expand_filters=(240, 160, 240, 192, 240), kernel_sizes=(K53, K33, K33, K33, K33), strides=(S12, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=72, expand_filters=(240, 240, 240, 240, 144, 240), kernel_sizes=(K53, K33, K33, K33, K15, K33), strides=(S11, S11, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=144, expand_filters=(480, 384, 384, 480, 480, 480, 576), kernel_sizes=(K53, K15, K15, K15, K15, K33, K13), strides=(S12, S11, S11, S11, S11, S11, S11)), HeadSpec(project_filters=640, head_filters=2048), ), 'a3': ( StemSpec(filters=16, kernel_size=K13, strides=S12), MovinetBlockSpec( base_filters=16, expand_filters=(40, 40, 64, 40), kernel_sizes=(K15, K33, K33, K33), strides=(S12, S11, S11, S11)), MovinetBlockSpec( base_filters=48, expand_filters=(112, 144, 112, 112, 144, 144), kernel_sizes=(K33, K33, K33, K15, K33, K33), strides=(S12, S11, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=80, expand_filters=(240, 152, 240, 192, 240), kernel_sizes=(K53, K33, K33, K33, K33), strides=(S12, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=88, expand_filters=(264, 264, 264, 264, 160, 264, 264, 264), kernel_sizes=(K53, K33, K33, K33, K15, K33, K33, K33), strides=(S11, S11, S11, S11, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=168, expand_filters=(560, 448, 448, 560, 560, 560, 448, 448, 560, 672), kernel_sizes=(K53, K15, K15, K15, K15, K33, K15, K15, K33, K13), strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11)), HeadSpec(project_filters=744, head_filters=2048), ), 'a4': ( StemSpec(filters=24, kernel_size=K13, strides=S12), MovinetBlockSpec( base_filters=24, expand_filters=(64, 64, 96, 64, 96, 64), kernel_sizes=(K15, K33, K33, K33, K33, K33), strides=(S12, S11, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=56, expand_filters=(168, 168, 136, 136, 168, 168, 168, 136, 136), kernel_sizes=(K33, K33, K33, K33, K33, K33, K33, K15, K33), strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=96, expand_filters=(320, 160, 320, 192, 320, 160, 320, 256, 320), kernel_sizes=(K53, K33, K33, K33, K33, K33, K33, K33, K33), strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=96, expand_filters=(320, 320, 320, 320, 192, 320, 320, 192, 320, 320), kernel_sizes=(K53, K33, K33, K33, K15, K33, K33, K33, K33, K33), strides=(S11, S11, S11, S11, S11, S11, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=192, expand_filters=(640, 512, 512, 640, 640, 640, 512, 512, 640, 768, 640, 640, 768), kernel_sizes=(K53, K15, K15, K15, K15, K33, K15, K15, K15, K15, K15, K33, K33), strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11)), HeadSpec(project_filters=856, head_filters=2048), ), 'a5': ( StemSpec(filters=24, kernel_size=K13, strides=S12), MovinetBlockSpec( base_filters=24, expand_filters=(64, 64, 96, 64, 96, 64), kernel_sizes=(K15, K15, K33, K33, K33, K33), 
strides=(S12, S11, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=64, expand_filters=(192, 152, 152, 152, 192, 192, 192, 152, 152, 192, 192), kernel_sizes=(K53, K33, K33, K33, K33, K33, K33, K33, K33, K33, K33), strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=112, expand_filters=(376, 224, 376, 376, 296, 376, 224, 376, 376, 296, 376, 376, 376), kernel_sizes=(K53, K33, K33, K33, K33, K33, K33, K33, K33, K33, K33, K33, K33), strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=120, expand_filters=(376, 376, 376, 376, 224, 376, 376, 224, 376, 376, 376), kernel_sizes=(K53, K33, K33, K33, K15, K33, K33, K33, K33, K33, K33), strides=(S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11)), MovinetBlockSpec( base_filters=224, expand_filters=(744, 744, 600, 600, 744, 744, 744, 896, 600, 600, 896, 744, 744, 896, 600, 600, 744, 744), kernel_sizes=(K53, K33, K15, K15, K15, K15, K33, K15, K15, K15, K15, K15, K33, K15, K15, K15, K15, K33), strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11)), HeadSpec(project_filters=992, head_filters=2048), ), 't0': ( StemSpec(filters=8, kernel_size=K13, strides=S12), MovinetBlockSpec( base_filters=8, expand_filters=(16,), kernel_sizes=(K15,), strides=(S12,)), MovinetBlockSpec( base_filters=32, expand_filters=(72, 72), kernel_sizes=(K33, K15), strides=(S12, S11)), MovinetBlockSpec( base_filters=56, expand_filters=(112, 112, 112), kernel_sizes=(K53, K15, K33), strides=(S12, S11, S11)), MovinetBlockSpec( base_filters=56, expand_filters=(184, 184, 184, 184), kernel_sizes=(K53, K15, K33, K33), strides=(S11, S11, S11, S11)), MovinetBlockSpec( base_filters=104, expand_filters=(344, 344, 344, 344), kernel_sizes=(K53, K15, K15, K33), strides=(S12, S11, S11, S11)), HeadSpec(project_filters=240, head_filters=1024), ), } @tf.keras.utils.register_keras_serializable(package='Vision') class Movinet(tf.keras.Model): """Class to build Movinet family model. Reference: https://arxiv.org/pdf/2103.11511.pdf """ def __init__(self, model_id: str = 'a0', causal: bool = False, use_positional_encoding: bool = False, conv_type: str = '3d', se_type: str = '3d', input_specs: Optional[tf.keras.layers.InputSpec] = None, activation: str = 'swish', gating_activation: str = 'sigmoid', use_sync_bn: bool = True, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_initializer: str = 'HeNormal', kernel_regularizer: Optional[str] = None, bias_regularizer: Optional[str] = None, stochastic_depth_drop_rate: float = 0., use_external_states: bool = False, output_states: bool = True, average_pooling_type: str = '3d', **kwargs): """MoViNet initialization function. Args: model_id: name of MoViNet backbone model. causal: use causal mode, with CausalConv and CausalSE operations. use_positional_encoding: if True, adds a positional encoding before temporal convolutions and the cumulative global average pooling layers. conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' configures the network to use the default 3D convolution. '2plus1d' uses (2+1)D convolution with Conv2D operations and 2D reshaping (e.g., a 5x3x3 kernel becomes 3x3 followed by 5x1 conv). '3d_2plus1d' uses (2+1)D convolution with Conv3D and no 2D reshaping (e.g., a 5x3x3 kernel becomes 1x3x3 followed by 5x1x1 conv). se_type: '3d', '2d', '2plus3d' or 'none'. '3d' uses the default 3D spatiotemporal global average pooling for squeeze excitation. 
'2d' uses 2D spatial global average pooling on each frame. '2plus3d' concatenates both 3D and 2D global average pooling. input_specs: the model input spec to use. activation: name of the main activation function. gating_activation: gating activation to use in squeeze excitation layers. use_sync_bn: if True, use synchronized batch normalization. norm_momentum: normalization momentum for the moving average. norm_epsilon: small float added to variance to avoid dividing by zero. kernel_initializer: kernel_initializer for convolutional layers. kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D. Defaults to None. bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d. Defaults to None. stochastic_depth_drop_rate: the base rate for stochastic depth. use_external_states: if True, expects states to be passed as additional input. output_states: if True, output intermediate states that can be used to run the model in streaming mode. Inputting the output states of the previous input clip with the current input clip will utilize a stream buffer for streaming video. average_pooling_type: The average pooling type. Currently supporting ['3d', '2d', 'none']. **kwargs: keyword arguments to be passed. """ block_specs = BLOCK_SPECS[model_id] if input_specs is None: input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, None, 3]) if conv_type not in ('3d', '2plus1d', '3d_2plus1d'): raise ValueError('Unknown conv type: {}'.format(conv_type)) if se_type not in ('3d', '2d', '2plus3d', 'none'): raise ValueError('Unknown squeeze excitation type: {}'.format(se_type)) self._model_id = model_id self._block_specs = block_specs self._causal = causal self._use_positional_encoding = use_positional_encoding self._conv_type = conv_type self._se_type = se_type self._input_specs = input_specs self._use_sync_bn = use_sync_bn self._activation = activation self._gating_activation = gating_activation self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._norm = tf.keras.layers.BatchNormalization self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._use_external_states = use_external_states self._output_states = output_states self._average_pooling_type = average_pooling_type if self._use_external_states and not self._causal: raise ValueError('External states should be used with causal mode.') if not isinstance(block_specs[0], StemSpec): raise ValueError( 'Expected first spec to be StemSpec, got {}'.format(block_specs[0])) if not isinstance(block_specs[-1], HeadSpec): raise ValueError( 'Expected final spec to be HeadSpec, got {}'.format(block_specs[-1])) self._head_filters = block_specs[-1].head_filters state_specs = None if use_external_states: self._set_dtype_policy(input_specs.dtype) state_specs = self.initial_state_specs(input_specs.shape) inputs, outputs = self._build_network(input_specs, state_specs=state_specs) super(Movinet, self).__init__(inputs=inputs, outputs=outputs, **kwargs) self._state_specs = state_specs def _build_network( self, input_specs: tf.keras.layers.InputSpec, state_specs: Optional[Mapping[str, tf.keras.layers.InputSpec]] = None, ) -> Tuple[TensorMap, Union[TensorMap, Tuple[TensorMap, TensorMap]]]: """Builds the model network. Args: input_specs: the model input spec to use. state_specs: a dict mapping a state name to the corresponding state spec. 
State names should match with the `state` input/output dict. Returns: Inputs and outputs as a tuple. Inputs are expected to be a dict with base input and states. Outputs are expected to be a dict of endpoints and (optional) output states. """ state_specs = state_specs if state_specs is not None else {} image_input = tf.keras.Input(shape=input_specs.shape[1:], name='inputs') states = { name: tf.keras.Input(shape=spec.shape[1:], dtype=spec.dtype, name=name) for name, spec in state_specs.items() } inputs = {**states, 'image': image_input} endpoints = {} x = image_input num_layers = sum( len(block.expand_filters) for block in self._block_specs if isinstance(block, MovinetBlockSpec)) stochastic_depth_idx = 1 for block_idx, block in enumerate(self._block_specs): if isinstance(block, StemSpec): layer_obj = movinet_layers.Stem( block.filters, block.kernel_size, block.strides, conv_type=self._conv_type, causal=self._causal, activation=self._activation, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, batch_norm_layer=self._norm, batch_norm_momentum=self._norm_momentum, batch_norm_epsilon=self._norm_epsilon, use_sync_bn=self._use_sync_bn, state_prefix='state_stem', name='stem') x, states = layer_obj(x, states=states) endpoints['stem'] = x elif isinstance(block, MovinetBlockSpec): if not (len(block.expand_filters) == len(block.kernel_sizes) == len(block.strides)): raise ValueError( 'Lengths of block parameters differ: {}, {}, {}'.format( len(block.expand_filters), len(block.kernel_sizes), len(block.strides))) params = list(zip(block.expand_filters, block.kernel_sizes, block.strides)) for layer_idx, layer in enumerate(params): stochastic_depth_drop_rate = ( self._stochastic_depth_drop_rate * stochastic_depth_idx / num_layers) expand_filters, kernel_size, strides = layer name = f'block{block_idx-1}_layer{layer_idx}' layer_obj = movinet_layers.MovinetBlock( block.base_filters, expand_filters, kernel_size=kernel_size, strides=strides, causal=self._causal, activation=self._activation, gating_activation=self._gating_activation, stochastic_depth_drop_rate=stochastic_depth_drop_rate, conv_type=self._conv_type, se_type=self._se_type, use_positional_encoding= self._use_positional_encoding and self._causal, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, batch_norm_layer=self._norm, batch_norm_momentum=self._norm_momentum, batch_norm_epsilon=self._norm_epsilon, use_sync_bn=self._use_sync_bn, state_prefix=f'state_{name}', name=name) x, states = layer_obj(x, states=states) endpoints[name] = x stochastic_depth_idx += 1 elif isinstance(block, HeadSpec): layer_obj = movinet_layers.Head( project_filters=block.project_filters, conv_type=self._conv_type, activation=self._activation, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, batch_norm_layer=self._norm, batch_norm_momentum=self._norm_momentum, batch_norm_epsilon=self._norm_epsilon, use_sync_bn=self._use_sync_bn, average_pooling_type=self._average_pooling_type, state_prefix='state_head', name='head') x, states = layer_obj(x, states=states) endpoints['head'] = x else: raise ValueError('Unknown block type {}'.format(block)) outputs = (endpoints, states) if self._output_states else endpoints return inputs, outputs def _get_initial_state_shapes( self, block_specs: Sequence[BlockSpec], input_shape: Union[Sequence[int], tf.Tensor], use_positional_encoding: bool = False) -> Dict[str, Sequence[int]]: """Generates names and shapes for all input 
states. Args: block_specs: sequence of specs used for creating a model. input_shape: the expected 5D shape of the image input. use_positional_encoding: whether the model will use positional encoding. Returns: A dict mapping state names to state shapes. """ def divide_resolution(shape, num_downsamples): """Downsamples the dimension to calculate strided convolution shape.""" if shape is None: return None if isinstance(shape, tf.Tensor): # Avoid using div and ceil to support tf lite shape = tf.cast(shape, tf.float32) resolution_divisor = 2 ** num_downsamples resolution_multiplier = 0.5 ** num_downsamples shape = ((shape + resolution_divisor - 1) * resolution_multiplier) return tf.cast(shape, tf.int32) else: resolution_divisor = 2 ** num_downsamples return math.ceil(shape / resolution_divisor) states = {} num_downsamples = 0 for block_idx, block in enumerate(block_specs): if isinstance(block, StemSpec): if block.kernel_size[0] > 1: states['state_stem_stream_buffer'] = ( input_shape[0], input_shape[1], divide_resolution(input_shape[2], num_downsamples), divide_resolution(input_shape[3], num_downsamples), block.filters, ) num_downsamples += 1 elif isinstance(block, MovinetBlockSpec): block_idx -= 1 params = list(zip( block.expand_filters, block.kernel_sizes, block.strides)) for layer_idx, layer in enumerate(params): expand_filters, kernel_size, strides = layer # If we use a 2D kernel, we apply spatial downsampling # before the buffer. if (tuple(strides[1:3]) != (1, 1) and self._conv_type in ['2plus1d', '3d_2plus1d']): num_downsamples += 1 prefix = f'state_block{block_idx}_layer{layer_idx}' if kernel_size[0] > 1: states[f'{prefix}_stream_buffer'] = ( input_shape[0], kernel_size[0] - 1, divide_resolution(input_shape[2], num_downsamples), divide_resolution(input_shape[3], num_downsamples), expand_filters, ) if '3d' in self._se_type: states[f'{prefix}_pool_buffer'] = ( input_shape[0], 1, 1, 1, expand_filters, ) states[f'{prefix}_pool_frame_count'] = (1,) if use_positional_encoding: name = f'{prefix}_pos_enc_frame_count' states[name] = (1,) if strides[1] != strides[2]: raise ValueError('Strides must match in the spatial dimensions, ' 'got {}'.format(strides)) # If we use a 3D kernel, we apply spatial downsampling # after the buffer. 
if (tuple(strides[1:3]) != (1, 1) and self._conv_type not in ['2plus1d', '3d_2plus1d']): num_downsamples += 1 elif isinstance(block, HeadSpec): states['state_head_pool_buffer'] = ( input_shape[0], 1, 1, 1, block.project_filters, ) states['state_head_pool_frame_count'] = (1,) return states def _get_state_dtype(self, name: str) -> str: """Returns the dtype associated with a state.""" if 'frame_count' in name: return 'int32' return self.dtype def initial_state_specs( self, input_shape: Sequence[int]) -> Dict[str, tf.keras.layers.InputSpec]: """Creates a mapping of state name to InputSpec from the input shape.""" state_shapes = self._get_initial_state_shapes( self._block_specs, input_shape, use_positional_encoding=self._use_positional_encoding) return { name: tf.keras.layers.InputSpec( shape=shape, dtype=self._get_state_dtype(name)) for name, shape in state_shapes.items() } def init_states(self, input_shape: Sequence[int]) -> Dict[str, tf.Tensor]: """Returns initial states for the first call in streaming mode.""" state_shapes = self._get_initial_state_shapes( self._block_specs, input_shape, use_positional_encoding=self._use_positional_encoding) states = { name: tf.zeros(shape, dtype=self._get_state_dtype(name)) for name, shape in state_shapes.items() } return states @property def use_external_states(self) -> bool: """Whether this model is expecting input states as additional input.""" return self._use_external_states @property def head_filters(self): """The number of filters expected to be in the head classifier layer.""" return self._head_filters @property def conv_type(self): """The expected convolution type (see __init__ for more details).""" return self._conv_type def get_config(self): config_dict = { 'model_id': self._model_id, 'causal': self._causal, 'use_positional_encoding': self._use_positional_encoding, 'conv_type': self._conv_type, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'use_external_states': self._use_external_states, 'output_states': self._output_states, } return config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @factory.register_backbone_builder('movinet') def build_movinet( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras """Builds MoViNet backbone from a config.""" backbone_type = backbone_config.type backbone_cfg = backbone_config.get() if backbone_type != 'movinet': raise ValueError(f'Inconsistent backbone type {backbone_type}') if norm_activation_config.activation is not None: logging.warning('norm_activation is not used in MoViNets, but specified: ' '%s', norm_activation_config.activation) logging.warning('norm_activation is ignored.') return Movinet( model_id=backbone_cfg.model_id, causal=backbone_cfg.causal, use_positional_encoding=backbone_cfg.use_positional_encoding, conv_type=backbone_cfg.conv_type, se_type=backbone_cfg.se_type, input_specs=input_specs, activation=backbone_cfg.activation, gating_activation=backbone_cfg.gating_activation, output_states=backbone_cfg.output_states, use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer, stochastic_depth_drop_rate=backbone_cfg.stochastic_depth_drop_rate, use_external_states=backbone_cfg.use_external_states, average_pooling_type=backbone_cfg.average_pooling_type)
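As a quick illustration of the builders above (a sketch, not part of the original file), the backbone can also be driven directly: `init_states` seeds the stream buffers derived from `_get_initial_state_shapes`, and each call returns both the endpoint dict and the updated states.

import tensorflow as tf

from official.projects.movinet.modeling import movinet

backbone = movinet.Movinet(model_id='a0', causal=True, use_external_states=True)

inputs = tf.ones([1, 8, 128, 128, 3])
states = backbone.init_states(tf.shape(inputs))

# Each call consumes the current states and returns updated ones.
endpoints, states = backbone({**states, 'image': inputs})

# For MoViNet-A0 the head endpoint is globally pooled to (1, 1, 1, 1, 480).
print(endpoints['head'].shape)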
29,503
38.816464
139
py
models
models-master/official/projects/movinet/modeling/movinet_model_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for movinet_model.py.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from official.projects.movinet.modeling import movinet from official.projects.movinet.modeling import movinet_model class MovinetModelTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters(False, True) def test_movinet_classifier_creation(self, is_training): """Test for creation of a Movinet classifier.""" temporal_size = 16 spatial_size = 224 tf.keras.backend.set_image_data_format('channels_last') input_specs = tf.keras.layers.InputSpec( shape=[None, temporal_size, spatial_size, spatial_size, 3]) backbone = movinet.Movinet(model_id='a0', input_specs=input_specs) num_classes = 1000 model = movinet_model.MovinetClassifier( backbone=backbone, num_classes=num_classes, input_specs={'image': input_specs}, dropout_rate=0.2) inputs = np.random.rand(2, temporal_size, spatial_size, spatial_size, 3) logits = model(inputs, training=is_training) self.assertAllEqual([2, num_classes], logits.shape) def test_movinet_classifier_stream(self): """Test if the classifier can be run in streaming mode.""" tf.keras.backend.set_image_data_format('channels_last') backbone = movinet.Movinet( model_id='a0', causal=True, use_external_states=True, ) model = movinet_model.MovinetClassifier( backbone, num_classes=600, output_states=True) inputs = tf.ones([1, 8, 172, 172, 3]) init_states = model.init_states(tf.shape(inputs)) expected, _ = model({**init_states, 'image': inputs}) frames = tf.split(inputs, inputs.shape[1], axis=1) states = init_states for frame in frames: output, states = model({**states, 'image': frame}) predicted = output self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected, 1e-5, 1e-5) def test_movinet_classifier_stream_pos_enc(self): """Test if the classifier can be run in streaming mode with pos encoding.""" tf.keras.backend.set_image_data_format('channels_last') backbone = movinet.Movinet( model_id='a0', causal=True, use_external_states=True, use_positional_encoding=True, ) model = movinet_model.MovinetClassifier( backbone, num_classes=600, output_states=True) inputs = tf.ones([1, 8, 172, 172, 3]) init_states = model.init_states(tf.shape(inputs)) expected, _ = model({**init_states, 'image': inputs}) frames = tf.split(inputs, inputs.shape[1], axis=1) states = init_states for frame in frames: output, states = model({**states, 'image': frame}) predicted = output self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected, 1e-5, 1e-5) def test_movinet_classifier_stream_pos_enc_2plus1d(self): """Test if the model can run in streaming mode with pos encoding, (2+1)D.""" tf.keras.backend.set_image_data_format('channels_last') backbone = movinet.Movinet( model_id='a0', causal=True, use_external_states=True, use_positional_encoding=True, conv_type='2plus1d', ) model = movinet_model.MovinetClassifier( backbone, 
num_classes=600, output_states=True) inputs = tf.ones([1, 8, 172, 172, 3]) init_states = model.init_states(tf.shape(inputs)) expected, _ = model({**init_states, 'image': inputs}) frames = tf.split(inputs, inputs.shape[1], axis=1) states = init_states for frame in frames: output, states = model({**states, 'image': frame}) predicted = output self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected, 1e-5, 1e-5) def test_movinet_classifier_mobile(self): """Test if the model can run with mobile parameters.""" tf.keras.backend.set_image_data_format('channels_last') backbone = movinet.Movinet( model_id='a0', causal=True, use_external_states=True, conv_type='2plus1d', se_type='2plus3d', activation='hard_swish', gating_activation='hard_sigmoid' ) model = movinet_model.MovinetClassifier( backbone, num_classes=600, output_states=True) inputs = tf.ones([1, 8, 172, 172, 3]) init_states = model.init_states(tf.shape(inputs)) expected, _ = model({**init_states, 'image': inputs}) frames = tf.split(inputs, inputs.shape[1], axis=1) states = init_states for frame in frames: output, states = model({**states, 'image': frame}) predicted = output self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected, 1e-5, 1e-5) def test_serialize_deserialize(self): """Validate the classification network can be serialized and deserialized.""" backbone = movinet.Movinet(model_id='a0') model = movinet_model.MovinetClassifier(backbone=backbone, num_classes=1000) config = model.get_config() new_model = movinet_model.MovinetClassifier.from_config(config) # Validate that the config can be forced to JSON. new_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(model.get_config(), new_model.get_config()) def test_saved_model_save_load(self): backbone = movinet.Movinet('a0') model = movinet_model.MovinetClassifier( backbone, num_classes=600) model.build([1, 5, 172, 172, 3]) model.compile(metrics=['acc']) tf.keras.models.save_model(model, '/tmp/movinet/') loaded_model = tf.keras.models.load_model('/tmp/movinet/') output = loaded_model(dict(image=tf.ones([1, 1, 1, 1, 3]))) self.assertAllEqual(output.shape, [1, 600]) @parameterized.parameters( ('a0', 3.126071), ('a1', 4.717912), ('a2', 5.280922), ('a3', 7.443289), ('a4', 11.422727), ('a5', 18.763355), ('t0', 1.740502), ) def test_movinet_models(self, model_id, expected_params_millions): """Test creation of MoViNet family models with states.""" tf.keras.backend.set_image_data_format('channels_last') model = movinet_model.MovinetClassifier( backbone=movinet.Movinet( model_id=model_id, causal=True), num_classes=600) model.build([1, 1, 1, 1, 3]) num_params_millions = model.count_params() / 1e6 self.assertEqual(num_params_millions, expected_params_millions) def test_movinet_a0_2plus1d(self): """Test creation of MoViNet with 2plus1d configuration.""" tf.keras.backend.set_image_data_format('channels_last') model_2plus1d = movinet_model.MovinetClassifier( backbone=movinet.Movinet( model_id='a0', conv_type='2plus1d'), num_classes=600) model_2plus1d.build([1, 1, 1, 1, 3]) model_3d_2plus1d = movinet_model.MovinetClassifier( backbone=movinet.Movinet( model_id='a0', conv_type='3d_2plus1d'), num_classes=600) model_3d_2plus1d.build([1, 1, 1, 1, 3]) # Ensure both models have the same weights weights = [] for var_2plus1d, var_3d_2plus1d in zip( model_2plus1d.get_weights(), model_3d_2plus1d.get_weights()): if var_2plus1d.shape == var_3d_2plus1d.shape: weights.append(var_3d_2plus1d) 
else: if var_3d_2plus1d.shape[0] == 1: weight = var_3d_2plus1d[0] else: weight = var_3d_2plus1d[:, 0] if weight.shape[-1] != var_2plus1d.shape[-1]: # Transpose any depthwise kernels (conv3d --> depthwise_conv2d) weight = tf.transpose(weight, perm=(0, 1, 3, 2)) weights.append(weight) model_2plus1d.set_weights(weights) inputs = tf.ones([2, 8, 172, 172, 3], dtype=tf.float32) logits_2plus1d = model_2plus1d(inputs) logits_3d_2plus1d = model_3d_2plus1d(inputs) # Ensure both models have the same output, since the weights are the same self.assertAllEqual(logits_2plus1d.shape, logits_3d_2plus1d.shape) self.assertAllClose(logits_2plus1d, logits_3d_2plus1d, 1e-5, 1e-5) if __name__ == '__main__': tf.test.main()
8,838
32.481061
81
py
models
models-master/official/projects/movinet/modeling/movinet_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for movinet.py.""" from absl.testing import parameterized import tensorflow as tf from official.projects.movinet.modeling import movinet class MoViNetTest(parameterized.TestCase, tf.test.TestCase): def test_network_creation(self): """Test creation of MoViNet family models.""" tf.keras.backend.set_image_data_format('channels_last') network = movinet.Movinet( model_id='a0', causal=True, ) inputs = tf.keras.Input(shape=(8, 128, 128, 3), batch_size=1) endpoints, states = network(inputs) self.assertAllEqual(endpoints['stem'].shape, [1, 8, 64, 64, 8]) self.assertAllEqual(endpoints['block0_layer0'].shape, [1, 8, 32, 32, 8]) self.assertAllEqual(endpoints['block1_layer0'].shape, [1, 8, 16, 16, 32]) self.assertAllEqual(endpoints['block2_layer0'].shape, [1, 8, 8, 8, 56]) self.assertAllEqual(endpoints['block3_layer0'].shape, [1, 8, 8, 8, 56]) self.assertAllEqual(endpoints['block4_layer0'].shape, [1, 8, 4, 4, 104]) self.assertAllEqual(endpoints['head'].shape, [1, 1, 1, 1, 480]) self.assertNotEmpty(states) def test_network_with_states(self): """Test creation of MoViNet family models with states.""" tf.keras.backend.set_image_data_format('channels_last') backbone = movinet.Movinet( model_id='a0', causal=True, use_external_states=True, ) inputs = tf.ones([1, 8, 128, 128, 3]) init_states = backbone.init_states(tf.shape(inputs)) endpoints, new_states = backbone({**init_states, 'image': inputs}) self.assertAllEqual(endpoints['stem'].shape, [1, 8, 64, 64, 8]) self.assertAllEqual(endpoints['block0_layer0'].shape, [1, 8, 32, 32, 8]) self.assertAllEqual(endpoints['block1_layer0'].shape, [1, 8, 16, 16, 32]) self.assertAllEqual(endpoints['block2_layer0'].shape, [1, 8, 8, 8, 56]) self.assertAllEqual(endpoints['block3_layer0'].shape, [1, 8, 8, 8, 56]) self.assertAllEqual(endpoints['block4_layer0'].shape, [1, 8, 4, 4, 104]) self.assertAllEqual(endpoints['head'].shape, [1, 1, 1, 1, 480]) self.assertNotEmpty(init_states) self.assertNotEmpty(new_states) def test_movinet_stream(self): """Test if the backbone can be run in streaming mode.""" tf.keras.backend.set_image_data_format('channels_last') backbone = movinet.Movinet( model_id='a0', causal=True, use_external_states=True, ) inputs = tf.ones([1, 5, 128, 128, 3]) init_states = backbone.init_states(tf.shape(inputs)) expected_endpoints, _ = backbone({**init_states, 'image': inputs}) frames = tf.split(inputs, inputs.shape[1], axis=1) states = init_states for frame in frames: output, states = backbone({**states, 'image': frame}) predicted_endpoints = output predicted = predicted_endpoints['head'] # The expected final output is simply the mean across frames expected = expected_endpoints['head'] expected = tf.reduce_mean(expected, 1, keepdims=True) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected, 1e-5, 1e-5) def test_movinet_stream_nse(self): """Test if the backbone can be run in streaming mode w/o SE layer.""" 
tf.keras.backend.set_image_data_format('channels_last') backbone = movinet.Movinet( model_id='a0', causal=True, use_external_states=True, se_type='none', ) inputs = tf.ones([1, 5, 128, 128, 3]) init_states = backbone.init_states(tf.shape(inputs)) expected_endpoints, _ = backbone({**init_states, 'image': inputs}) frames = tf.split(inputs, inputs.shape[1], axis=1) states = init_states for frame in frames: output, states = backbone({**states, 'image': frame}) predicted_endpoints = output predicted = predicted_endpoints['head'] # The expected final output is simply the mean across frames expected = expected_endpoints['head'] expected = tf.reduce_mean(expected, 1, keepdims=True) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected, 1e-5, 1e-5) # Check contents in the states dictionary. state_keys = list(init_states.keys()) self.assertIn('state_head_pool_buffer', state_keys) self.assertIn('state_head_pool_frame_count', state_keys) state_keys.remove('state_head_pool_buffer') state_keys.remove('state_head_pool_frame_count') # From now on, there are only 'stream_buffer' for the convolutions. for state_key in state_keys: self.assertIn( 'stream_buffer', state_key, msg=f'Expecting stream_buffer only, found {state_key}') def test_movinet_2plus1d_stream(self): tf.keras.backend.set_image_data_format('channels_last') backbone = movinet.Movinet( model_id='a0', causal=True, conv_type='2plus1d', use_external_states=True, ) inputs = tf.ones([1, 5, 128, 128, 3]) init_states = backbone.init_states(tf.shape(inputs)) expected_endpoints, _ = backbone({**init_states, 'image': inputs}) frames = tf.split(inputs, inputs.shape[1], axis=1) states = init_states for frame in frames: output, states = backbone({**states, 'image': frame}) predicted_endpoints = output predicted = predicted_endpoints['head'] # The expected final output is simply the mean across frames expected = expected_endpoints['head'] expected = tf.reduce_mean(expected, 1, keepdims=True) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected, 1e-5, 1e-5) def test_movinet_3d_2plus1d_stream(self): tf.keras.backend.set_image_data_format('channels_last') backbone = movinet.Movinet( model_id='a0', causal=True, conv_type='3d_2plus1d', use_external_states=True, ) inputs = tf.ones([1, 5, 128, 128, 3]) init_states = backbone.init_states(tf.shape(inputs)) expected_endpoints, _ = backbone({**init_states, 'image': inputs}) frames = tf.split(inputs, inputs.shape[1], axis=1) states = init_states for frame in frames: output, states = backbone({**states, 'image': frame}) predicted_endpoints = output predicted = predicted_endpoints['head'] # The expected final output is simply the mean across frames expected = expected_endpoints['head'] expected = tf.reduce_mean(expected, 1, keepdims=True) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected, 1e-5, 1e-5) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( model_id='a0', causal=True, use_positional_encoding=True, use_external_states=True, ) network = movinet.Movinet(**kwargs) # Create another network object from the first object's config. new_network = movinet.Movinet.from_config(network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. 
self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == '__main__': tf.test.main()
7,863
33.79646
79
py
models
models-master/official/projects/movinet/modeling/movinet_layers_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for movinet_layers.py.""" from absl.testing import parameterized import tensorflow as tf from official.projects.movinet.modeling import movinet_layers from official.vision.modeling.layers import nn_layers class MovinetLayersTest(parameterized.TestCase, tf.test.TestCase): def test_squeeze3d(self): squeeze = movinet_layers.Squeeze3D() inputs = tf.ones([5, 1, 1, 1, 3]) predicted = squeeze(inputs) expected = tf.ones([5, 3]) self.assertEqual(predicted.shape, expected.shape) self.assertAllEqual(predicted, expected) def test_mobile_conv2d(self): conv2d = movinet_layers.MobileConv2D( filters=3, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer='ones', use_bias=False, use_depthwise=False, use_temporal=False, use_buffered_input=True, ) inputs = tf.ones([1, 2, 2, 2, 3]) predicted = conv2d(inputs) expected = tf.constant( [[[[[12., 12., 12.], [12., 12., 12.]], [[12., 12., 12.], [12., 12., 12.]]], [[[12., 12., 12.], [12., 12., 12.]], [[12., 12., 12.], [12., 12., 12.]]]]]) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) def test_mobile_conv2d_bn(self): batch_norm_op = tf.keras.layers.BatchNormalization( momentum=0.9, epsilon=1., name='bn') conv2d = movinet_layers.MobileConv2D( filters=3, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer='ones', use_bias=False, use_depthwise=False, use_temporal=False, use_buffered_input=True, batch_norm_op=batch_norm_op, ) inputs = tf.ones([1, 2, 2, 2, 3]) predicted = conv2d(inputs) expected = tf.constant( [[[[[8.48528, 8.48528, 8.48528], [8.48528, 8.48528, 8.48528]], [[8.48528, 8.48528, 8.48528], [8.48528, 8.48528, 8.48528]]], [[[8.48528, 8.48528, 8.48528], [8.48528, 8.48528, 8.48528]], [[8.48528, 8.48528, 8.48528], [8.48528, 8.48528, 8.48528]]]]]) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) def test_mobile_conv2d_activation(self): conv2d = movinet_layers.MobileConv2D( filters=3, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer='ones', use_bias=False, use_depthwise=False, use_temporal=False, use_buffered_input=True, activation_op=tf.nn.relu6, ) inputs = tf.ones([1, 2, 2, 2, 3]) predicted = conv2d(inputs) expected = tf.constant( [[[[[6., 6., 6.], [6., 6., 6.]], [[6., 6., 6.], [6., 6., 6.]]], [[[6., 6., 6.], [6., 6., 6.]], [[6., 6., 6.], [6., 6., 6.]]]]]) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) def test_mobile_conv2d_temporal(self): conv2d = movinet_layers.MobileConv2D( filters=3, kernel_size=(3, 1), strides=(1, 1), padding='causal', kernel_initializer='ones', use_bias=False, use_depthwise=True, use_temporal=True, use_buffered_input=True, ) inputs = tf.ones([1, 2, 2, 1, 3]) paddings = [[0, 0], [2, 0], [0, 0], [0, 0], [0, 0]] padded_inputs = tf.pad(inputs, paddings) predicted = conv2d(padded_inputs) expected = tf.constant( [[[[[1., 1., 1.]], [[1., 1., 1.]]], [[[2., 2., 
2.]], [[2., 2., 2.]]]]]) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) def test_stream_buffer(self): conv3d_stream = nn_layers.Conv3D( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), padding='causal', kernel_initializer='ones', use_bias=False, use_buffered_input=True, ) buffer = movinet_layers.StreamBuffer(buffer_size=2) conv3d = nn_layers.Conv3D( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), padding='causal', kernel_initializer='ones', use_bias=False, use_buffered_input=False, ) inputs = tf.ones([1, 4, 2, 2, 3]) expected = conv3d(inputs) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = buffer(frame, states=states) x = conv3d_stream(x) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllClose( predicted, [[[[[12., 12., 12.]]], [[[24., 24., 24.]]], [[[36., 36., 36.]]], [[[36., 36., 36.]]]]]) def test_stream_conv_block_2plus1d(self): conv_block = movinet_layers.ConvBlock( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, kernel_initializer='ones', use_bias=False, activation='relu', conv_type='2plus1d', ) stream_conv_block = movinet_layers.StreamConvBlock( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, kernel_initializer='ones', use_bias=False, activation='relu', conv_type='2plus1d', ) inputs = tf.ones([1, 4, 2, 2, 3]) expected = conv_block(inputs) predicted_disabled, _ = stream_conv_block(inputs) self.assertEqual(predicted_disabled.shape, expected.shape) self.assertAllClose(predicted_disabled, expected) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = stream_conv_block(frame, states=states) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllClose( predicted, [[[[[35.9640400, 35.9640400, 35.9640400]]], [[[71.9280700, 71.9280700, 71.9280700]]], [[[107.892105, 107.892105, 107.892105]]], [[[107.892105, 107.892105, 107.892105]]]]]) def test_stream_conv_block_3d_2plus1d(self): conv_block = movinet_layers.ConvBlock( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, kernel_initializer='ones', use_bias=False, activation='relu', conv_type='3d_2plus1d', ) stream_conv_block = movinet_layers.StreamConvBlock( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, kernel_initializer='ones', use_bias=False, activation='relu', conv_type='3d_2plus1d', ) inputs = tf.ones([1, 4, 2, 2, 3]) expected = conv_block(inputs) predicted_disabled, _ = stream_conv_block(inputs) self.assertEqual(predicted_disabled.shape, expected.shape) self.assertAllClose(predicted_disabled, expected) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = stream_conv_block(frame, states=states) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllClose( predicted, [[[[[35.9640400, 35.9640400, 35.9640400]]], [[[71.9280700, 71.9280700, 71.9280700]]], [[[107.892105, 107.892105, 107.892105]]], [[[107.892105, 107.892105, 107.892105]]]]]) def test_stream_conv_block(self): conv_block = 
movinet_layers.ConvBlock( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, kernel_initializer='ones', use_bias=False, activation='relu', ) stream_conv_block = movinet_layers.StreamConvBlock( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, kernel_initializer='ones', use_bias=False, activation='relu', ) inputs = tf.ones([1, 4, 2, 2, 3]) expected = conv_block(inputs) predicted_disabled, _ = stream_conv_block(inputs) self.assertEqual(predicted_disabled.shape, expected.shape) self.assertAllClose(predicted_disabled, expected) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = stream_conv_block(frame, states=states) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllClose( predicted, [[[[[11.994005, 11.994005, 11.994005]]], [[[23.988010, 23.988010, 23.988010]]], [[[35.982014, 35.982014, 35.982014]]], [[[35.982014, 35.982014, 35.982014]]]]]) def test_stream_squeeze_excitation(self): se = movinet_layers.StreamSqueezeExcitation( 3, causal=True, kernel_initializer='ones') inputs = tf.range(4, dtype=tf.float32) + 1. inputs = tf.reshape(inputs, [1, 4, 1, 1, 1]) inputs = tf.tile(inputs, [1, 1, 2, 1, 3]) expected, _ = se(inputs) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = se(frame, states=states) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected, 1e-5, 1e-5) self.assertAllClose( predicted, [[[[[0.9998109, 0.9998109, 0.9998109]], [[0.9998109, 0.9998109, 0.9998109]]], [[[1.9999969, 1.9999969, 1.9999969]], [[1.9999969, 1.9999969, 1.9999969]]], [[[3., 3., 3.]], [[3., 3., 3.]]], [[[4., 4., 4.]], [[4., 4., 4.]]]]], 1e-5, 1e-5) def test_stream_squeeze_excitation_2plus3d(self): se = movinet_layers.StreamSqueezeExcitation( 3, se_type='2plus3d', causal=True, activation='hard_swish', gating_activation='hard_sigmoid', kernel_initializer='ones') inputs = tf.range(4, dtype=tf.float32) + 1. inputs = tf.reshape(inputs, [1, 4, 1, 1, 1]) inputs = tf.tile(inputs, [1, 1, 2, 1, 3]) expected, _ = se(inputs) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = se(frame, states=states) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected, atol=1e-4) self.assertAllClose( predicted, [[[[[1., 1., 1.]], [[1., 1., 1.]]], [[[2., 2., 2.]], [[2., 2., 2.]]], [[[3., 3., 3.]], [[3., 3., 3.]]], [[[4., 4., 4.]], [[4., 4., 4.]]]]], atol=1e-4) def test_stream_movinet_block(self): block = movinet_layers.MovinetBlock( out_filters=3, expand_filters=6, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, ) inputs = tf.range(4, dtype=tf.float32) + 1. 
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1]) inputs = tf.tile(inputs, [1, 1, 2, 1, 3]) expected, _ = block(inputs) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = block(frame, states=states) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) def test_stream_movinet_block_none_se(self): block = movinet_layers.MovinetBlock( out_filters=3, expand_filters=6, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, se_type='none', state_prefix='test', ) inputs = tf.range(4, dtype=tf.float32) + 1. inputs = tf.reshape(inputs, [1, 4, 1, 1, 1]) inputs = tf.tile(inputs, [1, 1, 2, 1, 3]) expected, expected_states = block(inputs) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = block(frame, states=states) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllEqual(list(expected_states.keys()), ['test_stream_buffer']) def test_stream_classifier_head(self): head = movinet_layers.Head(project_filters=5) classifier_head = movinet_layers.ClassifierHead( head_filters=10, num_classes=4) inputs = tf.range(4, dtype=tf.float32) + 1. inputs = tf.reshape(inputs, [1, 4, 1, 1, 1]) inputs = tf.tile(inputs, [1, 1, 2, 1, 3]) x, _ = head(inputs) expected = classifier_head(x) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} for frame in frames: x, states = head(frame, states=states) predicted = classifier_head(x) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) if __name__ == '__main__': tf.test.main()
14,957
28.975952
77
py
models
models-master/official/projects/token_dropping/masked_lm.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Masked language task.""" import dataclasses from typing import Tuple import tensorflow as tf from official.core import task_factory from official.nlp.tasks import masked_lm @dataclasses.dataclass class TokenDropMaskedLMConfig(masked_lm.MaskedLMConfig): """The model config.""" pass @task_factory.register_task_cls(TokenDropMaskedLMConfig) class TokenDropMaskedLMTask(masked_lm.MaskedLMTask): """Task object for masked language modeling.""" def build_losses(self, labels, model_outputs, metrics, aux_losses=None) -> Tuple[tf.Tensor, tf.Tensor]: """Returns the final loss and the masked-lm loss.""" with tf.name_scope('MaskedLMTask/losses'): metrics = dict([(metric.name, metric) for metric in metrics]) lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy( labels['masked_lm_ids'], tf.cast(model_outputs['mlm_logits'], tf.float32), from_logits=True) lm_label_weights = labels['masked_lm_weights'] lm_numerator_loss = tf.reduce_sum(lm_prediction_losses * lm_label_weights) lm_denominator_loss = tf.reduce_sum(lm_label_weights) mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss) metrics['lm_example_loss'].update_state(mlm_loss) if 'next_sentence_labels' in labels: sentence_labels = labels['next_sentence_labels'] sentence_outputs = tf.cast( model_outputs['next_sentence'], dtype=tf.float32) sentence_loss = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( sentence_labels, sentence_outputs, from_logits=True)) metrics['next_sentence_loss'].update_state(sentence_loss) total_loss = mlm_loss + sentence_loss else: total_loss = mlm_loss if aux_losses: total_loss += tf.add_n(aux_losses) return total_loss, lm_prediction_losses def train_step(self, inputs, model: tf.keras.Model, optimizer: tf.keras.optimizers.Optimizer, metrics): """Does forward and backward. Args: inputs: a dictionary of input tensors. model: the model, forward pass definition. optimizer: the optimizer for this training step. metrics: a nested structure of metrics objects. Returns: A dictionary of logs. """ with tf.GradientTape() as tape: outputs = model(inputs, training=True) # Computes per-replica loss. loss, lm_prediction_losses = self.build_losses( labels=inputs, model_outputs=outputs, metrics=metrics, aux_losses=model.losses) model.encoder_network.record_mlm_loss( mlm_ids=inputs['masked_lm_ids'], mlm_losses=lm_prediction_losses) if self.task_config.scale_loss: # Scales loss as the default gradients allreduce performs sum inside the # optimizer. scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync tvars = model.trainable_variables if self.task_config.scale_loss: grads = tape.gradient(scaled_loss, tvars) else: grads = tape.gradient(loss, tvars) optimizer.apply_gradients(list(zip(grads, tvars))) self.process_metrics(metrics, inputs, outputs) return {self.loss: loss} def validation_step(self, inputs, model: tf.keras.Model, metrics): """Validation step.
Args: inputs: a dictionary of input tensors. model: the keras.Model. metrics: a nested structure of metrics objects. Returns: A dictionary of logs. """ outputs = self.inference_step(inputs, model) loss, _ = self.build_losses( labels=inputs, model_outputs=outputs, metrics=metrics, aux_losses=model.losses) self.process_metrics(metrics, inputs, outputs) return {self.loss: loss}
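To make the weighting in `build_losses` concrete, here is a small self-contained sketch (toy numbers, not from the original file): per-position cross-entropy is scaled by `masked_lm_weights` and normalized by the total weight via `divide_no_nan`, so padded prediction slots contribute nothing and an all-zero weight vector yields a zero loss instead of NaN.

import tensorflow as tf

# Toy batch: one sequence with 3 prediction slots over a vocab of 5.
# The last slot is padding, signalled by a zero weight.
masked_lm_ids = tf.constant([[2, 4, 0]])
masked_lm_weights = tf.constant([[1.0, 1.0, 0.0]])
mlm_logits = tf.random.normal([1, 3, 5])

per_position_loss = tf.keras.losses.sparse_categorical_crossentropy(
    masked_lm_ids, mlm_logits, from_logits=True)

# Weighted mean over the real (non-padding) prediction slots only.
numerator = tf.reduce_sum(per_position_loss * masked_lm_weights)
denominator = tf.reduce_sum(masked_lm_weights)
mlm_loss = tf.math.divide_no_nan(numerator, denominator)
print(float(mlm_loss))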
4,551
35.416
80
py
models
models-master/official/projects/token_dropping/encoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Transformer-based BERT encoder network.""" # pylint: disable=g-classes-have-attributes from typing import Any, Callable, Optional, Union, Tuple from absl import logging import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling import layers _Initializer = Union[str, tf.keras.initializers.Initializer] _Activation = Union[str, Callable[..., Any]] _approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True) class TokenDropBertEncoder(tf.keras.layers.Layer): """Bi-directional Transformer-based encoder network with token dropping. During pretraining, we drop unimportant tokens starting from an intermediate layer in the model, to make the model focus on important tokens more efficiently with its limited computational resources. The dropped tokens are later picked up by the last layer of the model, so that the model still produces full-length sequences. This approach reduces the pretraining cost of BERT by 25% while achieving better overall fine-tuning performance on standard downstream tasks. Args: vocab_size: The size of the token vocabulary. hidden_size: The size of the transformer hidden layers. num_layers: The number of transformer layers. num_attention_heads: The number of attention heads for each transformer. The hidden size must be divisible by the number of attention heads. max_sequence_length: The maximum sequence length that this encoder can consume. If None, max_sequence_length uses the value from sequence length. This determines the variable shape for positional embeddings. type_vocab_size: The number of types that the 'type_ids' input can take. inner_dim: The output dimension of the first Dense layer in a two-layer feedforward network for each transformer. inner_activation: The activation for the first Dense layer in a two-layer feedforward network for each transformer. output_dropout: Dropout probability for the post-attention and output dropout. attention_dropout: The dropout rate to use for the attention layers within the transformer layers. token_loss_init_value: The default loss value of a token, when the token is never masked and predicted. token_loss_beta: The running-average factor for computing the average loss value of a token. token_keep_k: The number of tokens to keep in the intermediate layers. The rest will be dropped in those layers. token_allow_list: The list of token-ids that should not be dropped. In the BERT English vocab, token-ids from 1 to 998 are special tokens such as [CLS], [SEP]. By default, token_allow_list contains all of these special tokens. token_deny_list: The list of token-ids that should always be dropped. In the BERT English vocab, token-id=0 means [PAD]. By default, token_deny_list contains only [PAD]. initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the target sequence of the last transformer layer. `None` means the entire target sequence will attend to the source sequence, which yields the full output. embedding_width: The width of the word embeddings. If the embedding width is not equal to hidden size, embedding parameters will be factorized into two matrices in the shape of ['vocab_size', 'embedding_width'] and ['embedding_width', 'hidden_size'] ('embedding_width' is usually much smaller than 'hidden_size'). embedding_layer: An optional Layer instance which will be called to generate embeddings for the input word IDs. norm_first: Whether to normalize inputs to attention and intermediate dense layers. If set False, output of attention and intermediate dense layers is normalized. with_dense_inputs: Whether to accept dense embeddings as the input. """ def __init__( self, vocab_size: int, hidden_size: int = 768, num_layers: int = 12, num_attention_heads: int = 12, max_sequence_length: int = 512, type_vocab_size: int = 16, inner_dim: int = 3072, inner_activation: _Activation = _approx_gelu, output_dropout: float = 0.1, attention_dropout: float = 0.1, token_loss_init_value: float = 10.0, token_loss_beta: float = 0.995, token_keep_k: int = 256, token_allow_list: Tuple[int, ...] = (100, 101, 102, 103), token_deny_list: Tuple[int, ...] = (0,), initializer: _Initializer = tf.keras.initializers.TruncatedNormal( stddev=0.02), output_range: Optional[int] = None, embedding_width: Optional[int] = None, embedding_layer: Optional[tf.keras.layers.Layer] = None, norm_first: bool = False, with_dense_inputs: bool = False, **kwargs): # Pops kwargs that are used in V1 implementation. if 'dict_outputs' in kwargs: kwargs.pop('dict_outputs') if 'return_all_encoder_outputs' in kwargs: kwargs.pop('return_all_encoder_outputs') if 'intermediate_size' in kwargs: inner_dim = kwargs.pop('intermediate_size') if 'activation' in kwargs: inner_activation = kwargs.pop('activation') if 'dropout_rate' in kwargs: output_dropout = kwargs.pop('dropout_rate') if 'attention_dropout_rate' in kwargs: attention_dropout = kwargs.pop('attention_dropout_rate') super().__init__(**kwargs) if output_range is not None: logging.warning('`output_range` is available as an argument for `call()`. ' 'The `output_range` as __init__ argument is deprecated.') activation = tf.keras.activations.get(inner_activation) initializer = tf.keras.initializers.get(initializer) if embedding_width is None: embedding_width = hidden_size if embedding_layer is None: self._embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), name='word_embeddings') else: self._embedding_layer = embedding_layer self._position_embedding_layer = layers.PositionEmbedding( initializer=tf_utils.clone_initializer(initializer), max_length=max_sequence_length, name='position_embedding') self._type_embedding_layer = layers.OnDeviceEmbedding( vocab_size=type_vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), use_one_hot=True, name='type_embeddings') self._embedding_norm_layer = tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32) self._embedding_dropout = tf.keras.layers.Dropout( rate=output_dropout, name='embedding_dropout') # We project the 'embedding' output to 'hidden_size' if it is not already # 'hidden_size'. 
self._embedding_projection = None if embedding_width != hidden_size: self._embedding_projection = tf.keras.layers.EinsumDense( '...x,xy->...y', output_shape=hidden_size, bias_axes='y', kernel_initializer=tf_utils.clone_initializer(initializer), name='embedding_projection') # The first 999 tokens are special tokens such as [PAD], [CLS], [SEP]. # We want to always mask [PAD], and never mask [CLS], [SEP]. init_importance = tf.constant(token_loss_init_value, shape=(vocab_size)) if token_allow_list: init_importance = tf.tensor_scatter_nd_update( tensor=init_importance, indices=[[x] for x in token_allow_list], updates=[1.0e4 for x in token_allow_list]) if token_deny_list: init_importance = tf.tensor_scatter_nd_update( tensor=init_importance, indices=[[x] for x in token_deny_list], updates=[-1.0e4 for x in token_deny_list]) self._token_importance_embed = layers.TokenImportanceWithMovingAvg( vocab_size=vocab_size, init_importance=init_importance, moving_average_beta=token_loss_beta) self._token_separator = layers.SelectTopK(top_k=token_keep_k) self._transformer_layers = [] self._num_layers = num_layers self._attention_mask_layer = layers.SelfAttentionMask( name='self_attention_mask') for i in range(num_layers): layer = layers.TransformerEncoderBlock( num_attention_heads=num_attention_heads, inner_dim=inner_dim, inner_activation=inner_activation, output_dropout=output_dropout, attention_dropout=attention_dropout, norm_first=norm_first, kernel_initializer=tf_utils.clone_initializer(initializer), name='transformer/layer_%d' % i) self._transformer_layers.append(layer) self._pooler_layer = tf.keras.layers.Dense( units=hidden_size, activation='tanh', kernel_initializer=tf_utils.clone_initializer(initializer), name='pooler_transform') self._config = { 'vocab_size': vocab_size, 'hidden_size': hidden_size, 'num_layers': num_layers, 'num_attention_heads': num_attention_heads, 'max_sequence_length': max_sequence_length, 'type_vocab_size': type_vocab_size, 'inner_dim': inner_dim, 'inner_activation': tf.keras.activations.serialize(activation), 'output_dropout': output_dropout, 'attention_dropout': attention_dropout, 'token_loss_init_value': token_loss_init_value, 'token_loss_beta': token_loss_beta, 'token_keep_k': token_keep_k, 'token_allow_list': token_allow_list, 'token_deny_list': token_deny_list, 'initializer': tf.keras.initializers.serialize(initializer), 'output_range': output_range, 'embedding_width': embedding_width, 'embedding_layer': embedding_layer, 'norm_first': norm_first, 'with_dense_inputs': with_dense_inputs, } if with_dense_inputs: self.inputs = dict( input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32), input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), dense_inputs=tf.keras.Input( shape=(None, embedding_width), dtype=tf.float32), dense_mask=tf.keras.Input(shape=(None,), dtype=tf.int32), dense_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), ) else: self.inputs = dict( input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32), input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32)) def call(self, inputs, output_range: Optional[tf.Tensor] = None): if isinstance(inputs, dict): word_ids = inputs.get('input_word_ids') mask = inputs.get('input_mask') type_ids = inputs.get('input_type_ids') dense_inputs = inputs.get('dense_inputs', None) dense_mask = inputs.get('dense_mask', None) dense_type_ids = inputs.get('dense_type_ids', None) else: 
raise ValueError('Unexpected inputs type to %s.' % self.__class__) word_embeddings = self._embedding_layer(word_ids) if dense_inputs is not None: # Concat the dense embeddings at sequence end. word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1) type_ids = tf.concat([type_ids, dense_type_ids], axis=1) mask = tf.concat([mask, dense_mask], axis=1) # Absolute position embeddings. position_embeddings = self._position_embedding_layer(word_embeddings) type_embeddings = self._type_embedding_layer(type_ids) embeddings = word_embeddings + position_embeddings + type_embeddings embeddings = self._embedding_norm_layer(embeddings) embeddings = self._embedding_dropout(embeddings) if self._embedding_projection is not None: embeddings = self._embedding_projection(embeddings) attention_mask = self._attention_mask_layer(embeddings, mask) encoder_outputs = [] x = embeddings # Get token routing. token_importance = self._token_importance_embed(word_ids) selected, not_selected = self._token_separator(token_importance) # For a 12-layer BERT: # 1. All tokens first go through 5 transformer layers, then # 2. Only important tokens go through 1 transformer layer with cross # attention to unimportant tokens, then # 3. Only important tokens go through 5 transformer layers without cross # attention. # 4. Finally, all tokens go through the last layer. # Step 1. for i, layer in enumerate(self._transformer_layers[:self._num_layers // 2 - 1]): x = layer([x, attention_mask], output_range=output_range if i == self._num_layers - 1 else None) encoder_outputs.append(x) # Step 2. # First, separate important and non-important tokens. x_selected = tf.gather(x, selected, batch_dims=1, axis=1) mask_selected = tf.gather(mask, selected, batch_dims=1, axis=1) attention_mask_token_drop = self._attention_mask_layer( x_selected, mask_selected) x_not_selected = tf.gather(x, not_selected, batch_dims=1, axis=1) mask_not_selected = tf.gather(mask, not_selected, batch_dims=1, axis=1) attention_mask_token_pass = self._attention_mask_layer( x_selected, tf.concat([mask_selected, mask_not_selected], axis=1)) x_all = tf.concat([x_selected, x_not_selected], axis=1) # Then, call transformer layer with cross attention. x_selected = self._transformer_layers[self._num_layers // 2 - 1]( [x_selected, x_all, attention_mask_token_pass], output_range=output_range if self._num_layers // 2 - 1 == self._num_layers - 1 else None) encoder_outputs.append(x_selected) # Step 3. for i, layer in enumerate(self._transformer_layers[self._num_layers // 2:-1]): x_selected = layer([x_selected, attention_mask_token_drop], output_range=output_range if i == self._num_layers - 1 else None) encoder_outputs.append(x_selected) # Step 4. # First, merge important and non-important tokens. x_not_selected = tf.cast(x_not_selected, dtype=x_selected.dtype) x = tf.concat([x_selected, x_not_selected], axis=1) indices = tf.concat([selected, not_selected], axis=1) reverse_indices = tf.argsort(indices) x = tf.gather(x, reverse_indices, batch_dims=1, axis=1) # Then, call transformer layer with all tokens. 
x = self._transformer_layers[-1]([x, attention_mask], output_range=output_range) encoder_outputs.append(x) last_encoder_output = encoder_outputs[-1] first_token_tensor = last_encoder_output[:, 0, :] pooled_output = self._pooler_layer(first_token_tensor) return dict( sequence_output=encoder_outputs[-1], pooled_output=pooled_output, encoder_outputs=encoder_outputs) def record_mlm_loss(self, mlm_ids: tf.Tensor, mlm_losses: tf.Tensor): self._token_importance_embed.update_token_importance( token_ids=mlm_ids, importance=mlm_losses) def get_embedding_table(self): return self._embedding_layer.embeddings def get_embedding_layer(self): return self._embedding_layer def get_config(self): return dict(self._config) @property def transformer_layers(self): """List of Transformer layers in the encoder.""" return self._transformer_layers @property def pooler_layer(self): """The pooler dense layer after the transformer layers.""" return self._pooler_layer @classmethod def from_config(cls, config, custom_objects=None): if 'embedding_layer' in config and config['embedding_layer'] is not None: warn_string = ( 'You are reloading a model that was saved with a ' 'potentially-shared embedding layer object. If you continue to ' 'train this model, the embedding layer will no longer be shared. ' 'To work around this, load the model outside of the Keras API.') print('WARNING: ' + warn_string) logging.warning(warn_string) return cls(**config)
17,125
41.708229
80
py
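The encoder_test.py record that follows exercises TokenDropBertEncoder end to end; for quick orientation, a minimal forward pass mirroring those tests (shapes chosen arbitrarily) looks like this:

import numpy as np
from official.projects.token_dropping import encoder

# Tiny encoder: keep only 2 "important" tokens in the middle layers.
net = encoder.TokenDropBertEncoder(
    vocab_size=100, hidden_size=32, num_attention_heads=2, num_layers=3,
    token_keep_k=2, token_allow_list=(), token_deny_list=())
outputs = net(dict(
    input_word_ids=np.random.randint(100, size=(3, 21)),
    input_mask=np.ones((3, 21), dtype=np.int32),
    input_type_ids=np.zeros((3, 21), dtype=np.int32)))
print(outputs['sequence_output'].shape)  # (3, 21, 32)
print(outputs['pooled_output'].shape)    # (3, 32)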
models
models-master/official/projects/token_dropping/encoder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for transformer-based bert encoder network.""" # Import libraries from absl.testing import parameterized import numpy as np import tensorflow as tf from official.nlp.modeling.networks import bert_encoder from official.projects.token_dropping import encoder class TokenDropBertEncoderTest(tf.test.TestCase, parameterized.TestCase): def tearDown(self): super(TokenDropBertEncoderTest, self).tearDown() tf.keras.mixed_precision.set_global_policy("float32") def test_dict_outputs_network_creation(self): hidden_size = 32 sequence_length = 21 # Create a small BertEncoder for testing. test_network = encoder.TokenDropBertEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, token_keep_k=2, token_allow_list=(), token_deny_list=()) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] self.assertIsInstance(test_network.transformer_layers, list) self.assertLen(test_network.transformer_layers, 3) self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float32, pooled.dtype) def test_dict_outputs_all_encoder_outputs_network_creation(self): hidden_size = 32 sequence_length = 21 # Create a small BertEncoder for testing. test_network = encoder.TokenDropBertEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, dict_outputs=True, token_keep_k=sequence_length, token_allow_list=(), token_deny_list=()) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) all_encoder_outputs = dict_outputs["encoder_outputs"] pooled = dict_outputs["pooled_output"] expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertLen(all_encoder_outputs, 3) for data in all_encoder_outputs: self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. 
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype) self.assertAllEqual(tf.float32, pooled.dtype) def test_dict_outputs_network_creation_with_float16_dtype(self): hidden_size = 32 sequence_length = 21 tf.keras.mixed_precision.set_global_policy("mixed_float16") # Create a small BertEncoder for testing. test_network = encoder.TokenDropBertEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=4, dict_outputs=True, token_keep_k=2, token_allow_list=(), token_deny_list=()) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # If float_dtype is set to float16, the data output is float32 (from a layer # norm) and pool output should be float16. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float16, pooled.dtype) @parameterized.named_parameters( ("all_sequence_encoder", None, 21), ("output_range_encoder", 1, 1), ) def test_dict_outputs_network_invocation( self, output_range, out_seq_len): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 # Create a small BertEncoder for testing. test_network = encoder.TokenDropBertEncoder( vocab_size=vocab_size, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, dict_outputs=True, token_keep_k=2, token_allow_list=(), token_deny_list=()) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids), output_range=output_range) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. 
batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[1], out_seq_len) # Creates a BertEncoder with max_sequence_length != sequence_length max_sequence_length = 128 test_network = encoder.TokenDropBertEncoder( vocab_size=vocab_size, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, dict_outputs=True, token_keep_k=2, token_allow_list=(), token_deny_list=()) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[1], sequence_length) # Creates a BertEncoder with embedding_width != hidden_size test_network = encoder.TokenDropBertEncoder( vocab_size=vocab_size, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, embedding_width=16, dict_outputs=True, token_keep_k=2, token_allow_list=(), token_deny_list=()) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[-1], hidden_size) self.assertTrue(hasattr(test_network, "_embedding_projection")) def test_network_creation(self): hidden_size = 32 sequence_length = 21 # Create a small BertEncoder for testing. test_network = encoder.TokenDropBertEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, token_keep_k=2, token_allow_list=(), token_deny_list=()) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] self.assertIsInstance(test_network.transformer_layers, list) self.assertLen(test_network.transformer_layers, 3) self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float32, pooled.dtype) test_network = encoder.TokenDropBertEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, token_keep_k=2, token_allow_list=(), token_deny_list=()) # Create the inputs (note that the first dimension is implicit). 
inputs = dict( input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids) _ = test_network(inputs) def test_all_encoder_outputs_network_creation(self): hidden_size = 32 sequence_length = 21 # Create a small BertEncoder for testing. test_network = encoder.TokenDropBertEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, return_all_encoder_outputs=True, token_keep_k=sequence_length, token_allow_list=(), token_deny_list=()) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) all_encoder_outputs = dict_outputs["encoder_outputs"] pooled = dict_outputs["pooled_output"] expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertLen(all_encoder_outputs, 3) for data in all_encoder_outputs: self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype) self.assertAllEqual(tf.float32, pooled.dtype) def test_network_creation_with_float16_dtype(self): hidden_size = 32 sequence_length = 21 tf.keras.mixed_precision.set_global_policy("mixed_float16") # Create a small BertEncoder for testing. test_network = encoder.TokenDropBertEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=4, token_keep_k=2, token_allow_list=(), token_deny_list=()) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # If float_dtype is set to float16, the data output is float32 (from a layer # norm) and pool output should be float16. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float16, pooled.dtype) @parameterized.named_parameters( ("all_sequence", None, 21), ("output_range", 1, 1), ) def test_network_invocation(self, output_range, out_seq_len): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 # Create a small BertEncoder for testing. test_network = encoder.TokenDropBertEncoder( vocab_size=vocab_size, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, token_keep_k=2, token_allow_list=(), token_deny_list=()) # Create the inputs (note that the first dimension is implicit). 
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids), output_range=output_range) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[1], out_seq_len) # Creates a BertEncoder with max_sequence_length != sequence_length max_sequence_length = 128 test_network = encoder.TokenDropBertEncoder( vocab_size=vocab_size, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, token_keep_k=2, token_allow_list=(), token_deny_list=()) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[1], sequence_length) # Creates a BertEncoder with embedding_width != hidden_size test_network = encoder.TokenDropBertEncoder( vocab_size=vocab_size, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, embedding_width=16, token_keep_k=2, token_allow_list=(), token_deny_list=()) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[-1], hidden_size) self.assertTrue(hasattr(test_network, "_embedding_projection")) class TokenDropCompatibilityTest(tf.test.TestCase): def tearDown(self): super().tearDown() tf.keras.mixed_precision.set_global_policy("float32") def test_checkpoint_forward_compatible(self): batch_size = 3 hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 kwargs = dict( vocab_size=vocab_size, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, output_range=None) word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) data = dict( input_word_ids=word_id_data, input_mask=mask_data, input_type_ids=type_id_data) old_net = bert_encoder.BertEncoderV2(**kwargs) old_net_outputs = old_net(data) ckpt = tf.train.Checkpoint(net=old_net) path = ckpt.save(self.get_temp_dir()) new_net = encoder.TokenDropBertEncoder( token_keep_k=sequence_length, token_allow_list=(), token_deny_list=(), **kwargs) new_ckpt = 
tf.train.Checkpoint(net=new_net) status = new_ckpt.restore(path) status.assert_existing_objects_matched() # assert_consumed will fail because the old model has redundant nodes. new_net_outputs = new_net(data) self.assertAllEqual(old_net_outputs.keys(), new_net_outputs.keys()) for key in old_net_outputs: self.assertAllClose(old_net_outputs[key], new_net_outputs[key]) def test_keras_model_checkpoint_forward_compatible(self): batch_size = 3 hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 kwargs = dict( vocab_size=vocab_size, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, output_range=None) word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) data = dict( input_word_ids=word_id_data, input_mask=mask_data, input_type_ids=type_id_data) old_net = bert_encoder.BertEncoderV2(**kwargs) inputs = old_net.inputs outputs = old_net(inputs) old_model = tf.keras.Model(inputs=inputs, outputs=outputs) old_model_outputs = old_model(data) ckpt = tf.train.Checkpoint(net=old_model) path = ckpt.save(self.get_temp_dir()) new_net = encoder.TokenDropBertEncoder( token_keep_k=sequence_length, token_allow_list=(), token_deny_list=(), **kwargs) inputs = new_net.inputs outputs = new_net(inputs) new_model = tf.keras.Model(inputs=inputs, outputs=outputs) new_ckpt = tf.train.Checkpoint(net=new_model) new_ckpt.restore(path) new_model_outputs = new_model(data) self.assertAllEqual(old_model_outputs.keys(), new_model_outputs.keys()) for key in old_model_outputs: self.assertAllClose(old_model_outputs[key], new_model_outputs[key]) if __name__ == "__main__": tf.test.main()
20,527
38.401152
80
py
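One mechanism from encoder.py worth isolating: Step 4 restores the original token order after important and unimportant tokens were processed separately, by concatenating the two index sets and inverting the permutation with tf.argsort. A standalone micro-example of exactly that gather/argsort pattern:

import tensorflow as tf

selected = tf.constant([[2, 0]])      # original positions of the kept tokens
not_selected = tf.constant([[1, 3]])  # original positions of dropped tokens
# x holds the token states in permuted (selected ++ not_selected) order.
x = tf.constant([[[20.], [0.], [10.], [30.]]])
indices = tf.concat([selected, not_selected], axis=1)  # [[2, 0, 1, 3]]
reverse_indices = tf.argsort(indices)                  # [[1, 2, 0, 3]]
restored = tf.gather(x, reverse_indices, batch_dims=1, axis=1)
print(restored[0, :, 0].numpy())  # [ 0. 10. 20. 30.] -- original order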
models
models-master/official/projects/token_dropping/masked_lm_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for official.nlp.tasks.masked_lm.""" import tensorflow as tf from official.nlp.configs import bert from official.nlp.configs import encoders from official.nlp.data import pretrain_dataloader from official.projects.token_dropping import encoder_config from official.projects.token_dropping import masked_lm class MLMTaskTest(tf.test.TestCase): def test_task(self): config = masked_lm.TokenDropMaskedLMConfig( init_checkpoint=self.get_temp_dir(), scale_loss=True, model=bert.PretrainerConfig( encoder=encoders.EncoderConfig( any=encoder_config.TokenDropBertEncoderConfig( vocab_size=30522, num_layers=1, token_keep_k=64), type="any"), cls_heads=[ bert.ClsHeadConfig( inner_dim=10, num_classes=2, name="next_sentence") ]), train_data=pretrain_dataloader.BertPretrainDataConfig( input_path="dummy", max_predictions_per_seq=20, seq_length=128, global_batch_size=1)) task = masked_lm.TokenDropMaskedLMTask(config) model = task.build_model() metrics = task.build_metrics() dataset = task.build_inputs(config.train_data) iterator = iter(dataset) optimizer = tf.keras.optimizers.SGD(learning_rate=0.1) task.train_step(next(iterator), model, optimizer, metrics=metrics) task.validation_step(next(iterator), model, metrics=metrics) # Saves a checkpoint. ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items) ckpt.save(config.init_checkpoint) task.initialize(model) if __name__ == "__main__": tf.test.main()
2,284
34.703125
74
py
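The token_loss_init_value and token_loss_beta knobs exercised in the test above feed layers.TokenImportanceWithMovingAvg, which tracks a running average of each token's MLM loss. That layer's internals are not part of this dump, so the following is only a plain-Python sketch of the generic exponential-moving-average update those parameter names suggest:

def ema_update(importance, observed_loss, beta=0.995):
  """One assumed EMA step: blend the old importance with the new loss."""
  return beta * importance + (1.0 - beta) * observed_loss

importance = 10.0  # token_loss_init_value
for _ in range(100):
  importance = ema_update(importance, observed_loss=2.0)
print(round(importance, 2))  # ~6.85: drifts from 10.0 toward the observed 2.0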
models
models-master/official/projects/token_dropping/encoder_config.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Token dropping encoder configuration and instantiation.""" import dataclasses from typing import Tuple import tensorflow as tf from official.modeling import tf_utils from official.modeling.hyperparams import base_config from official.nlp.configs import encoders from official.projects.token_dropping import encoder @dataclasses.dataclass class TokenDropBertEncoderConfig(encoders.BertEncoderConfig): token_loss_init_value: float = 10.0 token_loss_beta: float = 0.995 token_keep_k: int = 256 token_allow_list: Tuple[int, ...] = (100, 101, 102, 103) token_deny_list: Tuple[int, ...] = (0,) @base_config.bind(TokenDropBertEncoderConfig) def get_encoder(encoder_cfg: TokenDropBertEncoderConfig): """Instantiates 'TokenDropBertEncoder'. Args: encoder_cfg: A 'TokenDropBertEncoderConfig'. Returns: An 'encoder.TokenDropBertEncoder' object. """ return encoder.TokenDropBertEncoder( vocab_size=encoder_cfg.vocab_size, hidden_size=encoder_cfg.hidden_size, num_layers=encoder_cfg.num_layers, num_attention_heads=encoder_cfg.num_attention_heads, intermediate_size=encoder_cfg.intermediate_size, activation=tf_utils.get_activation(encoder_cfg.hidden_activation), dropout_rate=encoder_cfg.dropout_rate, attention_dropout_rate=encoder_cfg.attention_dropout_rate, max_sequence_length=encoder_cfg.max_position_embeddings, type_vocab_size=encoder_cfg.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), output_range=encoder_cfg.output_range, embedding_width=encoder_cfg.embedding_size, return_all_encoder_outputs=encoder_cfg.return_all_encoder_outputs, dict_outputs=True, norm_first=encoder_cfg.norm_first, token_loss_init_value=encoder_cfg.token_loss_init_value, token_loss_beta=encoder_cfg.token_loss_beta, token_keep_k=encoder_cfg.token_keep_k, token_allow_list=encoder_cfg.token_allow_list, token_deny_list=encoder_cfg.token_deny_list)
2,660
38.132353
74
py
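masked_lm_test.py (earlier in this dump) shows how this config slots into the generic encoder registry through the 'any' field; condensed, the wiring looks roughly as follows. The direct encoders.build_encoder call is an illustration of what the @base_config.bind registration enables, not a line taken from the tests:

from official.nlp.configs import encoders
from official.projects.token_dropping import encoder_config

cfg = encoders.EncoderConfig(
    any=encoder_config.TokenDropBertEncoderConfig(
        vocab_size=30522, num_layers=1, token_keep_k=64),
    type="any")
# The bound factory above lets the generic builder resolve this custom
# config to a TokenDropBertEncoder instance.
net = encoders.build_encoder(cfg)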
models
models-master/official/projects/perceiver/modeling/networks/positional_decoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Perceiver networks.""" import tensorflow as tf from official.nlp.modeling import layers class PositionalDecoder(tf.keras.layers.Layer): """Perceiver Positional Decoder Network. Creates a position encoding for queries and composes a basic decoder, e.g. the positional decoder can be used for MLM, classification, or regression. Currently only supports positional decoding. Use `self.inputs` for inputs. Attributes: inputs: A `Dict[Text, tf.keras.Input]` with `latent_output` and `input_mask`. The shape of `latent_output` is shape `(z_index_dim, d_latents)` with dtype `tf.float32` and `input_mask` is shape `(None)` with dtype `tf.int32`. """ def __init__(self, decoder, output_index_dim, z_index_dim, d_latents, d_model, position_encoding_intializer_stddev=0.02, name='positional_decoder', **kwargs): """Init. Args: decoder: Instance of perceiver `Decoder`. output_index_dim: Sequence length for the query encoding. z_index_dim: Latent index dimension. d_latents: Latent last dimension. d_model: Model last dimension. position_encoding_intializer_stddev: `stddev` of `tf.keras.initializers.TruncatedNormal` used for the learned position embedding table kernel initializer. name: Sets the `tf.keras.layers.Layer` name. **kwargs: Any keyword arguments to pass through to `tf.keras.layers.Layer`. """ super().__init__(**kwargs, name=name) self._decoder = decoder self._output_index_dim = output_index_dim self._z_index_dim = z_index_dim self._d_latents = d_latents self._d_model = d_model self._output_pos_enc = self._create_decoder_query( position_encoding_intializer_stddev) self.inputs = dict( latent_output=tf.keras.Input( shape=(self._z_index_dim, self._d_latents), dtype=tf.float32), input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32)) def _create_decoder_query(self, position_encoding_intializer_stddev): """Create the position encoding for the output query.""" return layers.PositionEmbedding( max_length=self._output_index_dim, name='decoder_pos_enc', initializer=tf.keras.initializers.TruncatedNormal( stddev=position_encoding_intializer_stddev)) def call(self, inputs, training=None): """Return decoded output of latent vector. Uses the positional encoding as query for the decoder and uses the `latent_output` as key-value for the decoder. Args: inputs: A `Dict[Text, tf.keras.Input]` with `latent_output` and `input_mask`. The shape of `latent_output` is shape `(z_index_dim, d_latents)` with dtype `tf.float32` and `input_mask` is shape `(None)` with dtype `tf.int32`. training: Flag to indicate training status. Default is `None`. It is passed to the decoder as is. Returns: `Dict[Text, tf.Tensor]` decoded `sequence_output` of a latent vector. 
""" if not isinstance(inputs, dict): raise ValueError(f'Unexpected inputs type to {self.__class__}.') latent_output = inputs['latent_output'] query_mask = inputs.get('input_mask') decoder_query = self._output_pos_enc(tf.ones( (tf.shape(latent_output)[0], self._output_index_dim, self._d_model), dtype=latent_output.dtype)) z = latent_output sequence_output = self._decoder( [decoder_query, z], query_mask=query_mask, training=training) return dict(sequence_output=sequence_output)
4,361
33.078125
78
py
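positional_decoder_test.py (later in this dump) builds this network from the perceiver configs; a condensed version of that construction plus one forward pass:

import tensorflow as tf
from official.projects.perceiver.configs import perceiver as cfg
from official.projects.perceiver.modeling.layers import decoder
from official.projects.perceiver.modeling.networks import positional_decoder

# Build the inner perceiver Decoder from its config, then wrap it so a
# learned positional query reads the latent array back out to length 21.
decoder_ = decoder.Decoder(
    **cfg.DecoderConfig(output_last_dim=48, v_last_dim=48,
                        num_heads=2).as_dict())
net = positional_decoder.PositionalDecoder(
    decoder=decoder_, output_index_dim=21, z_index_dim=8,
    d_latents=48, d_model=64)
outputs = net(dict(latent_output=tf.ones((3, 8, 48)),
                   input_mask=tf.ones((3, 21), dtype=tf.int32)))
print(outputs['sequence_output'].shape)  # (3, 21, 64)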
models
models-master/official/projects/perceiver/modeling/networks/sequence_encoder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for sequence_encoder.""" import numpy as np import tensorflow as tf from official.projects.perceiver.configs import encoders from official.projects.perceiver.configs import perceiver from official.projects.perceiver.modeling.layers import encoder from official.projects.perceiver.modeling.networks import sequence_encoder class SequenceEncoderTest(tf.test.TestCase): def _create_small_network( self, sequence_length, z_index_dim, d_latents, vocab_size=100): d_model = 64 num_layers = 2 encoder_cfg = perceiver.EncoderConfig( v_last_dim=d_latents, num_self_attends_per_block=num_layers) sequence_encoder_cfg = perceiver.SequenceEncoderConfig( d_model=d_model, d_latents=d_latents, z_index_dim=z_index_dim, max_seq_len=sequence_length, vocab_size=vocab_size, encoder=encoder_cfg) return encoders.build_encoder(sequence_encoder_cfg) def test_dict_outputs_network_creation(self): sequence_length = 21 z_index_dim = 128 d_latents = 48 test_network = self._create_small_network( sequence_length=sequence_length, z_index_dim=z_index_dim, d_latents=d_latents) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["latent_output"] expected_data_shape = [None, z_index_dim, d_latents] self.assertAllEqual(expected_data_shape, data.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, data.dtype) def test_dict_outputs_network_invocation(self): num_types = 7 vocab_size = 57 sequence_length = 21 z_index_dim = 128 d_latents = 48 test_network = self._create_small_network( sequence_length=sequence_length, z_index_dim=z_index_dim, d_latents=d_latents, vocab_size=vocab_size) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["latent_output"] # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. 
batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[1], d_latents) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. sequence_length = 21 vocab_size = 57 d_model = 64 d_latents = 48 z_index_dim = 128 num_layers = 2 encoder_cfg = perceiver.EncoderConfig( v_last_dim=d_latents, num_self_attends_per_block=num_layers) sequence_encoder_config = perceiver.SequenceEncoderConfig( d_model=d_model, d_latents=d_latents, z_index_dim=z_index_dim, max_seq_len=sequence_length, vocab_size=vocab_size, encoder=encoder_cfg) encoder_ = encoder.Encoder( **sequence_encoder_config.encoder.as_dict()) network = sequence_encoder.SequenceEncoder( encoder=encoder_, d_model=sequence_encoder_config.d_model, d_latents=sequence_encoder_config.d_latents, z_index_dim=sequence_encoder_config.z_index_dim, max_seq_len=sequence_encoder_config.max_seq_len, vocab_size=sequence_encoder_config.vocab_size, z_pos_enc_init_scale=sequence_encoder_config.z_pos_enc_init_scale, embedding_width=sequence_encoder_config.embedding_width, embedding_initializer_stddev=sequence_encoder_config .embedding_initializer_stddev, input_position_encoding_intializer_stddev=sequence_encoder_config .input_position_encoding_intializer_stddev) word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["latent_output"] # Create a model based off of this network: # model = _ = tf.keras.Model([word_ids, mask, type_ids], [data]) # TODO(b/222634115) make save work. # Tests model saving/loading. # model_path = self.get_temp_dir() + "/model" # model.save(model_path) # _ = tf.keras.models.load_model(model_path) # TODO(b/222634115) add test coverage. if __name__ == "__main__": tf.test.main()
6,083
37.264151
80
py
models
models-master/official/projects/perceiver/modeling/networks/sequence_encoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Perceiver sequence encoder.""" from typing import Optional, Dict import tensorflow as tf from official.nlp.modeling import layers class SequenceEncoder(tf.keras.layers.Layer): """Perceiver encoder for sequences. Assumes positional learned encoding for latent inputs and embeddings. Creates an embedding table with vocab size. It uses the perceiver encode processor to encode the input and process the latent representation. It can be pretrained on masked LM and reused for fine-tuning. Use `self.inputs` for inputs. """ def __init__(self, encoder: tf.keras.layers.Layer, d_model: int, d_latents: int, z_index_dim: int, max_seq_len: int, vocab_size: int, z_pos_enc_init_scale: float = 0.02, embedding_width: Optional[int] = None, embedding_initializer_stddev: float = 0.02, input_position_encoding_intializer_stddev: float = 0.02, name: str = 'sequence_encoder', **kwargs): """Init. Args: encoder: Instance of perceiver `Encoder`. d_model: Last dimension size of the input and output tensors. e.g. `[batch_size, max_seq_len, d_model]`. d_latents: Last dimension size of the latent tensors. e.g. `[batch_size, z_index_dim, d_latents]`. z_index_dim: Second dimension size of the latent tensors. e.g. `[batch_size, z_index_dim, d_latents]`. max_seq_len: Second dimension size of the input and output tensors. e.g. `[batch_size, max_seq_len, d_model]`. vocab_size: Vocabulary size of the embedding table. z_pos_enc_init_scale: Latent array's positional encoding's truncated_normal initializer's `stddev`. embedding_width: Embedding dimension of the embedding table. embedding_initializer_stddev: `stddev` of `tf.keras.initializers.TruncatedNormal` used for the embedding table kernel initializer. input_position_encoding_intializer_stddev: `stddev` of `tf.keras.initializers.TruncatedNormal` used for the learned position embedding table kernel initializer. name: Sets the `tf.keras.layers.Layer` name. **kwargs: Any keyword arguments to pass through to `tf.keras.layers.Layer`. """ super().__init__(**kwargs, name=name) self._embedding_width = embedding_width self._encoder = encoder self._d_model = d_model self._z_index_dim = z_index_dim self._d_latents = d_latents if self._embedding_width is None: self._embedding_width = self._d_model # Construct the embedding layer for the sequence vocab. self._embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=self._embedding_width, initializer=tf.keras.initializers.TruncatedNormal( stddev=embedding_initializer_stddev), name='word_embeddings') # Construct the input positional encoding layer. self._input_pos_encoding = layers.PositionEmbedding( max_length=max_seq_len, initializer=tf.keras.initializers.TruncatedNormal( stddev=input_position_encoding_intializer_stddev), name='input_pos_encoding') # Construct the latent array initial state. 
self._z_pos_enc = layers.PositionEmbedding( max_length=z_index_dim, initializer=tf.keras.initializers.TruncatedNormal( stddev=z_pos_enc_init_scale), name='z_pos_enc') self.inputs = dict( input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32), input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32)) def get_embedding_table(self) -> tf.Variable: """Get embedding table.""" return self._embedding_layer.embeddings def call(self, inputs: Dict[str, tf.Tensor], training: Optional[bool] = None) -> Dict[str, tf.Tensor]: """Return encoded and processed latent output of inputs. Args: inputs: Expect inputs to be a dictionary of `input_word_ids` and `input_mask`. training: Flag to indicate training status. Returns: `Dict[str, tf.Tensor]` decoded output of latent vector via the query. """ if not isinstance(inputs, dict): raise ValueError('Unexpected inputs type to %s.' % self.__class__) word_ids = inputs['input_word_ids'] input_mask = inputs.get('input_mask') word_embeddings = self._embedding_layer(word_ids) pos_encodings = self._input_pos_encoding(word_embeddings) embeddings = word_embeddings + pos_encodings tensor_for_shape = tf.ones( [tf.shape(embeddings)[0], self._z_index_dim, self._d_latents], dtype=embeddings.dtype) encoder_query = self._z_pos_enc(tensor_for_shape) z = self._encoder( [embeddings, encoder_query], input_mask=input_mask, training=training) return dict(latent_output=z)
5,706
35.350318
79
py
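A detail worth isolating from sequence_encoder.py: the latent array's initial state is materialized by applying a learned PositionEmbedding to an all-ones tensor, so the ones only supply a shape and the returned values are the trainable [z_index_dim, d_latents] table broadcast across the batch. The same trick builds the decoder query in positional_decoder.py. A standalone sketch:

import tensorflow as tf
from official.nlp.modeling import layers

batch_size, z_index_dim, d_latents = 3, 8, 48
z_pos_enc = layers.PositionEmbedding(max_length=z_index_dim)
# The values of the ones tensor are ignored; PositionEmbedding returns its
# learned per-position embeddings broadcast to the input's shape.
latent_query = z_pos_enc(tf.ones([batch_size, z_index_dim, d_latents]))
print(latent_query.shape)  # (3, 8, 48)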
models
models-master/official/projects/perceiver/modeling/networks/positional_decoder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for positional_decoder.""" import tensorflow as tf from official.projects.perceiver.configs import perceiver as cfg from official.projects.perceiver.modeling.layers import decoder from official.projects.perceiver.modeling.networks import positional_decoder class PositionalDecoderTest(tf.test.TestCase): def test_dict_outputs_network_creation(self): sequence_length = 21 z_index_dim = 8 d_model = 64 d_latents = 48 decoder_cfg = cfg.DecoderConfig( output_last_dim=d_latents, v_last_dim=d_latents, num_heads=2) positional_decoder_cfg = cfg.PositionalDecoder( decoder=decoder_cfg, d_model=d_model, d_latents=d_latents, output_index_dim=sequence_length, z_index_dim=z_index_dim) decoder_ = decoder.Decoder(**positional_decoder_cfg.decoder.as_dict()) mlm_decoder = positional_decoder.PositionalDecoder( decoder=decoder_, output_index_dim=positional_decoder_cfg.output_index_dim, z_index_dim=positional_decoder_cfg.z_index_dim, d_latents=positional_decoder_cfg.d_latents, d_model=positional_decoder_cfg.d_model) # Create the inputs (note that the first dimension is implicit). latent_output = tf.keras.Input( shape=(z_index_dim, d_latents), dtype=tf.float32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = mlm_decoder( dict(latent_output=latent_output, input_mask=mask)) data = dict_outputs["sequence_output"] expected_data_shape = [None, sequence_length, d_model] self.assertAllEqual(expected_data_shape, data.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, data.dtype) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. sequence_length = 21 z_index_dim = 8 d_model = 64 d_latents = 48 decoder_cfg = cfg.DecoderConfig( output_last_dim=d_latents, v_last_dim=d_latents, num_heads=2) positional_decoder_cfg = cfg.PositionalDecoder( decoder=decoder_cfg, d_model=d_model, d_latents=d_latents, output_index_dim=sequence_length, z_index_dim=z_index_dim) decoder_ = decoder.Decoder(**positional_decoder_cfg.decoder.as_dict()) mlm_decoder = positional_decoder.PositionalDecoder( decoder=decoder_, output_index_dim=positional_decoder_cfg.output_index_dim, z_index_dim=positional_decoder_cfg.z_index_dim, d_latents=positional_decoder_cfg.d_latents, d_model=positional_decoder_cfg.d_model) # Create the inputs (note that the first dimension is implicit). latent_output = tf.keras.Input( shape=(z_index_dim, d_latents), dtype=tf.float32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = mlm_decoder( dict(latent_output=latent_output, input_mask=mask)) data = dict_outputs["sequence_output"] # Create a model based off of this network: # model = _ = tf.keras.Model([latent_output, mask], [data]) # TODO(b/222634115) make save work. # Tests model saving/loading. 
# model_path = self.get_temp_dir() + "/model" # model.save(model_path) # _ = tf.keras.models.load_model(model_path) # TODO(b/222634115) add test coverage. if __name__ == "__main__": tf.test.main()
4,025
35.27027
76
py
models
models-master/official/projects/perceiver/modeling/models/pretrainer_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Perceiver pretrainer model.""" import itertools from absl.testing import parameterized import tensorflow as tf from official.nlp.modeling import layers from official.projects.perceiver.configs import encoders from official.projects.perceiver.configs import perceiver as cfg from official.projects.perceiver.modeling.layers import decoder from official.projects.perceiver.modeling.models import pretrainer from official.projects.perceiver.modeling.networks import positional_decoder class PretrainerTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters(itertools.product( (False, True), (False, True), )) def test_perceiver_pretrainer(self, use_customized_masked_lm, has_masked_lm_positions): """Validate that the Keras object can be created.""" # Build a transformer network to use within the Perceiver trainer. vocab_size = 100 sequence_length = 512 d_model = 64 d_latents = 48 num_layers = 2 encoder_cfg = cfg.EncoderConfig( v_last_dim=d_latents, num_self_attends_per_block=num_layers) sequence_encoder_cfg = cfg.SequenceEncoderConfig( d_model=d_model, d_latents=d_latents, vocab_size=vocab_size, encoder=encoder_cfg) test_network = encoders.build_encoder(sequence_encoder_cfg) _ = test_network(test_network.inputs) deocder_cfg = cfg.DecoderConfig( output_last_dim=d_latents, v_last_dim=d_latents) perceiver_mlm_decoder_cfg = cfg.MaskedLMDecoderConfig( d_model=d_model, decoder=deocder_cfg, d_latents=d_latents) decoder_ = decoder.Decoder( **perceiver_mlm_decoder_cfg.decoder.as_dict()) positional_decoder_ = positional_decoder.PositionalDecoder( decoder=decoder_, output_index_dim=perceiver_mlm_decoder_cfg.output_index_dim, z_index_dim=perceiver_mlm_decoder_cfg.z_index_dim, d_latents=perceiver_mlm_decoder_cfg.d_latents, d_model=perceiver_mlm_decoder_cfg.d_model, position_encoding_intializer_stddev=perceiver_mlm_decoder_cfg .position_encoding_intializer_stddev) if use_customized_masked_lm: customized_masked_lm = layers.MaskedLM( embedding_table=test_network.get_embedding_table()) else: customized_masked_lm = None # Create a Perceiver trainer with the created network. perceiver_trainer_model = pretrainer.Pretrainer( encoder=test_network, decoder=positional_decoder_, customized_masked_lm=customized_masked_lm) num_token_predictions = 20 # Create a set of 2-dimensional inputs (the first dimension is implicit). inputs = dict( input_word_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32), input_mask=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32), input_type_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)) if has_masked_lm_positions: inputs['masked_lm_positions'] = tf.keras.Input( shape=(num_token_predictions,), dtype=tf.int32) # Invoke the trainer model on the inputs. This causes the layer to be built. 
outputs = perceiver_trainer_model(inputs) expected_keys = ['sequence_output'] if has_masked_lm_positions: expected_keys.append('mlm_logits') self.assertSameElements(outputs.keys(), expected_keys) # Validate that the outputs are of the expected shape. expected_lm_shape = [None, num_token_predictions, vocab_size] if has_masked_lm_positions: self.assertAllEqual(expected_lm_shape, outputs['mlm_logits'].shape.as_list()) expected_sequence_output_shape = [None, sequence_length, d_model] self.assertAllEqual(expected_sequence_output_shape, outputs['sequence_output'].shape.as_list()) def test_serialize_deserialize(self): """Validate that the trainer can be serialized and deserialized.""" vocab_size = 100 d_model = 64 d_latents = 48 num_layers = 2 encoder_cfg = cfg.EncoderConfig( v_last_dim=d_latents, num_self_attends_per_block=num_layers) sequence_encoder_cfg = cfg.SequenceEncoderConfig( d_model=d_model, d_latents=d_latents, vocab_size=vocab_size, encoder=encoder_cfg) test_network = encoders.build_encoder(sequence_encoder_cfg) _ = test_network(test_network.inputs) deocder_cfg = cfg.DecoderConfig( output_last_dim=d_latents, v_last_dim=d_latents) perceiver_mlm_decoder_cfg = cfg.MaskedLMDecoderConfig( d_model=d_model, decoder=deocder_cfg, d_latents=d_latents) decoder_ = decoder.Decoder( **perceiver_mlm_decoder_cfg.decoder.as_dict()) positional_decoder_ = positional_decoder.PositionalDecoder( decoder=decoder_, output_index_dim=perceiver_mlm_decoder_cfg.output_index_dim, z_index_dim=perceiver_mlm_decoder_cfg.z_index_dim, d_latents=perceiver_mlm_decoder_cfg.d_latents, d_model=perceiver_mlm_decoder_cfg.d_model, position_encoding_intializer_stddev=perceiver_mlm_decoder_cfg .position_encoding_intializer_stddev) # Create a Perceiver trainer with the created network. perceiver_trainer_model = pretrainer.Pretrainer( encoder=test_network, decoder=positional_decoder_) config = perceiver_trainer_model.get_config() new_perceiver_trainer_model = pretrainer.Pretrainer.from_config(config) # If the serialization was successful, the new config should match the old. self.assertAllEqual(perceiver_trainer_model.get_config(), new_perceiver_trainer_model.get_config()) # TODO(b/222634115) add test coverage. if __name__ == '__main__': tf.test.main()
6,423
37.933333
80
py
models
models-master/official/projects/perceiver/modeling/models/classifier_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for classifier."""

from absl.testing import parameterized
import tensorflow as tf

from official.nlp.modeling import layers
from official.projects.perceiver.configs import encoders
from official.projects.perceiver.configs import perceiver as cfg
from official.projects.perceiver.modeling.layers import decoder
from official.projects.perceiver.modeling.models import classifier
from official.projects.perceiver.modeling.networks import positional_decoder


class ClassifierTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(('single_cls', 1), ('3_cls', 3))
  def test_perceiver_trainer(self, num_classes):
    """Validate that the Keras object can be created."""
    # Build a perceiver sequence encoder network to use within the perceiver
    # trainer.
    vocab_size = 100
    sequence_length = 512
    d_model = 64
    d_latents = 48
    num_layers = 2
    encoder_cfg = cfg.EncoderConfig(
        v_last_dim=d_latents,
        num_self_attends_per_block=num_layers)
    sequence_encoder_cfg = cfg.SequenceEncoderConfig(
        d_model=d_model,
        d_latents=d_latents,
        vocab_size=vocab_size,
        encoder=encoder_cfg)
    test_network = encoders.build_encoder(sequence_encoder_cfg)

    decoder_cfg = cfg.DecoderConfig(
        output_last_dim=d_latents,
        v_last_dim=d_latents)
    perceiver_classification_decoder_cfg = cfg.ClassificationDecoderConfig(
        d_model=d_model,
        decoder=decoder_cfg,
        d_latents=d_latents)
    decoder_ = decoder.Decoder(
        **perceiver_classification_decoder_cfg.decoder.as_dict())
    positional_decoder_ = positional_decoder.PositionalDecoder(
        decoder=decoder_,
        output_index_dim=perceiver_classification_decoder_cfg.output_index_dim,
        z_index_dim=perceiver_classification_decoder_cfg.z_index_dim,
        d_latents=perceiver_classification_decoder_cfg.d_latents,
        d_model=perceiver_classification_decoder_cfg.d_model,
        position_encoding_intializer_stddev=perceiver_classification_decoder_cfg
        .position_encoding_intializer_stddev)

    # Create a classifier with the created network.
    trainer_model = classifier.Classifier(
        network=test_network,
        decoder=positional_decoder_,
        num_classes=num_classes)

    # Create a set of 2-dimensional inputs (the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)

    # Invoke the trainer model on the inputs. This causes the layer to be built.
    cls_outs = trainer_model({
        'input_word_ids': word_ids,
        'input_mask': mask,
        'input_type_ids': type_ids})

    # Validate that the outputs are of the expected shape.
    expected_classification_shape = [None, num_classes]
    self.assertAllEqual(expected_classification_shape,
                        cls_outs.shape.as_list())

  @parameterized.named_parameters(
      ('single_cls', 1, False),
      ('2_cls', 2, False),
      ('single_cls_custom_head', 1, True),
      ('2_cls_custom_head', 2, True))
  def test_perceiver_trainer_tensor_call(self, num_classes, use_custom_head):
    """Validate that the Keras object can be invoked."""
    # Build a perceiver sequence encoder network to use within the perceiver
    # trainer.
    vocab_size = 100
    d_model = 64
    d_latents = 48
    num_layers = 2
    encoder_cfg = cfg.EncoderConfig(
        v_last_dim=d_latents,
        num_self_attends_per_block=num_layers)
    sequence_encoder_cfg = cfg.SequenceEncoderConfig(
        d_model=d_model,
        d_latents=d_latents,
        vocab_size=vocab_size,
        encoder=encoder_cfg)
    test_network = encoders.build_encoder(sequence_encoder_cfg)

    decoder_cfg = cfg.DecoderConfig(
        output_last_dim=d_latents,
        v_last_dim=d_latents)
    perceiver_classification_decoder_cfg = cfg.ClassificationDecoderConfig(
        d_model=d_model,
        decoder=decoder_cfg,
        d_latents=d_latents)
    decoder_ = decoder.Decoder(
        **perceiver_classification_decoder_cfg.decoder.as_dict())
    positional_decoder_ = positional_decoder.PositionalDecoder(
        decoder=decoder_,
        output_index_dim=perceiver_classification_decoder_cfg.output_index_dim,
        z_index_dim=perceiver_classification_decoder_cfg.z_index_dim,
        d_latents=perceiver_classification_decoder_cfg.d_latents,
        d_model=perceiver_classification_decoder_cfg.d_model,
        position_encoding_intializer_stddev=perceiver_classification_decoder_cfg
        .position_encoding_intializer_stddev)

    cls_head = layers.GaussianProcessClassificationHead(
        inner_dim=0, num_classes=num_classes) if use_custom_head else None

    # Create a classifier with the created network.
    trainer_model = classifier.Classifier(
        network=test_network,
        decoder=positional_decoder_,
        cls_head=cls_head,
        num_classes=num_classes)

    # Create a set of 2-dimensional data tensors to feed into the model.
    word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
    mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32)
    type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)

    # Invoke the trainer model on the tensors. In Eager mode, this does the
    # actual calculation. (We can't validate the outputs, since the network is
    # too complex: this simply ensures we're not hitting runtime errors.)
    _ = trainer_model({
        'input_word_ids': word_ids,
        'input_mask': mask,
        'input_type_ids': type_ids})

  @parameterized.named_parameters(
      ('default_cls_head', None),
      ('sngp_cls_head', layers.GaussianProcessClassificationHead(
          inner_dim=0, num_classes=4)))
  def test_serialize_deserialize(self, cls_head):
    """Validate that the trainer can be serialized and deserialized."""
    del cls_head
    vocab_size = 100
    d_model = 64
    d_latents = 48
    num_layers = 2
    encoder_cfg = cfg.EncoderConfig(
        v_last_dim=d_latents,
        num_self_attends_per_block=num_layers)
    sequence_encoder_cfg = cfg.SequenceEncoderConfig(
        d_model=d_model,
        d_latents=d_latents,
        vocab_size=vocab_size,
        encoder=encoder_cfg)
    test_network = encoders.build_encoder(sequence_encoder_cfg)

    decoder_cfg = cfg.DecoderConfig(
        output_last_dim=d_latents,
        v_last_dim=d_latents)
    perceiver_classification_decoder_cfg = cfg.ClassificationDecoderConfig(
        d_model=d_model,
        decoder=decoder_cfg,
        d_latents=d_latents)
    decoder_ = decoder.Decoder(
        **perceiver_classification_decoder_cfg.decoder.as_dict())
    positional_decoder_ = positional_decoder.PositionalDecoder(
        decoder=decoder_,
        output_index_dim=perceiver_classification_decoder_cfg.output_index_dim,
        z_index_dim=perceiver_classification_decoder_cfg.z_index_dim,
        d_latents=perceiver_classification_decoder_cfg.d_latents,
        d_model=perceiver_classification_decoder_cfg.d_model,
        position_encoding_intializer_stddev=perceiver_classification_decoder_cfg
        .position_encoding_intializer_stddev)

    # Create a classifier with the created network.
    trainer_model = classifier.Classifier(
        network=test_network,
        decoder=positional_decoder_,
        num_classes=4)

    # Create another trainer via serialization and deserialization.
    config = trainer_model.get_config()
    new_trainer_model = classifier.Classifier.from_config(config)

    # If the serialization was successful, the new config should match the old.
    self.assertAllEqual(trainer_model.get_config(),
                        new_trainer_model.get_config())

# TODO(b/222634115) add test coverage.

if __name__ == '__main__':
  tf.test.main()
8,478
39.184834
80
py
models
models-master/official/projects/perceiver/modeling/models/classifier.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Perceiver classifier."""

import numpy as np
import tensorflow as tf

from official.nlp.modeling import layers


class Classifier(tf.keras.Model):
  """Classifier model based on a shared encoder and optional decoder.

  This is an implementation of the network structure surrounding a transformer
  encoder as described in "Perceiver IO: A General Architecture for Structured
  Inputs & Outputs" (https://arxiv.org/abs/2107.14795).

  The Classifier allows a user to pass in an encoder stack and an optional
  decoder stack (e.g. perceiver decoder), and instantiates a classification
  network based on the passed `num_classes` argument. If `num_classes` is set
  to 1, a regression network is instantiated.

  This is forked from
  (https://github.com/tensorflow/models/blob/master/official/nlp/modeling/models/bert_classifier.py)

  Attributes:
    network: A perceiver encode and processor transformer network. This
      network should output a classification output. Furthermore, it should
      expose its embedding table via a "get_embedding_table" method.
    num_classes: Number of classes output by the classification head.
    inputs: A `Dict[str, tf.keras.Input]` with `input_word_ids`, `input_mask`,
      and `input_type_ids`. The shapes are all `(None)` with dtype `tf.int32`.
    head_name: Name of the classification head.
    classifier: Classification head layer.
    initializer: `tf.keras.initializers.Initializer` used for classification
      head layer.
  """

  def __init__(self,
               network,
               num_classes,
               decoder=None,
               initializer=None,
               dropout_rate=0.0,
               head_name='glue',
               cls_head=None,
               name='classifier',
               **kwargs):
    """Init.

    Args:
      network: A perceiver encode and processor transformer network. This
        network should output a classification output. Furthermore, it should
        expose its embedding table via a "get_embedding_table" method.
      num_classes: Number of classes to predict from the classification
        network.
      decoder: A perceiver decoder network. This network should accept the
        latent output of the encoder and emit logits.
      initializer: The initializer (if any) to use in the classification
        networks. Defaults to a Glorot uniform initializer.
      dropout_rate: The dropout probability of the cls head.
      head_name: Name of the classification head.
      cls_head: (Optional) The layer instance to use for the classifier head.
        It should take in the output from network and produce the final
        logits. If set, the arguments ('num_classes', 'initializer',
        'dropout_rate', 'head_name') will be ignored.
      name: Sets the `tf.keras.Model` name.
      **kwargs: Any keyword arguments to pass through to `tf.keras.Model`.
""" super().__init__(name=name, **kwargs) self._config = { 'network': network, 'decoder': decoder, 'num_classes': num_classes, 'initializer': initializer, 'dropout_rate': dropout_rate, 'head_name': head_name, 'cls_head': cls_head, 'name': name, } self.num_classes = num_classes self.head_name = head_name self.initializer = initializer self._decoder = decoder self._network = network inputs = self._network.inputs outputs = self._network(inputs) if 'sequence_output' not in outputs: if 'latent_output' in outputs and self._decoder is not None: decoder_inputs = { 'latent_output': outputs['latent_output'], 'input_mask': inputs['input_mask'], } decoder_outputs = self._decoder(decoder_inputs) sequence_output = decoder_outputs['sequence_output'] else: raise ValueError('if `sequence_output` is not in encoder output, ' '`latent_output` must be in encoder output and' 'decoder must exist.') else: sequence_output = outputs['sequence_output'] cls_inputs = sequence_output if initializer is None: stddev = 1. / np.sqrt(cls_inputs.shape[-1]) initializer = tf.keras.initializers.TruncatedNormal(stddev=stddev) if cls_head: classifier = cls_head else: classifier = layers.ClassificationHead( inner_dim=cls_inputs.shape[-1], num_classes=num_classes, initializer=initializer, dropout_rate=dropout_rate, name=head_name) _ = classifier(cls_inputs) self.inputs = inputs self._cls_head = cls_head self._name = name self.classifier = classifier def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks """Return perceiver classifier model output tensors in a dict. Accepts inputs as dictionary of tensors. Args: inputs: A `Dict[str, tf.keras.Input]` with `input_word_ids`, `input_mask`, and `input_type_ids`. The shapes are all `(None)` with dtype `tf.int32`. Returns: `tf.Tensor` classification output. """ if not isinstance(inputs, dict): raise ValueError(f'Unexpected inputs type to {self.__class__}.') word_ids = inputs['input_word_ids'] input_type_ids = inputs.get('input_type_ids') input_mask = inputs.get('input_mask') encoder_inputs = { 'input_word_ids': word_ids, 'input_mask': input_mask, 'input_type_ids': input_type_ids, } encoder_outputs = self._network(encoder_inputs) if 'sequence_output' not in encoder_outputs: if 'latent_output' in encoder_outputs: z = encoder_outputs['latent_output'] decoder_inputs = {'latent_output': z, 'input_mask': input_mask} decoder_output = self._decoder(decoder_inputs) outputs = dict() if isinstance(decoder_output, dict): outputs = decoder_output else: raise ValueError('decoder\'s output should be a dict,' f'but got {decoder_output}') else: raise ValueError('If `sequence_output` is not in encoder output,' '`latent_output` must be in encoder output.') else: outputs = encoder_outputs return self.classifier(outputs['sequence_output']) @property def checkpoint_items(self): """Returns a dictionary of items to be additionally checkpointed.""" items = dict(encoder=self._network, decoder=self._decoder) if hasattr(self.classifier, 'checkpoint_items'): for key, item in self.classifier.checkpoint_items.items(): items['.'.join([self.classifier.name, key])] = item return items def get_config(self): """Return the configuration to set up this object using `from_config`.""" return self._config @classmethod def from_config(cls, config, custom_objects=None): """Initialize object using config from `get_config`. https://www.tensorflow.org/api_docs/python/tf/keras/models/model_from_config Args: config: Return the configuration to set up this object. 
custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A Keras model instance (uncompiled). """ return cls(**config)
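# Example usage (editor's sketch, mirroring classifier_test.py in this
# project; the imports and config values below are illustrative, not
# canonical defaults):
#
#   from official.projects.perceiver.configs import encoders
#   from official.projects.perceiver.configs import perceiver as cfg
#   from official.projects.perceiver.modeling.layers import decoder
#   from official.projects.perceiver.modeling.models import classifier
#   from official.projects.perceiver.modeling.networks import positional_decoder
#
#   network = encoders.build_encoder(cfg.SequenceEncoderConfig(vocab_size=100))
#   dec_cfg = cfg.ClassificationDecoderConfig()
#   pos_decoder = positional_decoder.PositionalDecoder(
#       decoder=decoder.Decoder(**dec_cfg.decoder.as_dict()),
#       output_index_dim=dec_cfg.output_index_dim,
#       z_index_dim=dec_cfg.z_index_dim,
#       d_latents=dec_cfg.d_latents,
#       d_model=dec_cfg.d_model,
#       position_encoding_intializer_stddev=dec_cfg
#       .position_encoding_intializer_stddev)
#   model = classifier.Classifier(
#       network=network, decoder=pos_decoder, num_classes=3)
#   # Calling `model` with a dict of `input_word_ids`, `input_mask`, and
#   # `input_type_ids` returns a [batch_size, 3] logits tensor.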
8,091
34.336245
100
py
models
models-master/official/projects/perceiver/modeling/models/pretrainer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Perceiver networks.""" import copy import tensorflow as tf from official.nlp.modeling import layers class Pretrainer(tf.keras.Model): """Perceiver Pretrainer. Adds the masked language model head upon the encoder output. Optionally incorporates decoder output. Forked from (https://github.com/tensorflow/models/blob/master/official/nlp/modeling/models/bert_pretrainer.py) Attributes: encoder: A perceiver encode and processor transformer network. This network should output a classification output. Furthermore, it should expose its embedding table via a "get_embedding_table" method. masked_lm: Masked language model network head for language modeling with encoder and optionally decoded output. inputs: A `Dict[str, tf.keras.Input]` with `input_word_ids`, `input_mask`, and `input_type_ids`. The shapes are all `(None)` with dtype `tf.int32`. If `masked_lm_positions` is included, it will run masked language modeling layer to return sequence of logits. """ def __init__(self, encoder, decoder=None, mlm_activation=None, mlm_initializer='glorot_uniform', customized_masked_lm=None, name='pretrainer', **kwargs): """Init. Args: encoder: A perceiver encode and processor transformer network. It should expose its embedding table via a "get_embedding_table" method. Decoder won't be used if `sequence_output` is in the output of the encoder. decoder: A perceiver decoder network. This parameter is optional. This layer accepts the latent output of the encoder and emits logits. Decoder must accept a dictionary of `latent_output` and `input_mask` as inputs. This will not be used if `sequence_output` is an output from `encoder`. mlm_activation: The activation (if any) to use in the masked LM network. If `None`, no activation will be used. mlm_initializer: The initializer (if any) to use in the masked LM. Default to a Glorot uniform initializer. customized_masked_lm: A customized masked_lm layer. If None, will create a standard layer from `layers.MaskedLM`; if not None, will use the specified masked_lm layer. Above arguments `mlm_activation` and `mlm_initializer` will be ignored. name: Sets the `tf.keras.Model` name. **kwargs: Any keyword arguments to pass through to `tf.keras.Model`. """ super().__init__(**kwargs, name=name) self._config = { 'encoder': encoder, 'decoder': decoder, 'mlm_initializer': mlm_initializer, 'mlm_activation': mlm_activation, 'customized_masked_lm': customized_masked_lm, 'name': name, } self._decoder = decoder self.encoder = encoder encoder_inputs = self.encoder.inputs # Makes sure the weights are built. 
    encoder_outputs = self.encoder(encoder_inputs)
    if 'sequence_output' not in encoder_outputs:
      if 'latent_output' in encoder_outputs and self._decoder is not None:
        decoder_inputs = {
            'latent_output': encoder_outputs['latent_output'],
            'input_mask': encoder_inputs['input_mask'],
        }
        decoder_outputs = self._decoder(decoder_inputs)
        if 'sequence_output' not in decoder_outputs:
          raise ValueError('`sequence_output` must be in decoder output.')
      else:
        raise ValueError('If `sequence_output` is not in encoder output, '
                         '`latent_output` must be in encoder output and '
                         'decoder must exist.')

    encoder_inputs = copy.copy(self.encoder.inputs)
    inputs = dict(encoder_inputs)
    if self._decoder is not None:
      inputs.update(copy.copy(self._decoder.inputs))

    self.masked_lm = customized_masked_lm or layers.MaskedLM(
        embedding_table=self.encoder.get_embedding_table(),
        activation=mlm_activation,
        initializer=mlm_initializer,
        name='cls/predictions')

    masked_lm_positions = tf.keras.layers.Input(
        shape=(None,), name='masked_lm_positions', dtype=tf.int32)
    if isinstance(inputs, dict):
      inputs['masked_lm_positions'] = masked_lm_positions
    else:
      raise ValueError(f'Unexpected inputs type to {self.__class__}.')
    self.inputs = inputs

  def call(self, inputs):  # pytype: disable=signature-mismatch  # overriding-parameter-count-checks
    """Return perceiver pretrainer model output tensors in a dict.

    Accepts inputs as dictionary of tensors.

    Args:
      inputs: A `Dict[str, tf.keras.Input]` with `input_word_ids`,
        `input_mask`, and `input_type_ids`. The shapes are all `(None)` with
        dtype `tf.int32`. If `masked_lm_positions` is included, it will run
        masked language modeling layer to return sequence of logits.

    Returns:
      `Dict[str, tf.Tensor]` with `sequence_output` and optionally
      `mlm_logits`.
    """
    if not isinstance(inputs, dict):
      raise ValueError(f'Unexpected inputs type to {self.__class__}.')

    word_ids = inputs['input_word_ids']
    input_type_ids = inputs.get('input_type_ids')
    input_mask = inputs.get('input_mask')
    encoder_inputs = {
        'input_word_ids': word_ids,
        'input_mask': input_mask,
        'input_type_ids': input_type_ids,
    }
    encoder_outputs = self.encoder(encoder_inputs)
    if 'sequence_output' not in encoder_outputs:
      if 'latent_output' in encoder_outputs:
        z = encoder_outputs['latent_output']
        decoder_inputs = {'latent_output': z, 'input_mask': input_mask}
        decoder_output = self._decoder(decoder_inputs)
        outputs = dict()
        if isinstance(decoder_output, dict):
          outputs = decoder_output
        else:
          raise ValueError('decoder\'s output should be a dict, '
                           f'but got {decoder_output}')
      else:
        raise ValueError('If `sequence_output` is not in encoder output, '
                         '`latent_output` must be in encoder output.')
    else:
      outputs = encoder_outputs

    sequence_output = outputs['sequence_output']
    # Inference may not have masked_lm_positions and mlm_logits is not needed.
    if 'masked_lm_positions' in inputs:
      masked_lm_positions = inputs['masked_lm_positions']
      outputs['mlm_logits'] = self.masked_lm(
          sequence_output, masked_positions=masked_lm_positions)
    return outputs

  @property
  def checkpoint_items(self):
    """Returns a dictionary of items to be additionally checkpointed."""
    items = dict(
        encoder=self.encoder, masked_lm=self.masked_lm, decoder=self._decoder)
    return items

  def get_config(self):
    """Return the configuration to set up this object using `from_config`."""
    return self._config

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Initialize object using config from `get_config`.
    https://www.tensorflow.org/api_docs/python/tf/keras/models/model_from_config

    Args:
      config: The configuration dictionary, as returned by `get_config`.
      custom_objects: Optional dictionary mapping names (strings) to custom
        classes or functions to be considered during deserialization.

    Returns:
      A Keras model instance (uncompiled).
    """
    return cls(**config)
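# Example usage (editor's sketch, mirroring pretrainer_test.py in this
# project; imports and config values are illustrative):
#
#   encoder_network = encoders.build_encoder(
#       cfg.SequenceEncoderConfig(vocab_size=100))
#   mlm_cfg = cfg.MaskedLMDecoderConfig()
#   mlm_decoder = positional_decoder.PositionalDecoder(
#       decoder=decoder.Decoder(**mlm_cfg.decoder.as_dict()),
#       output_index_dim=mlm_cfg.output_index_dim,
#       z_index_dim=mlm_cfg.z_index_dim,
#       d_latents=mlm_cfg.d_latents,
#       d_model=mlm_cfg.d_model,
#       position_encoding_intializer_stddev=mlm_cfg
#       .position_encoding_intializer_stddev)
#   model = Pretrainer(encoder=encoder_network, decoder=mlm_decoder)
#   # With `masked_lm_positions` in the input dict, the output dict contains
#   # both `sequence_output` and `mlm_logits`; without it, only
#   # `sequence_output`.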
8,096
35.972603
100
py
models
models-master/official/projects/perceiver/modeling/layers/decoder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for decoder."""

import numpy as np
import tensorflow as tf

from official.projects.perceiver.modeling.layers import decoder


class PerceiverBasicDecoderTest(tf.test.TestCase):

  def test_layer_creation(self):
    sequence_length = 80
    embedding_width = 800
    test_layer = decoder.Decoder(
        output_last_dim=embedding_width,
        num_heads=8)
    latent_length = 8
    latent_width = 80
    query_input = tf.keras.Input(
        shape=(sequence_length, embedding_width))
    latent_input = tf.keras.Input(
        shape=(latent_length, latent_width))
    output_tensor = test_layer((query_input, latent_input))
    self.assertEqual(
        query_input.shape.as_list(), output_tensor.shape.as_list())

  def test_layer_creation_with_mask(self):
    embedding_width = 800
    sequence_length = 80
    test_layer = decoder.Decoder(
        output_last_dim=embedding_width,
        num_heads=8)
    latent_length = 8
    latent_width = 80
    query_input = tf.keras.Input(
        shape=(sequence_length, embedding_width))
    latent_input = tf.keras.Input(
        shape=(latent_length, latent_width))
    mask_tensor = tf.keras.Input(
        shape=(sequence_length,), dtype=tf.int32)
    output_tensor = test_layer(
        (query_input, latent_input), query_mask=mask_tensor)
    self.assertEqual(
        query_input.shape.as_list(), output_tensor.shape.as_list())

  def test_layer_invocation(self):
    embedding_width = 800
    sequence_length = 80
    test_layer = decoder.Decoder(
        output_last_dim=embedding_width,
        num_heads=8)
    latent_length = 8
    latent_width = 80
    query_input = tf.keras.Input(
        shape=(sequence_length, embedding_width))
    latent_input = tf.keras.Input(
        shape=(latent_length, latent_width))
    mask_tensor = tf.keras.Input(
        shape=(sequence_length,), dtype=tf.int32)
    output_tensor = test_layer(
        (query_input, latent_input), query_mask=mask_tensor)

    # Create a model from the test layer.
    model = tf.keras.Model(
        ((query_input, latent_input), mask_tensor), output_tensor)

    # Invoke the model on test data. We can't validate the output data itself
    # (the NN is too complex) but this will rule out structural runtime errors.
    batch_size = 6
    latent_data = 10 * np.random.random_sample(
        (batch_size, latent_length, latent_width))
    mask_data = tf.ones((batch_size, sequence_length), dtype=tf.int32)
    query_data = tf.ones(
        (batch_size, sequence_length, embedding_width), dtype=tf.float32)
    _ = model.predict(((query_data, latent_data), mask_data))

# TODO(b/222634115) Add tests to validate logic and dims.

if __name__ == "__main__":
  tf.test.main()
3,391
31.615385
79
py
models
models-master/official/projects/perceiver/modeling/layers/utils.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Perceiver modeling utils."""

import functools

import tensorflow as tf


def make_cross_attention_mask(query_mask, kv_mask):
  """Compute the outer product between `query_mask` and `kv_mask`."""
  # Porting `mask = jax.vmap(jnp.outer)(query_mask, kv_mask)`
  return tf.einsum("ab,ac->abc", query_mask, kv_mask)


def build_cross_attention_block_args(
    input_shape,
    widening_factor=1,
    dropout_prob=0.0,
    dropout_attn_prob=0.0,
    num_heads=8,
    att_init_scale=1.0,
    dense_init_scale=1.0,
    shape_for_attn="kv",
    use_query_residual=True,
    norm_epsilon=1e-5,
    qk_last_dim=None,
    v_last_dim=None):
  """Builds cross attention block arguments for `TransformerEncoderBlock`.

  Build cross attention block arguments for `TransformerEncoderBlock` used in
  Perceiver. The last dimension of the output of the attention block or
  `output_last_dim` of `TransformerEncoderBlocks` is set to the first
  `input_shape`'s last dimension. `diff_q_kv_att_layer_norm` is set to `True`.
  `inner_dropout` is set to 0. `norm_first` is set to `True`.
  `inner_activation` is set to gelu. `kernel_initializer` and
  `attention_initializer` are both `tf.keras.initializers.VarianceScaling`.

  Args:
    input_shape: Check `input_shape` doc in
      `_build_transformer_encoder_block_args`.
    widening_factor: Check `widening_factor` doc in
      `_build_transformer_encoder_block_args`.
    dropout_prob: Check `dropout_prob` doc in
      `_build_transformer_encoder_block_args`.
    dropout_attn_prob: Check `dropout_attn_prob` doc in
      `_build_transformer_encoder_block_args`.
    num_heads: Check `num_heads` doc in
      `_build_transformer_encoder_block_args`.
    att_init_scale: Check `att_init_scale` doc in
      `_build_transformer_encoder_block_args`.
    dense_init_scale: Check `dense_init_scale` doc in
      `_build_transformer_encoder_block_args`.
    shape_for_attn: Valid values are `q` or `kv`. This value is used to
      determine the last dimension of the attention score output.
      `qk_last_dim` has higher precedence over `shape_for_attn`.
    use_query_residual: Toggle to execute residual connection after attention.
    norm_epsilon: Check `norm_epsilon` doc in
      `_build_transformer_encoder_block_args`.
    qk_last_dim: When set, determines the last dimension of the attention
      score output. When it's `None`, it uses the first `input_shape`'s last
      dimension as the last dimension of the attention score output.
      `qk_last_dim` has higher precedence over `shape_for_attn`.
    v_last_dim: Check `v_last_dim` doc in
      `_build_transformer_encoder_block_args`.

  Returns:
    A `dict` mapping `TransformerEncoderBlock` arguments.
References: [Perceiver: General Perception with Iterative Attention](https://arxiv.org/abs/2103.03206) (https://github.com/deepmind/deepmind-research/blob/master/perceiver/perceiver.py) (https://github.com/tensorflow/models/blob/871c4e0a393ef4385534bee55354a5df8aa1ccf4/official/nlp/modeling/layers/transformer_encoder_block.py) """ inputs_q_shape = input_shape[0] inputs_kv_shape = input_shape[1] output_last_dim = inputs_q_shape[-1] if shape_for_attn == "q": f_qk_last_dim = inputs_q_shape[-1] elif shape_for_attn == "kv": f_qk_last_dim = inputs_kv_shape[-1] else: raise ValueError(f"Unknown value {shape_for_attn} for " "shape_for_attention.") f_v_last_dim = None if qk_last_dim is not None: f_qk_last_dim = qk_last_dim if v_last_dim is not None: f_v_last_dim = v_last_dim return _build_transformer_encoder_block_args( input_shape=input_shape, widening_factor=widening_factor, dropout_prob=dropout_prob, dropout_attn_prob=dropout_attn_prob, num_heads=num_heads, att_init_scale=att_init_scale, dense_init_scale=dense_init_scale, use_query_residual=use_query_residual, norm_epsilon=norm_epsilon, qk_last_dim=f_qk_last_dim, v_last_dim=f_v_last_dim, diff_q_kv_att_layer_norm=True, output_last_dim=output_last_dim) def build_self_attention_block_args( input_shape, widening_factor=4, dropout_prob=0.0, dropout_attn_prob=0.0, num_heads=8, att_init_scale=1.0, dense_init_scale=1.0, norm_epsilon=1e-5, qk_last_dim=None, v_last_dim=None): """Builds self attention block arguments for `TransformerEncoderBlock`. Light wrapper around `_build_transformer_encoder_block_args` with some assumptions around self attention block. Builds the arguments for `TransformerEncoderBlock` used in Perceiver. The last dimension of the output of the attention block or `output_last_dim` of `TransformerEncoderBlocks` is set using the logic described in the doc associated with `output_last_dim` in `_build_transformer_encoder_block_args`. `diff_q_kv_att_layer_norm` is set to `False`. `use_query_residual` is set to `True`. `inner_dropout` is set to 0. `norm_first` is set to `True`. `inner_activation` is set to gelu. `kernel_initializer` and `attention_initializer` are both `tf.keras.initializers.VarianceScaling`. Args: input_shape: Check `input_shape` doc in `_build_transformer_encoder_block_args`. widening_factor: Check `widening_factor` doc in `_build_transformer_encoder_block_args`. dropout_prob: Check `dropout_prob` doc in `_build_transformer_encoder_block_args`. dropout_attn_prob: Check `dropout_attn_prob` doc in `_build_transformer_encoder_block_args`. num_heads: Check `num_heads` doc in `_build_transformer_encoder_block_args`. att_init_scale: Check `att_init_scale` doc in `_build_transformer_encoder_block_args`. dense_init_scale: Check `dense_init_scale` doc in `_build_transformer_encoder_block_args`. norm_epsilon: Check `norm_epsilon` doc in `_build_transformer_encoder_block_args`. qk_last_dim: Check `qk_last_dim` doc in `_build_transformer_encoder_block_args`. v_last_dim: Check `v_last_dim` doc in `_build_transformer_encoder_block_args`. Returns: A `dict` mapping `TransformerEncoderBlock` arguments. 
References: [Perceiver: General Perception with Iterative Attention](https://arxiv.org/abs/2103.03206) (https://github.com/deepmind/deepmind-research/blob/master/perceiver/perceiver.py) (https://github.com/tensorflow/models/blob/871c4e0a393ef4385534bee55354a5df8aa1ccf4/official/nlp/modeling/layers/transformer_encoder_block.py) """ return _build_transformer_encoder_block_args( input_shape=input_shape, widening_factor=widening_factor, dropout_prob=dropout_prob, dropout_attn_prob=dropout_attn_prob, num_heads=num_heads, att_init_scale=att_init_scale, dense_init_scale=dense_init_scale, use_query_residual=True, norm_epsilon=norm_epsilon, qk_last_dim=qk_last_dim, v_last_dim=v_last_dim, diff_q_kv_att_layer_norm=False, output_last_dim=None) def _build_transformer_encoder_block_args( input_shape, widening_factor, dropout_prob, dropout_attn_prob, num_heads, att_init_scale, dense_init_scale, use_query_residual, norm_epsilon, qk_last_dim, v_last_dim, diff_q_kv_att_layer_norm, output_last_dim): """Build arguments for `TransformerEncoderBlock`. `inner_dropout` is set to 0. `norm_first` is set to `True`. `inner_activation` is set to gelu. `kernel_initializer` and `attention_initializer` are both `tf.keras.initializers.VarianceScaling`. Args: input_shape: input shape(s). Usually passed through `build` method in `tf.keras.layers.Layer`. widening_factor: Multiplier used to widen on the inner layer of the MLP step within a transformer attention block. dropout_prob: Dropout probability for the post-attention and output dropout. dropout_attn_prob: Dropout probability for within the attention layer. num_heads: Number of attention heads. att_init_scale: Scale for the `tf.keras.initializers.VarianceScaling` used in attention kernel. dense_init_scale: Scale for the `tf.keras.initializers.VarianceScaling` used in MLP kernel. use_query_residual: Toggle to execute residual connection after attention. norm_epsilon: Epsilon value to initialize normalization layers. qk_last_dim: When set, determines the last dimension of the attention score output. When it's `None`, it uses the first `input_shape`'s last dimension as the last dimension of the attention score output. v_last_dim: When set, determines the value's last dimension in the multi-head attention. When it's `None`, it uses the `qk_last_dim` for `inner_dim` and `value_dim`. If `qk_last_dim` is `None`, the first input_shape's last dimension is used as the last dimension of the attention score output. If `output_last_dim` is `None`, `v_last_dim` is used to set the `TransformerEncoderBlock`'s output's last dimension. diff_q_kv_att_layer_norm: If `True`, create a separate attention layer norm layer for query and key-value if `norm_first` is `True`. Invalid to set to `True` if `norm_first` is `False`. output_last_dim: When set, the value determines the last dimension of the output of the attention block or `output_last_dim`. When it's `None`, it uses, in order of decreasing precedence, `v_last_dim`, `qk_last_dim`, and finally first `input_shape`'s last dimension. To clarify, if `v_last_dim` or `qk_last_dim` is `None`, the next order of precedence is used. The value is used to determine the last dimension of the output of the attention block or `output_last_dim`. Returns: A `dict` mapping `TransformerEncoderBlock` arguments. 
References: [Perceiver: General Perception with Iterative Attention](https://arxiv.org/abs/2103.03206) (https://github.com/deepmind/deepmind-research/blob/master/perceiver/perceiver.py) (https://github.com/tensorflow/models/blob/871c4e0a393ef4385534bee55354a5df8aa1ccf4/official/nlp/modeling/layers/transformer_encoder_block.py) """ inputs_q_shape = input_shape[0] # Q and K must have the same number of last dim. # Default to preserving Q's input's shape. if qk_last_dim is None: qk_last_dim = inputs_q_shape[-1] # V's number of last dim determines the shape of the output of QKV-attention. # Default to the same number of last dim used in the key-query operation. if v_last_dim is None: v_last_dim = qk_last_dim # Project the output of QKV attention to a desired number of last dim. # Default to the same number as the output of the QKV attention operation. if output_last_dim is None: output_last_dim = v_last_dim assert qk_last_dim % num_heads == 0 assert v_last_dim % num_heads == 0 qk_last_dim_per_head = qk_last_dim // num_heads v_last_dim_per_head = v_last_dim // num_heads return { "num_attention_heads": num_heads, "inner_dim": output_last_dim * widening_factor, "inner_activation": functools.partial(tf.keras.activations.gelu, approximate=True), "kernel_initializer": tf.keras.initializers.VarianceScaling(scale=dense_init_scale), "attention_initializer": tf.keras.initializers.VarianceScaling(scale=att_init_scale), "norm_first": True, "norm_epsilon": norm_epsilon, "output_dropout": dropout_prob, "attention_dropout": dropout_attn_prob, "inner_dropout": 0.0, "use_query_residual": use_query_residual, "value_dim": v_last_dim_per_head, "key_dim": qk_last_dim_per_head, "output_last_dim": output_last_dim, "diff_q_kv_att_layer_norm": diff_q_kv_att_layer_norm, }
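# Shape sketch for `make_cross_attention_mask` (editor's note): the einsum
# "ab,ac->abc" is a batched outer product, so a [batch, query_len] query mask
# and a [batch, kv_len] key/value mask combine into a
# [batch, query_len, kv_len] attention mask.
#
#   q_mask = tf.ones((2, 8), dtype=tf.int32)    # [batch, query_len]
#   kv_mask = tf.ones((2, 80), dtype=tf.int32)  # [batch, kv_len]
#   mask = make_cross_attention_mask(q_mask, kv_mask)  # shape: [2, 8, 80]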
12,649
34.9375
146
py
models
models-master/official/projects/perceiver/modeling/layers/encoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Perceiver encode processor."""

import tensorflow as tf

from official.nlp.modeling import layers
from official.projects.perceiver.modeling.layers import utils


class Encoder(tf.keras.layers.Layer):
  """Perceiver Encoder and Processor(s) layer.

  This layer implements the Perceiver Encoder and Processor stack from
  "Perceiver: General Perception with Iterative Attention".
  (https://arxiv.org/abs/2103.03206)

  It uses SelfAttention and CrossAttention modules. It allows the user to
  choose the initial latent positional encodings.

  References:
    [Perceiver: General Perception with Iterative
    Attention](https://arxiv.org/abs/2103.03206)
    (https://github.com/deepmind/deepmind-research/blob/master/perceiver/perceiver.py)
    (https://github.com/tensorflow/models/blob/871c4e0a393ef4385534bee55354a5df8aa1ccf4/official/nlp/modeling/layers/transformer_encoder_block.py)
  """

  def __init__(self,
               self_attention_num_heads=8,
               self_attention_widening_factor=1,
               cross_attention_num_heads=8,
               cross_attention_widening_factor=1,
               num_self_attends_per_block=6,
               num_blocks=8,
               qk_last_dim=None,
               v_last_dim=None,
               dropout_prob=0.0,
               dropout_attn_prob=0.0,
               att_init_scale=1.0,
               dense_init_scale=1.0,
               norm_epsilon=1e-5,
               name="encode_processor",
               **kwargs):
    """Init.

    Args:
      self_attention_num_heads: Number of attention heads in the
        self-attention transformer block.
      self_attention_widening_factor: Multiplier used to widen on the inner
        layer of the MLP step within the self-attention transformer block.
      cross_attention_num_heads: Number of attention heads in the
        cross-attention transformer block.
      cross_attention_widening_factor: Multiplier used to widen on the inner
        layer of the MLP step within the cross-attention transformer block.
      num_self_attends_per_block: Number of different self-attention encoders
        initialized per latent perceiver block.
      num_blocks: Number of latent perceiver blocks.
      qk_last_dim: When set, determines the last dimension of the attention
        score output. Check `qk_last_dim` doc in
        `utils.build_cross_attention_block_args` for more details.
      v_last_dim: It can impact the last dimension size of value projection
        in multi-head attention output and `TransformerEncoderBlock`'s output.
        For more details, check `v_last_dim` doc in
        `utils._build_transformer_encoder_block_args`.
      dropout_prob: Dropout probability for the post-attention and output
        dropout.
      dropout_attn_prob: Dropout probability for within the attention layer.
      att_init_scale: Scale for the `tf.keras.initializers.VarianceScaling`
        used in attention kernel.
      dense_init_scale: Scale for the `tf.keras.initializers.VarianceScaling`
        used in MLP kernel.
      norm_epsilon: Epsilon value to initialize normalization layers.
      name: Sets the `tf.keras.layers.Layer` name.
      **kwargs: Any keyword arguments to pass through to
        `tf.keras.layers.Layer`.
""" super().__init__(name=name, **kwargs) self._input_is_1d = True self._num_self_attends_per_block = num_self_attends_per_block self._dropout_prob = dropout_prob self._qk_last_dim = qk_last_dim self._v_last_dim = v_last_dim self._norm_epsilon = norm_epsilon self._dropout_attn_prob = dropout_attn_prob self._att_init_scale = att_init_scale self._dense_init_scale = dense_init_scale self._num_blocks = num_blocks self._self_attention_widening_factor = self_attention_widening_factor self._self_attention_num_heads = self_attention_num_heads self._cross_attention_widening_factor = cross_attention_widening_factor self._cross_attention_num_heads = cross_attention_num_heads self._cross_attention_shape_for_attn = "kv" self._cross_attention_use_query_residual = True def build(self, input_shape): embeddings_shape = input_shape[0] z_shape = input_shape[1] self._self_attention_encoder_blocks = [] for i in range(self._num_self_attends_per_block): self._self_attention_encoder_blocks.append(layers.TransformerEncoderBlock( name=f"self_attention_encoder_{i}", **utils.build_self_attention_block_args( (z_shape,), widening_factor=self._self_attention_widening_factor, dropout_prob=self._dropout_prob, dropout_attn_prob=self._dropout_attn_prob, num_heads=self._self_attention_num_heads, att_init_scale=self._att_init_scale, dense_init_scale=self._dense_init_scale, qk_last_dim=self._qk_last_dim, v_last_dim=self._v_last_dim, norm_epsilon=self._norm_epsilon))) self._cross_attention_encoder_block = layers.TransformerEncoderBlock( name="cross_attention_encoder", **utils.build_cross_attention_block_args( (z_shape, embeddings_shape), widening_factor=self._cross_attention_widening_factor, dropout_prob=self._dropout_prob, dropout_attn_prob=self._dropout_attn_prob, num_heads=self._cross_attention_num_heads, att_init_scale=self._att_init_scale, dense_init_scale=self._dense_init_scale, shape_for_attn=self._cross_attention_shape_for_attn, use_query_residual=self._cross_attention_use_query_residual, norm_epsilon=self._norm_epsilon, qk_last_dim=self._qk_last_dim, v_last_dim=self._v_last_dim)) def call(self, inputs, input_mask=None, training=None): embeddings = inputs[0] z = inputs[1] if input_mask is None: input_mask = tf.ones(tf.shape(embeddings)[:2], dtype=tf.int32) attention_mask = utils.make_cross_attention_mask( query_mask=tf.ones(tf.shape(z)[:2], dtype=tf.int32), kv_mask=input_mask) z = self._cross_attention_encoder_block( (z, embeddings, attention_mask), training=training) for _ in range(self._num_blocks): for self_attention_block in self._self_attention_encoder_blocks: z = self_attention_block(z, training=training) return z
7,101
40.532164
146
py
models
models-master/official/projects/perceiver/modeling/layers/encoder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for encoder.""" import numpy as np import tensorflow as tf from official.projects.perceiver.modeling.layers import encoder class EncoderTest(tf.test.TestCase): def test_layer_creation(self): test_layer = encoder.Encoder( self_attention_num_heads=8, cross_attention_num_heads=8) sequence_length = 80 embedding_width = 800 lantent_length = 8 latent_width = 80 data_input = tf.keras.Input( shape=(sequence_length, embedding_width)) latent_input = tf.keras.Input( shape=(lantent_length, latent_width)) output_tensor = test_layer((data_input, latent_input)) self.assertEqual( latent_input.shape.as_list(), output_tensor.shape.as_list()) def test_layer_creation_with_mask(self): test_layer = encoder.Encoder( self_attention_num_heads=8, cross_attention_num_heads=8) sequence_length = 80 embedding_width = 800 lantent_length = 8 latent_width = 80 data_input = tf.keras.Input( shape=(sequence_length, embedding_width)) latent_input = tf.keras.Input( shape=(lantent_length, latent_width)) mask_tensor = tf.keras.Input( shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer( (data_input, latent_input), input_mask=mask_tensor) self.assertEqual( latent_input.shape.as_list(), output_tensor.shape.as_list()) def test_layer_invocation(self): test_layer = encoder.Encoder( self_attention_num_heads=8, cross_attention_num_heads=8) sequence_length = 80 embedding_width = 800 lantent_length = 8 latent_width = 80 data_input = tf.keras.Input( shape=(sequence_length, embedding_width)) latent_input = tf.keras.Input( shape=(lantent_length, latent_width)) mask_tensor = tf.keras.Input( shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer( (data_input, latent_input), input_mask=mask_tensor) # Create a model from the test layer. model = tf.keras.Model( ((data_input, latent_input), mask_tensor), output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. 
batch_size = 6 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, embedding_width)) mask_data = tf.ones((batch_size, sequence_length), dtype=tf.int32) latent_data = tf.ones((batch_size, lantent_length, latent_width), dtype=tf.float32) _ = model.predict(((input_data, latent_data), mask_data)) def test_self_attention_widening_factor(self): last_dim = 160 self_attention_widening_factor = 2 test_layer = encoder.Encoder( self_attention_widening_factor=self_attention_widening_factor, v_last_dim=last_dim) some_sequence_length = 80 some_embedding_width = 800 some_lantent_length = 8 some_latent_width = last_dim data_input = tf.keras.Input( shape=(some_sequence_length, some_embedding_width)) latent_input = tf.keras.Input( shape=(some_lantent_length, some_latent_width)) mask_tensor = tf.keras.Input(shape=(some_sequence_length), dtype=tf.int32) test_layer((data_input, latent_input), input_mask=mask_tensor) value = test_layer._self_attention_encoder_blocks[ 0]._intermediate_dense.get_config()['output_shape'].pop() self.assertEqual(last_dim * self_attention_widening_factor, value) def test_cross_attention_widening_factor(self): last_dim = 160 cross_attention_widening_factor = 2 test_layer = encoder.Encoder( cross_attention_widening_factor=cross_attention_widening_factor, v_last_dim=last_dim) some_sequence_length = 80 some_embedding_width = 800 some_lantent_length = 8 some_latent_width = last_dim data_input = tf.keras.Input( shape=(some_sequence_length, some_embedding_width)) latent_input = tf.keras.Input( shape=(some_lantent_length, some_latent_width)) mask_tensor = tf.keras.Input(shape=(some_sequence_length), dtype=tf.int32) test_layer((data_input, latent_input), input_mask=mask_tensor) value = test_layer._cross_attention_encoder_block._intermediate_dense.get_config( )['output_shape'].pop() self.assertEqual(last_dim * cross_attention_widening_factor, value) def test_self_attention_num_heads(self): # TODO(b/222634115) parameterize test. self_attention_num_heads = 16 test_layer = encoder.Encoder( self_attention_num_heads=self_attention_num_heads) some_sequence_length = 80 some_embedding_width = 800 some_lantent_length = 8 some_latent_width = 64 data_input = tf.keras.Input( shape=(some_sequence_length, some_embedding_width)) latent_input = tf.keras.Input( shape=(some_lantent_length, some_latent_width)) mask_tensor = tf.keras.Input(shape=(some_sequence_length), dtype=tf.int32) test_layer((data_input, latent_input), input_mask=mask_tensor) value = test_layer._self_attention_encoder_blocks[ 0]._attention_layer.get_config()['num_heads'] self.assertEqual(self_attention_num_heads, value) def test_cross_attention_num_heads(self): # TODO(b/222634115) parameterize test. cross_attention_num_heads = 16 test_layer = encoder.Encoder( cross_attention_num_heads=cross_attention_num_heads) some_sequence_length = 80 some_embedding_width = 800 some_lantent_length = 8 some_latent_width = 64 data_input = tf.keras.Input( shape=(some_sequence_length, some_embedding_width)) latent_input = tf.keras.Input( shape=(some_lantent_length, some_latent_width)) mask_tensor = tf.keras.Input(shape=(some_sequence_length), dtype=tf.int32) test_layer((data_input, latent_input), input_mask=mask_tensor) value = test_layer._cross_attention_encoder_block._attention_layer.get_config( )['num_heads'] self.assertEqual(cross_attention_num_heads, value) def test_num_self_attends_per_block(self): # TODO(b/222634115) parameterize test. 
num_self_attends_per_block = 3 test_layer = encoder.Encoder( num_self_attends_per_block=num_self_attends_per_block) some_sequence_length = 80 some_embedding_width = 800 some_lantent_length = 8 some_latent_width = 64 data_input = tf.keras.Input( shape=(some_sequence_length, some_embedding_width)) latent_input = tf.keras.Input( shape=(some_lantent_length, some_latent_width)) mask_tensor = tf.keras.Input(shape=(some_sequence_length), dtype=tf.int32) test_layer((data_input, latent_input), input_mask=mask_tensor) self.assertLen( test_layer._self_attention_encoder_blocks, num_self_attends_per_block) # TODO(b/222634115) num_blocks # TODO(b/222634115) qk_last_dim validations # TODO(b/222634115) v_last_dim validations # TODO(b/222634115) dropout_prob validation # TODO(b/222634115) dropout_attn_prob validation # TODO(b/222634115) att_init_scale validation # TODO(b/222634115) dense_init_scale validation # TODO(b/222634115) cross_attention_use_query_residual validation # (value passed correctly) # TODO(b/222634115) norm_epsilon # TODO(b/222634115) check latent dims # TODO(b/222634115) make cross att mask validation when input_mask is None # TODO(b/222634115) make cross att mask validation when input_mask is not None if __name__ == '__main__': tf.test.main()
8,211
36.669725
85
py
models
models-master/official/projects/perceiver/modeling/layers/decoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Perceiver basic decoder."""

import collections

import tensorflow as tf

from official.nlp.modeling import layers
from official.projects.perceiver.modeling.layers import utils


class Decoder(tf.keras.layers.Layer):
  """Perceiver Decoder layer.

  Uses cross attention decoder layer.

  This layer implements a Perceiver Decoder from
  "Perceiver: General Perception with Iterative Attention".
  (https://arxiv.org/abs/2103.03206)

  References:
    [Attention Is All You Need](https://arxiv.org/abs/1706.03762)
    [Perceiver: General Perception with Iterative
    Attention](https://arxiv.org/abs/2103.03206)
    (https://github.com/deepmind/deepmind-research/blob/master/perceiver/perceiver.py)
    (https://github.com/tensorflow/models/blob/871c4e0a393ef4385534bee55354a5df8aa1ccf4/official/nlp/modeling/layers/transformer_encoder_block.py)
  """

  def __init__(self,
               output_last_dim,
               qk_last_dim=None,
               v_last_dim=None,
               use_query_residual=False,
               output_w_init=None,
               num_heads=1,
               name="decoder",
               **kwargs):
    """Init.

    Args:
      output_last_dim: Last dim size for output.
      qk_last_dim: When set, determines the last dimension of the attention
        score output. Check `qk_last_dim` doc in
        `utils.build_cross_attention_block_args`.
      v_last_dim: When set, determines the value's last dimension in the
        multi-head attention. Check `v_last_dim` doc in
        `utils._build_transformer_encoder_block_args`.
      use_query_residual: Toggle to execute residual connection after
        attention.
      output_w_init: Output layer kernel initializer.
      num_heads: Number of attention heads for the `TransformerEncoderBlock`.
      name: Sets the `tf.keras.layers.Layer` name.
      **kwargs: Any keyword arguments to pass through to
        `tf.keras.layers.Layer`.
    """
    super().__init__(name=name, **kwargs)
    self._output_last_dim = output_last_dim
    self._output_w_init = output_w_init
    self._use_query_residual = use_query_residual
    self._qk_last_dim = qk_last_dim
    self._v_last_dim = v_last_dim
    self._final_project = False  # Make variable if needed
    self._num_heads = num_heads
    # Omitted `concat_preprocessed_input` for MLM use-case.

  def build(self, input_shape):
    """Build layers using `input_shape`.

    Args:
      input_shape: Input shape(s) of the layer call.
    """
    decoder_query_shape = input_shape[0]
    z_shape = input_shape[1]
    self._decoding_cross_attn = layers.TransformerEncoderBlock(
        **utils.build_cross_attention_block_args(
            (decoder_query_shape, z_shape),
            widening_factor=1,
            dropout_prob=0.0,
            num_heads=self._num_heads,
            shape_for_attn="kv",
            qk_last_dim=self._qk_last_dim,
            v_last_dim=self._v_last_dim,
            use_query_residual=self._use_query_residual))

  def call(self, inputs, training=None, query_mask=None):
    """Return decoded output of latent vector via the query.

    Args:
      inputs: Expect inputs to be a tuple of perceiver's decoder query tensor
        and latent tensor (z). For the cross attention block, `z` is the
        key-value tensor and decoder query is the query tensor.
Latent tensor comes from the self-attention processing blocks and decoder query comes from users to query for the desired output. training: Flag to indicate training status. query_mask: mask used to create the attention mask for the query tensor in the cross attention block. Returns: `tf.Tensor` decoded output of latent vector via the query. """ if not isinstance(inputs, collections.abc.Sequence): raise ValueError("`inputs` must be a sequence.") if len(inputs) != 2: raise ValueError("`inputs` must have two elements.") query, z = inputs # Cross-attention decoding. # key, value: B x N x K; query: B x M x K # Attention maps -> B x N x M # Output -> B x M x K # Construct cross attention and linear layer lazily, in case we don't need # them. if query_mask is None: attention_mask = None else: attention_mask = utils.make_cross_attention_mask( query_mask=query_mask, kv_mask=tf.ones(tf.shape(z)[:2], dtype=tf.int32)) output = self._decoding_cross_attn( (query, z, attention_mask), training=training) return output
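# Example usage (editor's sketch, mirroring decoder_test.py; dimensions are
# illustrative): the latent tensor `z` is decoded into the query's shape.
#
#   layer = Decoder(output_last_dim=800, num_heads=8)
#   query = tf.ones((6, 80, 800))  # [batch, output_index_dim, output_last_dim]
#   z = tf.ones((6, 8, 80))        # [batch, latent_len, d_latents]
#   out = layer((query, z))        # shape: [6, 80, 800]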
5,196
34.114865
146
py
models
models-master/official/projects/perceiver/tasks/pretrain.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Task for perceiver wordpiece tokenized masked language model (MLM)."""

import tensorflow as tf

from official.core import task_factory
from official.modeling import tf_utils
from official.nlp.tasks import masked_lm
from official.projects.perceiver.configs import encoders
from official.projects.perceiver.configs import perceiver
from official.projects.perceiver.modeling.layers import decoder
from official.projects.perceiver.modeling.models import pretrainer
from official.projects.perceiver.modeling.networks import positional_decoder


@task_factory.register_task_cls(perceiver.PretrainConfig)
class PretrainTask(masked_lm.MaskedLMTask):
  """Task for masked language modeling for wordpiece tokenized perceiver."""

  def build_model(self, params=None):
    """Creates perceiver pretrainer model architecture.

    Args:
      params: The task configuration instance, which can be any of dataclass,
        ConfigDict, namedtuple, etc.

    Returns:
      A model instance.
    """
    config = params or self.task_config.model
    sequence_encoder_cfg = config.encoder
    encoder_network = encoders.build_encoder(sequence_encoder_cfg)
    decoder_cfg = config.decoder
    # The decoder config dict must be unpacked into keyword arguments;
    # `Decoder`'s first positional argument is `output_last_dim`.
    decoder_ = decoder.Decoder(**decoder_cfg.decoder.as_dict())
    mlm_decoder = positional_decoder.PositionalDecoder(
        decoder=decoder_,
        output_index_dim=decoder_cfg.output_index_dim,
        z_index_dim=decoder_cfg.z_index_dim,
        d_latents=decoder_cfg.d_latents,
        d_model=decoder_cfg.d_model,
        position_encoding_intializer_stddev=decoder_cfg
        .position_encoding_intializer_stddev)
    return pretrainer.Pretrainer(
        mlm_activation=tf_utils.get_activation(config.mlm_activation),
        mlm_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=config.mlm_initializer_range),
        encoder=encoder_network,
        decoder=mlm_decoder)
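# Example usage (editor's sketch, following the pattern in
# sentence_prediction_test.py; assumes `PretrainerConfig` defaults are
# populated):
#
#   pretrain_cfg = perceiver.PretrainerConfig(
#       encoder=perceiver.SequenceEncoderConfig(vocab_size=30_522))
#   model = PretrainTask(None).build_model(pretrain_cfg)  # a Pretrainer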
2,498
39.306452
76
py
models
models-master/official/projects/perceiver/tasks/sentence_prediction_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for official.projects.perceiver.tasks.sentence_prediction."""

import functools
import os

from absl.testing import parameterized
import numpy as np
import tensorflow as tf

from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import sentence_prediction
from official.projects.perceiver.configs import perceiver
from official.projects.perceiver.tasks import pretrain
from official.projects.perceiver.tasks import sentence_prediction as perceiver_pred


def _create_fake_dataset(output_path, seq_length, num_classes, num_examples):
  """Creates a fake dataset.

  Args:
    output_path: output path for the writer to serialize the dataset.
    seq_length: sequence length of the data.
    num_classes: Number of classes in the sentence prediction output. This is
      used to determine if the label id feature should be for regression or
      classification.
    num_examples: number of fake examples to create.
  """
  with tf.io.TFRecordWriter(output_path) as writer:

    def create_int_feature(values):
      return tf.train.Feature(
          int64_list=tf.train.Int64List(value=np.ravel(values)))

    def create_float_feature(values):
      return tf.train.Feature(
          float_list=tf.train.FloatList(value=np.ravel(values)))

    for i in range(num_examples):
      features = {}
      input_ids = np.random.randint(100, size=(seq_length))
      features["input_ids"] = create_int_feature(input_ids)
      features["input_mask"] = create_int_feature(np.ones_like(input_ids))
      features["segment_ids"] = create_int_feature(np.ones_like(input_ids))
      features["example_id"] = create_int_feature([i])

      if num_classes == 1:
        features["label_ids"] = create_float_feature([np.random.random()])
      else:
        # `np.random.random_integers` is deprecated; `randint` with an
        # exclusive upper bound draws from the same range.
        features["label_ids"] = create_int_feature(
            [np.random.randint(0, num_classes, size=())])
      tf_example = tf.train.Example(
          features=tf.train.Features(feature=features))
      writer.write(tf_example.SerializeToString())


class SentencePredictionTaskTest(tf.test.TestCase, parameterized.TestCase):

  def setUp(self):
    super().setUp()
    self._train_data_config = (
        sentence_prediction_dataloader.SentencePredictionDataConfig(
            input_path="dummy", seq_length=128, global_batch_size=1))

  def get_model_config(self, num_classes):
    return perceiver.ClassificationConfig(
        num_classes=num_classes,
        encoder=perceiver.SequenceEncoderConfig(
            vocab_size=30_522,
            encoder=perceiver.EncoderConfig(
                num_self_attends_per_block=2)))

  def _run_task(self, config):
    task = perceiver_pred.SentencePredictionTask(config)
    model = task.build_model()
    metrics = task.build_metrics()

    strategy = tf.distribute.get_strategy()
    dataset = strategy.distribute_datasets_from_function(
        functools.partial(task.build_inputs, config.train_data))

    iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(lr=0.1)
    task.train_step(next(iterator), model, optimizer, metrics=metrics)
    # model.save(os.path.join(self.get_temp_dir(), "saved_model"))  # TODO(b/222634115) fix save
    return
task.validation_step(next(iterator), model, metrics=metrics) def test_task(self): # Saves a checkpoint. pretrain_cfg = perceiver.PretrainerConfig( encoder=perceiver.SequenceEncoderConfig( vocab_size=30_522, encoder=perceiver.EncoderConfig( num_self_attends_per_block=2))) pretrain_model = pretrain.PretrainTask( None).build_model(pretrain_cfg) # The model variables will be created after the forward call. _ = pretrain_model(pretrain_model.inputs) ckpt = tf.train.Checkpoint( model=pretrain_model, **pretrain_model.checkpoint_items) init_path = ckpt.save(self.get_temp_dir()) # Creates the task. config = perceiver.SentencePredictionConfig( init_checkpoint=init_path, model=self.get_model_config(num_classes=2), train_data=self._train_data_config) task = perceiver_pred.SentencePredictionTask(config) model = task.build_model() metrics = task.build_metrics() dataset = task.build_inputs(config.train_data) iterator = iter(dataset) optimizer = tf.keras.optimizers.SGD(learning_rate=0.1) task.initialize(model) task.train_step(next(iterator), model, optimizer, metrics=metrics) task.validation_step(next(iterator), model, metrics=metrics) @parameterized.named_parameters( { "testcase_name": "regression", "num_classes": 1, "expected_loss_predicate": lambda loss: loss > 1.0, "metric": tf.keras.metrics.MeanSquaredError, }, { "testcase_name": "classification", "num_classes": 2, "expected_loss_predicate": lambda loss: loss < 1.0, "metric": tf.keras.metrics.SparseCategoricalAccuracy }, ) def test_metrics_and_losses(self, num_classes, expected_loss_predicate, metric): config = perceiver.SentencePredictionConfig( init_checkpoint=self.get_temp_dir(), model=self.get_model_config(num_classes), train_data=self._train_data_config) task = perceiver_pred.SentencePredictionTask(config) model = task.build_model() metrics = task.build_metrics() self.assertIsInstance(metrics[0], metric) dataset = task.build_inputs(config.train_data) iterator = iter(dataset) optimizer = tf.keras.optimizers.SGD(learning_rate=0.1) task.train_step(next(iterator), model, optimizer, metrics=metrics) logs = task.validation_step(next(iterator), model, metrics=metrics) loss = logs["loss"].numpy() self.assertTrue(expected_loss_predicate(loss)) @parameterized.named_parameters( { "testcase_name": "matthews_corrcoef", "num_classes": 2, "metric_type": "matthews_corrcoef" }, { "testcase_name": "pearson_spearman_corr", "num_classes": 1, "metric_type": "pearson_spearman_corr" }) def test_np_metrics(self, metric_type, num_classes): config = perceiver.SentencePredictionConfig( metric_type=metric_type, init_checkpoint=self.get_temp_dir(), model=self.get_model_config(num_classes), train_data=self._train_data_config) task = perceiver_pred.SentencePredictionTask(config) model = task.build_model() dataset = task.build_inputs(config.train_data) iterator = iter(dataset) strategy = tf.distribute.get_strategy() distributed_outputs = strategy.run( functools.partial(task.validation_step, model=model), args=(next(iterator),)) outputs = tf.nest.map_structure(strategy.experimental_local_results, distributed_outputs) aggregated = task.aggregate_logs(step_outputs=outputs) aggregated = task.aggregate_logs(state=aggregated, step_outputs=outputs) self.assertIn(metric_type, task.reduce_aggregated_logs(aggregated)) def test_np_metrics_cola_partial_batch(self): train_data_path = os.path.join(self.get_temp_dir(), "train.tf_record") num_examples = 5 global_batch_size = 8 seq_length = 16 _create_fake_dataset( train_data_path, seq_length=seq_length, num_classes=2, num_examples=num_examples) train_data_config = 
( sentence_prediction_dataloader.SentencePredictionDataConfig( input_path=train_data_path, seq_length=seq_length, is_training=True, label_type="int", global_batch_size=global_batch_size, drop_remainder=False, include_example_id=True)) config = perceiver.SentencePredictionConfig( metric_type="matthews_corrcoef", model=self.get_model_config(2), train_data=train_data_config) outputs = self._run_task(config) self.assertEqual(outputs["sentence_prediction"].shape.as_list(), [8, 1]) @parameterized.named_parameters( { "testcase_name": "classification", "num_classes": 5, }, { "testcase_name": "regression", "num_classes": 1, }) def test_prediction(self, num_classes): config = perceiver.SentencePredictionConfig( model=self.get_model_config(num_classes=num_classes), train_data=self._train_data_config) task = perceiver_pred.SentencePredictionTask(config) model = task.build_model() test_data_path = os.path.join(self.get_temp_dir(), "test.tf_record") seq_length = 16 num_examples = 100 _create_fake_dataset( test_data_path, seq_length=seq_length, num_classes=num_classes, num_examples=num_examples) test_data_config = ( sentence_prediction_dataloader.SentencePredictionDataConfig( input_path=test_data_path, seq_length=seq_length, is_training=False, label_type="int" if num_classes > 1 else "float", global_batch_size=16, drop_remainder=False, include_example_id=True)) predictions = sentence_prediction.predict(task, test_data_config, model) self.assertLen(predictions, num_examples) for prediction in predictions: self.assertEqual(prediction.dtype, tf.int64 if num_classes > 1 else tf.float32) if __name__ == "__main__": tf.test.main()
10,241
35.448399
83
py
models
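The fake-dataset helper in the test above reduces to serializing `tf.train.Example` protos into a TFRecord file. A minimal, self-contained sketch of that pattern (the output path and sizes here are hypothetical, not taken from the test):

import numpy as np
import tensorflow as tf

output_path = "/tmp/fake_train.tf_record"  # hypothetical location
seq_length, num_classes, num_examples = 16, 2, 4

with tf.io.TFRecordWriter(output_path) as writer:
  for i in range(num_examples):
    input_ids = np.random.randint(100, size=(seq_length,))
    features = {
        "input_ids": tf.train.Feature(
            int64_list=tf.train.Int64List(value=input_ids)),
        "input_mask": tf.train.Feature(
            int64_list=tf.train.Int64List(value=np.ones_like(input_ids))),
        "segment_ids": tf.train.Feature(
            int64_list=tf.train.Int64List(value=np.ones_like(input_ids))),
        "example_id": tf.train.Feature(
            int64_list=tf.train.Int64List(value=[i])),
        # Classification label; a float_list feature would be used instead
        # when num_classes == 1 (regression).
        "label_ids": tf.train.Feature(
            int64_list=tf.train.Int64List(
                value=[np.random.randint(num_classes)])),
    }
    writer.write(
        tf.train.Example(
            features=tf.train.Features(feature=features)).SerializeToString())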
models-master/official/projects/perceiver/tasks/pretrain_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for official.nlp.tasks.masked_lm.""" import tensorflow as tf import tensorflow_datasets as tfds from official.nlp.data import pretrain_dataloader from official.projects.perceiver.configs import perceiver from official.projects.perceiver.tasks import pretrain as tasks _NUM_EXAMPLES = 10 def _gen_fn(): word_ids = tf.constant([1, 1], dtype=tf.int32) mask = tf.constant([1, 1], dtype=tf.int32) lm_mask = tf.constant([1, 1], dtype=tf.int32) return { 'file_name': 'test', 'masked_lm_positions': lm_mask, 'input_word_ids': word_ids, 'input_mask': mask, } def _as_dataset(self, *args, **kwargs): del args del kwargs return tf.data.Dataset.from_generator( lambda: (_gen_fn() for i in range(_NUM_EXAMPLES)), output_types=self.info.features.dtype, output_shapes=self.info.features.shape, ) def _fake_build_inputs(self, params, input_context=None): # pylint: disable=unused-argument def dummy_data(_): dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32) dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32) return dict( input_word_ids=dummy_ids, input_mask=dummy_ids, masked_lm_positions=dummy_lm, masked_lm_ids=dummy_lm, masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32)) dataset = tf.data.Dataset.range(1) dataset = dataset.repeat() dataset = dataset.map( dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE) return dataset class PretrainTaskTest(tf.test.TestCase): def setUp(self): super().setUp() tasks.PretrainTask.build_inputs = _fake_build_inputs def test_task(self): config = perceiver.PretrainConfig( train_data=pretrain_dataloader.BertPretrainDataConfig( input_path='dummy', global_batch_size=512, use_next_sentence_label=False, use_v2_feature_names=True), validation_data=pretrain_dataloader.BertPretrainDataConfig( input_path='dummy', global_batch_size=512, is_training=False, use_next_sentence_label=False, use_v2_feature_names=True)) task = tasks.PretrainTask(config) model = task.build_model() metrics = task.build_metrics() dataset = task.build_inputs(config.train_data) iterator = iter(dataset) optimizer = tf.keras.optimizers.SGD(learning_rate=0.1) task.train_step(next(iterator), model, optimizer, metrics=metrics) task.validation_step(next(iterator), model, metrics=metrics) # Saves a checkpoint. 
_ = tf.train.Checkpoint(model=model, **model.checkpoint_items) # ckpt.save(config.init_checkpoint) # TODO(b/222634115) fix ckpt.save task.initialize(model) def test_train_step(self): config = perceiver.PretrainConfig( train_data=pretrain_dataloader.BertPretrainDataConfig( input_path='dummy', global_batch_size=512, use_next_sentence_label=False, use_v2_feature_names=True), validation_data=pretrain_dataloader.BertPretrainDataConfig( input_path='dummy', global_batch_size=512, is_training=False, use_next_sentence_label=False, use_v2_feature_names=True)) with tfds.testing.mock_data(as_dataset_fn=_as_dataset): task = tasks.PretrainTask(config) model = task.build_model() dataset = task.build_inputs(config.train_data) metrics = task.build_metrics() iterator = iter(dataset) opt_cfg = perceiver._MLM_WORDPIECE_TRAINER.optimizer_config optimizer = tasks.PretrainTask.create_optimizer(opt_cfg) task.train_step(next(iterator), model, optimizer, metrics=metrics) # TODO(b/222634115) add test coverage. if __name__ == '__main__': tf.test.main()
4,425
32.278195
92
py
models
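The `_as_dataset` stub above plugs a generator-backed `tf.data.Dataset` into `tfds.testing.mock_data`. The same generator pattern works standalone; a small sketch (feature names mirror the test, shapes are illustrative):

import tensorflow as tf

def gen():
  for _ in range(10):
    yield {
        "input_word_ids": tf.constant([1, 1], dtype=tf.int32),
        "input_mask": tf.constant([1, 1], dtype=tf.int32),
        "masked_lm_positions": tf.constant([1, 1], dtype=tf.int32),
    }

dataset = tf.data.Dataset.from_generator(
    gen,
    output_signature={
        "input_word_ids": tf.TensorSpec([2], tf.int32),
        "input_mask": tf.TensorSpec([2], tf.int32),
        "masked_lm_positions": tf.TensorSpec([2], tf.int32),
    })
for batch in dataset.take(1):
  print(batch["input_word_ids"])  # tf.Tensor([1 1], shape=(2,), dtype=int32)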
models-master/official/projects/mosaic/mosaic_tasks.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Task definition for image semantic segmentation with MOSAIC models.""" from absl import logging import tensorflow as tf from official.core import task_factory from official.projects.mosaic.configs import mosaic_config from official.projects.mosaic.modeling import mosaic_model from official.vision.tasks import semantic_segmentation as seg_tasks @task_factory.register_task_cls(mosaic_config.MosaicSemanticSegmentationTask) class MosaicSemanticSegmentationTask(seg_tasks.SemanticSegmentationTask): """A task for semantic segmentation using a MOSAIC model.""" # Note: the `build_model` method is overridden to add an additional # `training` flag for the purpose of indicating whether the model is built # for performing `training` or `eval`. This is to make sure the model is # initialized with the proper `input_shape` if the model will be trained and # evaluated with different `input_shape`s. For example, the model is trained # with cropping but evaluated with original shape. def build_model(self, training: bool = True) -> tf.keras.Model: """Builds MOSAIC segmentation model.""" input_specs = tf.keras.layers.InputSpec( shape=[None] + self.task_config.model.input_size) l2_weight_decay = self.task_config.losses.l2_weight_decay # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss. # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2) # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss) l2_regularizer = (tf.keras.regularizers.l2( l2_weight_decay / 2.0) if l2_weight_decay else None) model = mosaic_model.build_mosaic_segmentation_model( input_specs=input_specs, model_config=self.task_config.model, l2_regularizer=l2_regularizer) # Note: Create a dummy input and call model instance to initialize. # This ensures all the layers are built; otherwise some layers may be # missing from the model and cannot be associated with variables from # a loaded checkpoint. The input size is determined by whether the model # is built for performing training or eval. if training: input_size = self.task_config.train_data.output_size crop_size = self.task_config.train_data.crop_size if crop_size: input_size = crop_size else: input_size = self.task_config.validation_data.output_size if len(self.task_config.model.input_size) == 3: input_channel = self.task_config.model.input_size[-1] else: input_channel = 3 dummy_input = tf.ones(shape=[1] + input_size + [input_channel]) model(dummy_input) return model def initialize(self, model: tf.keras.Model): """Loads pretrained checkpoint.""" if not self.task_config.init_checkpoint: return ckpt_dir_or_file = self.task_config.init_checkpoint if tf.io.gfile.isdir(ckpt_dir_or_file): ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) # Restoring checkpoint. 
if 'all' in self.task_config.init_checkpoint_modules: ckpt = tf.train.Checkpoint(**model.checkpoint_items) status = ckpt.read(ckpt_dir_or_file) status.expect_partial().assert_existing_objects_matched() else: ckpt_items = {} if 'backbone' in self.task_config.init_checkpoint_modules: ckpt_items.update(backbone=model.backbone) if 'neck' in self.task_config.init_checkpoint_modules: ckpt_items.update(neck=model.neck) ckpt = tf.train.Checkpoint(**ckpt_items) status = ckpt.read(ckpt_dir_or_file) status.expect_partial().assert_existing_objects_matched() logging.info('Finished loading pretrained checkpoint from %s', ckpt_dir_or_file)
4,303
40.786408
78
py
models
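The dummy forward pass in `build_model` above is a general Keras pattern: run the model once so every layer builds its variables before a checkpoint is matched against them. A toy sketch with stock layers and hypothetical sizes:

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, padding="same"),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(10),
])

training = True
crop_size = [64, 64]       # hypothetical train-time crop
output_size = [128, 128]   # hypothetical eval resolution
input_size = (crop_size or output_size) if training else output_size

dummy_input = tf.ones([1] + input_size + [3])
_ = model(dummy_input)  # builds all variables
ckpt = tf.train.Checkpoint(model=model)  # now safe to read a checkpoint into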
models-master/official/projects/mosaic/qat/serving/export_module.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Export modules for QAT model serving/inference.""" import tensorflow as tf from official.projects.mosaic.modeling import mosaic_model from official.projects.mosaic.qat.modeling import factory as qat_factory from official.vision.serving import semantic_segmentation class MosaicModule(semantic_segmentation.SegmentationModule): """MOSAIC Module.""" def _build_model(self) -> tf.keras.Model: input_specs = tf.keras.layers.InputSpec(shape=[1] + self._input_image_size + [3]) model = mosaic_model.build_mosaic_segmentation_model( input_specs=input_specs, model_config=self.params.task.model, l2_regularizer=None) dummy_input = tf.ones(shape=input_specs.shape) model(dummy_input) # Check whether "quantization" is in task config to support both the # `quantized` and `non-quantized` versions of MOSAIC. if hasattr(self.params.task, "quantization"): return qat_factory.build_qat_mosaic_model( model, self.params.task.quantization, input_specs) return model
1,685
37.318182
74
py
models
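The `hasattr` check above gates QAT conversion on the presence of a `quantization` field in the task config. A generic sketch of that gate (`task_params` and `convert_fn` are hypothetical stand-ins, e.g. for `qat_factory.build_qat_mosaic_model`):

import tensorflow as tf

def maybe_quantize(model: tf.keras.Model, task_params, convert_fn):
  # Convert only when the config carries quantization settings; otherwise
  # return the float model untouched.
  if hasattr(task_params, "quantization"):
    return convert_fn(model, task_params.quantization)
  return model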
models-master/official/projects/mosaic/qat/modeling/factory_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for factory.py.""" # Import libraries from absl.testing import parameterized import numpy as np import tensorflow as tf from official.projects.mosaic.modeling import mosaic_blocks from official.projects.mosaic.modeling import mosaic_head from official.projects.mosaic.modeling import mosaic_model from official.projects.mosaic.qat.modeling import factory as qat_factory from official.projects.qat.vision.configs import common from official.vision.modeling import backbones from official.vision.modeling.heads import segmentation_heads class SegmentationModelBuilderTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (128, [4, 8], [3, 2], ['concat_merge', 'sum_merge']), (128, [1, 4, 8], [3, 2], ['concat_merge', 'sum_merge']), (128, [1, 4, 8], [3, 2], ['sum_merge', 'sum_merge']), (128, [1, 4, 8], [3, 2], ['concat_merge', 'concat_merge']), (512, [1, 4, 8, 16], [3, 2], ['concat_merge', 'sum_merge']), (256, [4, 8], [3, 2], ['concat_merge', 'sum_merge']), (256, [1, 4, 8], [3, 2], ['concat_merge', 'sum_merge']), (256, [1, 4, 8, 16], [3, 2], ['concat_merge', 'sum_merge']), ) def test_mosaic_segmentation_model(self, input_size, pyramid_pool_bin_nums, decoder_input_levels, decoder_stage_merge_styles): """Test for building and calling of a MOSAIC segmentation network.""" num_classes = 32 tf.keras.backend.set_image_data_format('channels_last') backbone = backbones.MobileNet(model_id='MobileNetMultiAVGSeg') encoder_input_level = 4 # Create a regular FP32 MOSAIC model. neck = mosaic_blocks.MosaicEncoderBlock( encoder_input_level=encoder_input_level, branch_filter_depths=[64, 64], conv_kernel_sizes=[3, 5], pyramid_pool_bin_nums=pyramid_pool_bin_nums) head = mosaic_head.MosaicDecoderHead( num_classes=num_classes, decoder_input_levels=decoder_input_levels, decoder_stage_merge_styles=decoder_stage_merge_styles, decoder_filters=[64, 64], decoder_projected_filters=[32, 32]) mask_scoring_head = segmentation_heads.MaskScoring( num_classes=num_classes, num_convs=1, num_filters=32, fc_dims=128, num_fcs=2, fc_input_size=[8, 8], ) model = mosaic_model.MosaicSegmentationModel( backbone=backbone, head=head, neck=neck, mask_scoring_head=mask_scoring_head, ) inputs = np.random.rand(2, input_size, input_size, 3) input_specs = tf.keras.layers.InputSpec(shape=inputs.shape) expected_outputs = model(inputs) # Create a quantized MOSAIC model from the regular FP32 model instance. quantization_config = common.Quantization() quantized_model = qat_factory.build_qat_mosaic_model( model=model, quantization=quantization_config, input_specs=input_specs) actual_output = quantized_model(inputs) self.assertAllEqual(actual_output['logits'].numpy().shape, expected_outputs['logits'].numpy().shape) if __name__ == '__main__': tf.test.main()
3,796
37.353535
77
py
models
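The shape assertion at the end of the test above reflects a general invariant: quantize-wrapping should not change a model's output shape. A minimal sketch with a toy model and the stock `tfmot` one-shot API (not the project's factory):

import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot

float_model = tf.keras.Sequential([
    tf.keras.layers.InputLayer((32, 32, 3)),
    tf.keras.layers.Conv2D(8, 3, padding="same", activation="relu"),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(10),
])
qat_model = tfmot.quantization.keras.quantize_model(float_model)

x = np.random.rand(2, 32, 32, 3).astype("float32")
assert qat_model(x).shape == float_model(x).shape  # parity preserved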
models-master/official/projects/mosaic/qat/modeling/factory.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Factory methods to build models.""" # Import libraries import tensorflow as tf import tensorflow_model_optimization as tfmot from official.projects.mosaic.modeling import mosaic_blocks from official.projects.mosaic.modeling import mosaic_head from official.projects.mosaic.modeling import mosaic_model from official.projects.mosaic.qat.modeling.heads import mosaic_head as qat_mosaic_head from official.projects.mosaic.qat.modeling.layers import nn_blocks as qat_nn_blocks from official.projects.qat.vision.configs import common from official.projects.qat.vision.modeling.layers import nn_layers as qat_nn_layers from official.projects.qat.vision.quantization import helper from official.projects.qat.vision.quantization import schemes def build_qat_mosaic_model( model: tf.keras.Model, quantization: common.Quantization, input_specs: tf.keras.layers.InputSpec) -> tf.keras.Model: """Applies quantization aware training to a MOSAIC segmentation model. Args: model: The model to apply quantization aware training to. quantization: The Quantization config. input_specs: The shape specifications of the input tensor. Returns: The model with quantization aware training applied. """ original_checkpoint = quantization.pretrained_original_checkpoint if original_checkpoint is not None: ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items) status = ckpt.read(original_checkpoint) status.expect_partial().assert_existing_objects_matched() scope_dict = { 'L2': tf.keras.regularizers.l2, } model.use_legacy_config = True # Ensures old Keras serialization format # Apply QAT to backbone (a tf.keras.Model) first, and then neck and head. with tfmot.quantization.keras.quantize_scope(scope_dict): annotated_backbone = tfmot.quantization.keras.quantize_annotate_model( model.backbone) optimized_backbone = tfmot.quantization.keras.quantize_apply( annotated_backbone, scheme=schemes.Default8BitQuantizeScheme()) # Check for valid encoder and head. 
if not isinstance(model.head, mosaic_head.MosaicDecoderHead): raise ValueError('Only support MosaicDecoderHead for head.') if not isinstance(model.neck, mosaic_blocks.MosaicEncoderBlock): raise ValueError('Only support MosaicEncoderBlock for encoder.') head = qat_mosaic_head.MosaicDecoderHeadQuantized.from_config( model.head.get_config()) neck = qat_nn_blocks.MosaicEncoderBlockQuantized.from_config( model.neck.get_config()) mask_scoring_head = None if model.mask_scoring_head is not None: mask_scoring_head = qat_nn_layers.MaskScoringQuantized.from_config( model.mask_scoring_head.get_config() ) optimized_model = mosaic_model.MosaicSegmentationModel( backbone=optimized_backbone, head=head, neck=neck, mask_scoring_head=mask_scoring_head, ) dummy_input = tf.zeros([1] + list(input_specs.shape[1:])) optimized_model(dummy_input, training=True) helper.copy_original_weights(model.head, optimized_model.head) helper.copy_original_weights(model.neck, optimized_model.neck) if model.mask_scoring_head is not None: helper.copy_original_weights( model.mask_scoring_head, optimized_model.mask_scoring_head ) return optimized_model
3,920
38.21
86
py
models
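The backbone conversion above uses the two-step annotate-then-apply flow. A minimal sketch of that flow on a toy model, using the stock 8-bit scheme (the project instead passes `schemes.Default8BitQuantizeScheme()` to `quantize_apply`):

import tensorflow as tf
import tensorflow_model_optimization as tfmot

backbone = tf.keras.Sequential([
    tf.keras.layers.InputLayer((32, 32, 3)),
    tf.keras.layers.Conv2D(8, 3, padding="same", activation="relu"),
])
annotated = tfmot.quantization.keras.quantize_annotate_model(backbone)
quantized = tfmot.quantization.keras.quantize_apply(annotated)
_ = quantized(tf.zeros([1, 32, 32, 3]))  # forward pass builds QAT variables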
models-master/official/projects/mosaic/qat/modeling/layers/nn_blocks_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for nn_blocks.""" from typing import Any, Iterable, Tuple # Import libraries from absl.testing import parameterized import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.projects.mosaic.qat.modeling.layers import nn_blocks def distribution_strategy_combinations() -> Iterable[Tuple[Any, ...]]: """Returns the combinations of end-to-end tests to run.""" return combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy_gpu, ], ) class NNBlocksTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (nn_blocks.MultiKernelGroupConvBlockQuantized, [32, 64]), (nn_blocks.MultiKernelGroupConvBlockQuantized, [64, 128]), ) def test_multi_kernel_grouped_convolution_block_creation( self, block_fn, output_filter_depths): input_size = 32 inputs = tf.keras.Input(shape=(input_size, input_size, 16), batch_size=1) block = block_fn( output_filter_depths=output_filter_depths, kernel_sizes=[3, 3]) features = block(inputs) self.assertAllEqual([1, input_size, input_size, sum(output_filter_depths)], features.shape.as_list()) @parameterized.parameters( (nn_blocks.MosaicEncoderBlockQuantized, [32, 64], [3, 3], [2, 2]), (nn_blocks.MosaicEncoderBlockQuantized, [64, 128], [3, 1], [2, 4]), (nn_blocks.MosaicEncoderBlockQuantized, [128, 256], [1, 1], [1, 1]), (nn_blocks.MosaicEncoderBlockQuantized, [128, 256], [3, 3], [4, 4]), ) def test_mosaic_encoder_block_creation(self, block_fn, branch_filter_depths, conv_kernel_sizes, pyramid_pool_bin_nums): input_size = 128 in_filters = 24 inputs = tf.keras.Input( shape=(input_size, input_size, in_filters), batch_size=1) block = block_fn( branch_filter_depths=branch_filter_depths, conv_kernel_sizes=conv_kernel_sizes, pyramid_pool_bin_nums=pyramid_pool_bin_nums) features = block(inputs) self.assertAllEqual([1, input_size, input_size, sum(branch_filter_depths)], features.shape.as_list()) @parameterized.parameters( (nn_blocks.DecoderSumMergeBlockQuantized, 32, [128, 64]), (nn_blocks.DecoderSumMergeBlockQuantized, 16, [32, 32]), ) def test_decoder_sum_merge_block_creation(self, block_fn, decoder_projected_depth, output_size): inputs = (tf.keras.Input(shape=(64, 64, 128), batch_size=1), tf.keras.Input(shape=(16, 16, 256), batch_size=1)) block = block_fn( decoder_projected_depth=decoder_projected_depth, output_size=output_size) features = block(inputs) self.assertAllEqual( [1, output_size[0], output_size[1], decoder_projected_depth], features.shape.as_list()) @parameterized.parameters( (nn_blocks.DecoderConcatMergeBlockQuantized, 64, 32, [128, 64]), (nn_blocks.DecoderConcatMergeBlockQuantized, 256, 16, [32, 32]), ) def test_decoder_concat_merge_block_creation(self, block_fn, decoder_internal_depth, decoder_projected_depth, output_size): inputs = (tf.keras.Input(shape=(64, 
64, 128), batch_size=1), tf.keras.Input(shape=(16, 16, 256), batch_size=1)) block = block_fn( decoder_internal_depth=decoder_internal_depth, decoder_projected_depth=decoder_projected_depth, output_size=output_size) features = block(inputs) self.assertAllEqual( [1, output_size[0], output_size[1], decoder_projected_depth], features.shape.as_list()) if __name__ == '__main__': tf.test.main()
4,687
37.743802
78
py
models
models-master/official/projects/mosaic/qat/modeling/layers/nn_blocks.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains quantized neural blocks for the QAT.""" from typing import Dict, Tuple, Union import tensorflow as tf import tensorflow_model_optimization as tfmot from official.modeling import tf_utils from official.projects.mosaic.modeling import mosaic_blocks from official.projects.qat.vision.quantization import configs from official.projects.qat.vision.quantization import helper @tf.keras.utils.register_keras_serializable(package='Vision') class MultiKernelGroupConvBlockQuantized(mosaic_blocks.MultiKernelGroupConvBlock ): """A quantized multi-kernel grouped convolution block. This block is used in the segmentation neck introduced in MOSAIC. Reference: [MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded Context](https://arxiv.org/pdf/2112.11623.pdf) """ def build(self, input_shape: tf.TensorShape) -> None: """Builds the block with the given input shape.""" input_channels = input_shape[self._group_split_axis] if input_channels % self._num_groups != 0: raise ValueError('The number of input channels must be divisible by ' 'the number of groups for evenly group split.') # Override the activation and bn with their quantized version. self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(self._activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) norm_layer = ( tf.keras.layers.experimental.SyncBatchNormalization if self._use_sync_bn else tf.keras.layers.BatchNormalization) norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer) norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer) self._bn_op = helper.norm_by_activation( self._activation, norm_with_quantize, norm_no_quantize) self._conv_branches = [] if self._use_depthwise_convolution: for i, conv_kernel_size in enumerate(self._kernel_sizes): depthwise_conv = helper.DepthwiseConv2DQuantized( kernel_size=(conv_kernel_size, conv_kernel_size), depth_multiplier=1, padding='same', depthwise_regularizer=self._kernel_regularizer, depthwise_initializer=self._kernel_initializer, use_bias=False, activation=helper.NoOpActivation()) # Add BN->RELU after depthwise convolution. batchnorm_op_depthwise = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) activation_depthwise = self._activation_fn feature_conv = helper.Conv2DQuantized( filters=self._output_filter_depths[i], kernel_size=(1, 1), padding='same', kernel_regularizer=self._kernel_regularizer, kernel_initializer=self._kernel_initializer, activation=helper.NoOpActivation(), use_bias=False) batchnorm_op = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) # Use list manually as current QAT API does not support sequential model # within a tf.keras.Sequential block, e.g. 
conv_branch = # tf.keras.Sequential([depthwise_conv, feature_conv, batchnorm_op,]) conv_branch = [ depthwise_conv, batchnorm_op_depthwise, activation_depthwise, feature_conv, batchnorm_op, ] self._conv_branches.append(conv_branch) else: for i, conv_kernel_size in enumerate(self._kernel_sizes): norm_conv = helper.Conv2DQuantized( filters=self._output_filter_depths[i], kernel_size=(conv_kernel_size, conv_kernel_size), padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, activation=helper.NoOpActivation(), use_bias=False) batchnorm_op = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) conv_branch = [norm_conv, batchnorm_op] self._conv_branches.append(conv_branch) self._concat_groups = helper.ConcatenateQuantized( axis=self._group_split_axis) @tf.keras.utils.register_keras_serializable(package='Vision') class MosaicEncoderBlockQuantized(mosaic_blocks.MosaicEncoderBlock): """Implements the encoder module/block of MOSAIC model. Spatial Pyramid Pooling and Multi-kernel Conv layer SpatialPyramidPoolingMultiKernelConv References: [MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded context](https://arxiv.org/pdf/2112.11623.pdf) """ def build( self, input_shape: Union[tf.TensorShape, Dict[str, tf.TensorShape]]) -> None: """Builds this MOSAIC encoder block with the given single input shape.""" input_shape = ( input_shape[self._encoder_input_level] if isinstance(input_shape, dict) else input_shape) self._data_format = tf.keras.backend.image_data_format() if self._data_format == 'channels_last': height = input_shape[1] width = input_shape[2] else: height = input_shape[2] width = input_shape[3] self._global_pool_branch = None self._spatial_pyramid = [] # Override the activation and bn with their quantized version. 
self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(self._activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) norm_layer = ( tf.keras.layers.experimental.SyncBatchNormalization if self._use_sync_bn else tf.keras.layers.BatchNormalization) norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer) norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer) self._bn_op = helper.norm_by_activation( self._activation, norm_with_quantize, norm_no_quantize) for pyramid_pool_bin_num in self._pyramid_pool_bin_nums: if pyramid_pool_bin_num == 1: global_pool = helper.GlobalAveragePooling2DQuantized( data_format=self._data_format, keepdims=True) global_projection = helper.Conv2DQuantized( filters=max(self._branch_filter_depths), kernel_size=(1, 1), padding='same', activation=helper.NoOpActivation(), kernel_regularizer=self._kernel_regularizer, kernel_initializer=self._kernel_initializer, use_bias=False) batch_norm_global_branch = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) # Use list manually instead of tf.keras.Sequential([]) self._global_pool_branch = [ global_pool, global_projection, batch_norm_global_branch, ] else: if height < pyramid_pool_bin_num or width < pyramid_pool_bin_num: raise ValueError('The number of pooling bins must be smaller than ' 'input sizes.') assert pyramid_pool_bin_num >= 2, ( 'Except for the global pooling, the number of bins in pyramid ' 'pooling must be at least two.') pool_height, stride_height = self._get_bin_pool_kernel_and_stride( height, pyramid_pool_bin_num) pool_width, stride_width = self._get_bin_pool_kernel_and_stride( width, pyramid_pool_bin_num) bin_pool_level = helper.AveragePooling2DQuantized( pool_size=(pool_height, pool_width), strides=(stride_height, stride_width), padding='valid', data_format=self._data_format) self._spatial_pyramid.append(bin_pool_level) # Grouped multi-kernel Convolution. self._multi_kernel_group_conv = MultiKernelGroupConvBlockQuantized( output_filter_depths=self._branch_filter_depths, kernel_sizes=self._conv_kernel_sizes, use_sync_bn=self._use_sync_bn, batchnorm_momentum=self._batchnorm_momentum, batchnorm_epsilon=self._batchnorm_epsilon, activation=self._activation, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_depthwise_convolution=self._use_depthwise_convolution) # Encoder's final 1x1 feature projection. # Considering the relatively large #channels merged before projection, # enlarge the projection #channels to the sum of the filter depths of # branches. self._output_channels = sum(self._branch_filter_depths) # Use list manually instead of tf.keras.Sequential([]). self._encoder_projection = [ helper.Conv2DQuantized( filters=self._output_channels, kernel_size=(1, 1), padding='same', activation=helper.NoOpActivation(), kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_bias=False), self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon), ] # Use the TF2 default feature alignment rule for bilinear resizing. 
self._upsample = helper.ResizingQuantized( height, width, interpolation=self._interpolation, crop_to_aspect_ratio=False) self._concat_layer = helper.ConcatenateQuantized(axis=self._channel_axis) @tf.keras.utils.register_keras_serializable(package='Vision') class DecoderSumMergeBlockQuantized(mosaic_blocks.DecoderSumMergeBlock): """Implements the decoder feature sum merge block of MOSAIC model. This block is used in the decoder of segmentation head introduced in MOSAIC. It essentially merges a high-resolution feature map of a low semantic level and a low-resolution feature map of a higher semantic level by 'Sum-Merge'. """ def build( self, input_shape: Tuple[tf.TensorShape, tf.TensorShape]) -> None: """Builds the block with the given input shape.""" # Assume backbone features of the same level are concated before input. low_res_input_shape = input_shape[0] high_res_input_shape = input_shape[1] low_res_channels = low_res_input_shape[self._channel_axis] high_res_channels = high_res_input_shape[self._channel_axis] # Override the activation and bn with their quantized version. self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(self._activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) norm_layer = ( tf.keras.layers.experimental.SyncBatchNormalization if self._use_sync_bn else tf.keras.layers.BatchNormalization) norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer) norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer) self._bn_op = helper.norm_by_activation( self._activation, norm_with_quantize, norm_no_quantize) if low_res_channels != self._decoder_projected_depth: low_res_feature_conv = helper.Conv2DQuantized( filters=self._decoder_projected_depth, kernel_size=(1, 1), padding='same', kernel_regularizer=self._kernel_regularizer, kernel_initializer=self._kernel_initializer, activation=helper.NoOpActivation(), use_bias=False) batchnorm_op = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) self._low_res_branch = [ low_res_feature_conv, batchnorm_op, ] if high_res_channels != self._decoder_projected_depth: high_res_feature_conv = helper.Conv2DQuantized( filters=self._decoder_projected_depth, kernel_size=(1, 1), padding='same', kernel_regularizer=self._kernel_regularizer, kernel_initializer=self._kernel_initializer, activation=helper.NoOpActivation(), use_bias=False) batchnorm_op_high = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) self._high_res_branch = [ high_res_feature_conv, batchnorm_op_high, ] # Resize feature maps. 
if tf.keras.backend.image_data_format() == 'channels_last': low_res_height = low_res_input_shape[1] low_res_width = low_res_input_shape[2] high_res_height = high_res_input_shape[1] high_res_width = high_res_input_shape[2] else: low_res_height = low_res_input_shape[2] low_res_width = low_res_input_shape[3] high_res_height = high_res_input_shape[2] high_res_width = high_res_input_shape[3] if (self._output_size[0] == 0 or self._output_size[1] == 0): self._output_size = (high_res_height, high_res_width) if (low_res_height != self._output_size[0] or low_res_width != self._output_size[1]): self._upsample_low_res = helper.ResizingQuantized( self._output_size[0], self._output_size[1], interpolation=self._interpolation, crop_to_aspect_ratio=False) if (high_res_height != self._output_size[0] or high_res_width != self._output_size[1]): self._upsample_high_res = helper.ResizingQuantized( self._output_size[0], self._output_size[1], interpolation=self._interpolation, crop_to_aspect_ratio=False) self._add_layer = tfmot.quantization.keras.QuantizeWrapperV2( tf.keras.layers.Add(), configs.Default8BitQuantizeConfig([], [], True)) @tf.keras.utils.register_keras_serializable(package='Vision') class DecoderConcatMergeBlockQuantized(mosaic_blocks.DecoderConcatMergeBlock): """Implements the decoder feature concat merge block of MOSAIC model. This block is used in the decoder of segmentation head introduced in MOSAIC. It essentially merges a high-resolution feature map of a low semantic level and a low-resolution feature of a higher semantic level by 'Concat-Merge'. """ def build( self, input_shape: Tuple[tf.TensorShape, tf.TensorShape]) -> None: """Builds this block with the given input shape.""" # Assume backbone features of the same level are concated before input. low_res_input_shape = input_shape[0] high_res_input_shape = input_shape[1] # Set up resizing feature maps before concat. if tf.keras.backend.image_data_format() == 'channels_last': low_res_height = low_res_input_shape[1] low_res_width = low_res_input_shape[2] high_res_height = high_res_input_shape[1] high_res_width = high_res_input_shape[2] else: low_res_height = low_res_input_shape[2] low_res_width = low_res_input_shape[3] high_res_height = high_res_input_shape[2] high_res_width = high_res_input_shape[3] self._concat_layer = helper.ConcatenateQuantized(axis=self._channel_axis) # Override the activation and bn with their quantized version. 
self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(self._activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) norm_layer = ( tf.keras.layers.experimental.SyncBatchNormalization if self._use_sync_bn else tf.keras.layers.BatchNormalization) norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer) norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer) self._bn_op = helper.norm_by_activation( self._activation, norm_with_quantize, norm_no_quantize) if (self._output_size[0] == 0 or self._output_size[1] == 0): self._output_size = (high_res_height, high_res_width) if (low_res_height != self._output_size[0] or low_res_width != self._output_size[1]): self._upsample_low_res = helper.ResizingQuantized( self._output_size[0], self._output_size[1], interpolation=self._interpolation, crop_to_aspect_ratio=False) if (high_res_height != self._output_size[0] or high_res_width != self._output_size[1]): self._upsample_high_res = helper.ResizingQuantized( self._output_size[0], self._output_size[1], interpolation=self._interpolation, crop_to_aspect_ratio=False) # Set up a 3-layer separable convolution block, i.e. # 1x1->BN->RELU + Depthwise->BN->RELU + 1x1->BN->RELU. initial_feature_conv = helper.Conv2DQuantized( filters=self._decoder_internal_depth, kernel_size=(1, 1), padding='same', kernel_regularizer=self._kernel_regularizer, kernel_initializer=self._kernel_initializer, activation=helper.NoOpActivation(), use_bias=False) batchnorm_op1 = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) activation1 = self._activation_fn depthwise_conv = helper.DepthwiseConv2DQuantized( kernel_size=(3, 3), depth_multiplier=1, padding='same', depthwise_regularizer=self._kernel_regularizer, depthwise_initializer=self._kernel_initializer, use_bias=False, activation=helper.NoOpActivation()) batchnorm_op2 = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) activation2 = self._activation_fn project_feature_conv = helper.Conv2DQuantized( filters=self._decoder_projected_depth, kernel_size=(1, 1), padding='same', kernel_regularizer=self._kernel_regularizer, kernel_initializer=self._kernel_initializer, activation=helper.NoOpActivation(), use_bias=False) batchnorm_op3 = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) activation3 = self._activation_fn self._feature_fusion_block = [ initial_feature_conv, batchnorm_op1, activation1, depthwise_conv, batchnorm_op2, activation2, project_feature_conv, batchnorm_op3, activation3, ]
19,266
41.910913
80
py
models
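Several comments in the file above note that branches are stored as plain Python lists because the current QAT API does not support nested `tf.keras.Sequential` models. A toy sketch of that workaround, applying a tracked list of layers in order inside `call()`:

import tensorflow as tf

class BranchBlock(tf.keras.layers.Layer):
  """Applies conv -> BN -> ReLU from a plain list, not a Sequential."""

  def build(self, input_shape):
    self._branch = [
        tf.keras.layers.Conv2D(8, 1, padding="same", use_bias=False),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.ReLU(),
    ]

  def call(self, inputs, training=None):
    x = inputs
    for layer in self._branch:  # sequential application, no Sequential()
      x = layer(x, training=training)
    return x

out = BranchBlock()(tf.zeros([1, 16, 16, 4]))  # shape (1, 16, 16, 8)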
models-master/official/projects/mosaic/qat/modeling/heads/mosaic_head.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of segmentation head of the MOSAIC model.""" from typing import List, Optional import tensorflow as tf import tensorflow_model_optimization as tfmot from official.modeling import tf_utils from official.projects.mosaic.modeling import mosaic_head from official.projects.mosaic.qat.modeling.layers import nn_blocks from official.projects.qat.vision.quantization import configs from official.projects.qat.vision.quantization import helper @tf.keras.utils.register_keras_serializable(package='Vision') class MosaicDecoderHeadQuantized(mosaic_head.MosaicDecoderHead): """Creates a quantized MOSAIC decoder in segmentation head. Reference: [MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded Context](https://arxiv.org/pdf/2112.11623.pdf) """ def __init__( self, num_classes: int, decoder_input_levels: Optional[List[str]] = None, decoder_stage_merge_styles: Optional[List[str]] = None, decoder_filters: Optional[List[int]] = None, decoder_projected_filters: Optional[List[int]] = None, encoder_end_level: Optional[int] = 4, use_additional_classifier_layer: bool = False, classifier_kernel_size: int = 1, activation: str = 'relu', use_sync_bn: bool = False, batchnorm_momentum: float = 0.99, batchnorm_epsilon: float = 0.001, kernel_initializer: str = 'GlorotUniform', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, interpolation: str = 'bilinear', bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a MOSAIC segmentation head. Args: num_classes: An `int` number of mask classification categories. The number of classes does not include background class. decoder_input_levels: A list of `str` specifying additional input levels from the backbone outputs for mask refinement in decoder. decoder_stage_merge_styles: A list of `str` specifying the merge style at each stage of the decoder, merge styles can be 'concat_merge' or 'sum_merge'. decoder_filters: A list of integers specifying the number of channels used at each decoder stage. Note: this only has an effect if the decoder merge style is 'concat_merge'. decoder_projected_filters: A list of integers specifying the number of projected channels at the end of each decoder stage. encoder_end_level: An optional integer specifying the output level of the encoder stage, which is used if the input from the encoder to the decoder head is a dictionary. use_additional_classifier_layer: A `bool` specifying whether to use an additional classifier layer or not. It must be True if the final decoder projected filters does not match the `num_classes`. classifier_kernel_size: An `int` number to specify the kernel size of the classifier layer. activation: A `str` that indicates which activation is used, e.g. 'relu', 'swish', etc. use_sync_bn: A `bool` that indicates whether to use synchronized batch normalization across different replicas. 
batchnorm_momentum: A `float` of normalization momentum for the moving average. batchnorm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_initializer: Kernel initializer for conv layers. Defaults to `glorot_uniform`. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. interpolation: The interpolation method for upsampling. Defaults to `bilinear`. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. **kwargs: Additional keyword arguments to be passed. """ super().__init__( num_classes=num_classes, decoder_input_levels=decoder_input_levels, decoder_stage_merge_styles=decoder_stage_merge_styles, decoder_filters=decoder_filters, decoder_projected_filters=decoder_projected_filters, encoder_end_level=encoder_end_level, use_additional_classifier_layer=use_additional_classifier_layer, classifier_kernel_size=classifier_kernel_size, activation=activation, use_sync_bn=use_sync_bn, batchnorm_momentum=batchnorm_momentum, batchnorm_epsilon=batchnorm_epsilon, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, interpolation=interpolation, bias_regularizer=bias_regularizer, **kwargs) # Assuming decoder_input_levels and the following lists are sorted and # follow the same order. if decoder_input_levels is None: decoder_input_levels = ['3', '2'] if decoder_stage_merge_styles is None: decoder_stage_merge_styles = ['concat_merge', 'sum_merge'] if decoder_filters is None: decoder_filters = [64, 64] if decoder_projected_filters is None: decoder_projected_filters = [32, 32] self._decoder_input_levels = decoder_input_levels self._decoder_stage_merge_styles = decoder_stage_merge_styles self._decoder_filters = decoder_filters self._decoder_projected_filters = decoder_projected_filters if (len(decoder_input_levels) != len(decoder_stage_merge_styles) or len(decoder_input_levels) != len(decoder_filters) or len(decoder_input_levels) != len(decoder_projected_filters)): raise ValueError('The number of Decoder inputs and settings must match.') self._merge_stages = [] for (stage_merge_style, decoder_filter, decoder_projected_filter) in zip(decoder_stage_merge_styles, decoder_filters, decoder_projected_filters): if stage_merge_style == 'concat_merge': concat_merge_stage = nn_blocks.DecoderConcatMergeBlockQuantized( decoder_internal_depth=decoder_filter, decoder_projected_depth=decoder_projected_filter, output_size=(0, 0), use_sync_bn=use_sync_bn, batchnorm_momentum=batchnorm_momentum, batchnorm_epsilon=batchnorm_epsilon, activation=activation, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, interpolation=interpolation) self._merge_stages.append(concat_merge_stage) elif stage_merge_style == 'sum_merge': sum_merge_stage = nn_blocks.DecoderSumMergeBlockQuantized( decoder_projected_depth=decoder_projected_filter, output_size=(0, 0), use_sync_bn=use_sync_bn, batchnorm_momentum=batchnorm_momentum, batchnorm_epsilon=batchnorm_epsilon, activation=activation, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, interpolation=interpolation) self._merge_stages.append(sum_merge_stage) else: raise ValueError( 'A stage merge style in MOSAIC Decoder can only be concat_merge ' 'or sum_merge.') # Concat merge or sum merge does not require an additional classifier layer # unless the final decoder projected filter does not match num_classes. 
final_decoder_projected_filter = decoder_projected_filters[-1] if (final_decoder_projected_filter != num_classes and not use_additional_classifier_layer): raise ValueError('Additional classifier layer is needed if final decoder ' 'projected filters does not match num_classes!') self._use_additional_classifier_layer = use_additional_classifier_layer if use_additional_classifier_layer: # This additional classification layer uses different kernel # initializers and bias compared to earlier blocks. self._pixelwise_classifier = helper.Conv2DQuantized( name='pixelwise_classifier', filters=num_classes, kernel_size=classifier_kernel_size, padding='same', bias_initializer=tf.zeros_initializer(), kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activation=helper.NoOpActivation(), use_bias=True) self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2( tf_utils.get_activation(activation, use_keras_layer=True), configs.Default8BitActivationQuantizeConfig()) self._config_dict = { 'num_classes': num_classes, 'decoder_input_levels': decoder_input_levels, 'decoder_stage_merge_styles': decoder_stage_merge_styles, 'decoder_filters': decoder_filters, 'decoder_projected_filters': decoder_projected_filters, 'encoder_end_level': encoder_end_level, 'use_additional_classifier_layer': use_additional_classifier_layer, 'classifier_kernel_size': classifier_kernel_size, 'activation': activation, 'use_sync_bn': use_sync_bn, 'batchnorm_momentum': batchnorm_momentum, 'batchnorm_epsilon': batchnorm_epsilon, 'kernel_initializer': kernel_initializer, 'kernel_regularizer': kernel_regularizer, 'interpolation': interpolation, 'bias_regularizer': bias_regularizer }
10,060
46.457547
80
py
models
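The QAT factory builds this quantized head from the float head via `from_config(get_config())`, then copies weights across. A generic sketch of that round-trip with a stock layer (Dense here, purely for illustration):

import tensorflow as tf

float_layer = tf.keras.layers.Dense(4, activation="relu")
float_layer.build((None, 8))

clone = tf.keras.layers.Dense.from_config(float_layer.get_config())
clone.build((None, 8))
clone.set_weights(float_layer.get_weights())  # weight-for-weight copy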
models-master/official/projects/mosaic/qat/tasks/mosaic_tasks.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Semantic segmentation task definition.""" import tensorflow as tf from official.core import task_factory from official.projects.mosaic import mosaic_tasks from official.projects.mosaic.qat.configs import mosaic_config as exp_cfg from official.projects.mosaic.qat.modeling import factory @task_factory.register_task_cls(exp_cfg.MosaicSemanticSegmentationTask) class MosaicSemanticSegmentationTask(mosaic_tasks.MosaicSemanticSegmentationTask ): """A task for semantic segmentation with QAT.""" def build_model(self, training=True) -> tf.keras.Model: """Builds semantic segmentation model with QAT.""" model = super().build_model(training) if training: input_size = self.task_config.train_data.output_size crop_size = self.task_config.train_data.crop_size if crop_size: input_size = crop_size else: input_size = self.task_config.validation_data.output_size input_specs = tf.keras.layers.InputSpec(shape=[None] + input_size + [3]) if self.task_config.quantization: model = factory.build_qat_mosaic_model( model, self.task_config.quantization, input_specs) return model
1,797
39.863636
80
py
models
models-master/official/projects/mosaic/modeling/mosaic_head.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of segmentation head of the MOSAIC model.""" from typing import Any, Dict, List, Mapping, Optional, Tuple, Union import tensorflow as tf from official.modeling import tf_utils from official.projects.mosaic.modeling import mosaic_blocks @tf.keras.utils.register_keras_serializable(package='Vision') class MosaicDecoderHead(tf.keras.layers.Layer): """Creates a MOSAIC decoder in segmentation head. Reference: [MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded Context](https://arxiv.org/pdf/2112.11623.pdf) """ def __init__( self, num_classes: int, decoder_input_levels: Optional[List[str]] = None, decoder_stage_merge_styles: Optional[List[str]] = None, decoder_filters: Optional[List[int]] = None, decoder_projected_filters: Optional[List[int]] = None, encoder_end_level: Optional[int] = 4, use_additional_classifier_layer: bool = False, classifier_kernel_size: int = 1, activation: str = 'relu', use_sync_bn: bool = False, batchnorm_momentum: float = 0.99, batchnorm_epsilon: float = 0.001, kernel_initializer: str = 'GlorotUniform', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, interpolation: str = 'bilinear', bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a MOSAIC segmentation head. Args: num_classes: An `int` number of mask classification categories. The number of classes does not include background class. decoder_input_levels: A list of `str` specifying additional input levels from the backbone outputs for mask refinement in decoder. decoder_stage_merge_styles: A list of `str` specifying the merge style at each stage of the decoder, merge styles can be 'concat_merge' or 'sum_merge'. decoder_filters: A list of integers specifying the number of channels used at each decoder stage. Note: this only has an effect if the decoder merge style is 'concat_merge'. decoder_projected_filters: A list of integers specifying the number of projected channels at the end of each decoder stage. encoder_end_level: An optional integer specifying the output level of the encoder stage, which is used if the input from the encoder to the decoder head is a dictionary. use_additional_classifier_layer: A `bool` specifying whether to use an additional classifier layer or not. It must be True if the final decoder projected filters does not match the `num_classes`. classifier_kernel_size: An `int` number to specify the kernel size of the classifier layer. activation: A `str` that indicates which activation is used, e.g. 'relu', 'swish', etc. use_sync_bn: A `bool` that indicates whether to use synchronized batch normalization across different replicas. batchnorm_momentum: A `float` of normalization momentum for the moving average. batchnorm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_initializer: Kernel initializer for conv layers. Defaults to `glorot_uniform`. 
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. interpolation: The interpolation method for upsampling. Defaults to `bilinear`. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. **kwargs: Additional keyword arguments to be passed. """ super(MosaicDecoderHead, self).__init__(**kwargs) # Assuming 'decoder_input_levels' are sorted in descending order and the # other settings are listed in the order according to 'decoder_input_levels'. if decoder_input_levels is None: decoder_input_levels = ['3', '2'] if decoder_stage_merge_styles is None: decoder_stage_merge_styles = ['concat_merge', 'sum_merge'] if decoder_filters is None: decoder_filters = [64, 64] if decoder_projected_filters is None: decoder_projected_filters = [32, 32] self._decoder_input_levels = decoder_input_levels self._decoder_stage_merge_styles = decoder_stage_merge_styles self._decoder_filters = decoder_filters self._decoder_projected_filters = decoder_projected_filters if (len(decoder_input_levels) != len(decoder_stage_merge_styles) or len(decoder_input_levels) != len(decoder_filters) or len(decoder_input_levels) != len(decoder_projected_filters)): raise ValueError('The number of Decoder inputs and settings must match.') self._merge_stages = [] for (stage_merge_style, decoder_filter, decoder_projected_filter) in zip(decoder_stage_merge_styles, decoder_filters, decoder_projected_filters): if stage_merge_style == 'concat_merge': concat_merge_stage = mosaic_blocks.DecoderConcatMergeBlock( decoder_internal_depth=decoder_filter, decoder_projected_depth=decoder_projected_filter, output_size=(0, 0), use_sync_bn=use_sync_bn, batchnorm_momentum=batchnorm_momentum, batchnorm_epsilon=batchnorm_epsilon, activation=activation, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, interpolation=interpolation) self._merge_stages.append(concat_merge_stage) elif stage_merge_style == 'sum_merge': sum_merge_stage = mosaic_blocks.DecoderSumMergeBlock( decoder_projected_depth=decoder_projected_filter, output_size=(0, 0), use_sync_bn=use_sync_bn, batchnorm_momentum=batchnorm_momentum, batchnorm_epsilon=batchnorm_epsilon, activation=activation, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, interpolation=interpolation) self._merge_stages.append(sum_merge_stage) else: raise ValueError( 'A stage merge style in MOSAIC Decoder can only be concat_merge ' 'or sum_merge.') # Concat merge or sum merge does not require an additional classifier layer # unless the final decoder projected filter does not match num_classes. final_decoder_projected_filter = decoder_projected_filters[-1] if (final_decoder_projected_filter != num_classes and not use_additional_classifier_layer): raise ValueError('Additional classifier layer is needed if final decoder ' 'projected filters does not match num_classes!') self._use_additional_classifier_layer = use_additional_classifier_layer if use_additional_classifier_layer: # This additional classification layer uses different kernel # initializers and bias compared to earlier blocks. 
self._pixelwise_classifier = tf.keras.layers.Conv2D( name='pixelwise_classifier', filters=num_classes, kernel_size=classifier_kernel_size, padding='same', bias_initializer=tf.zeros_initializer(), kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True) self._activation_fn = tf_utils.get_activation(activation) self._config_dict = { 'num_classes': num_classes, 'decoder_input_levels': decoder_input_levels, 'decoder_stage_merge_styles': decoder_stage_merge_styles, 'decoder_filters': decoder_filters, 'decoder_projected_filters': decoder_projected_filters, 'encoder_end_level': encoder_end_level, 'use_additional_classifier_layer': use_additional_classifier_layer, 'classifier_kernel_size': classifier_kernel_size, 'activation': activation, 'use_sync_bn': use_sync_bn, 'batchnorm_momentum': batchnorm_momentum, 'batchnorm_epsilon': batchnorm_epsilon, 'kernel_initializer': kernel_initializer, 'kernel_regularizer': kernel_regularizer, 'interpolation': interpolation, 'bias_regularizer': bias_regularizer } def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]], Union[tf.Tensor, Mapping[str, tf.Tensor]]], training: Optional[bool] = None) -> tf.Tensor: """Forward pass of the segmentation head. It supports a tuple of 2 elements. Each element is a tensor or a tensor dictionary. The first one is the final (low-resolution) encoder endpoints, and the second one is higher-resolution backbone endpoints. When inputs are tensors, they are from a single level of feature maps. When inputs are dictionaries, they contain multiple levels of feature maps, where the key is the level/index of feature map. Note: 'level' denotes the number of 2x downsampling, defined in backbone. Args: inputs: A tuple of 2 elements, each element can either be a tensor representing feature maps or 1 dictionary of tensors: - key: A `str` of the level of the multilevel features. - values: A `tf.Tensor` of the feature map tensors. The first is encoder endpoints, and the second is backbone endpoints. training: a `Boolean` indicating whether it is in `training` mode. Returns: segmentation mask prediction logits: A `tf.Tensor` representing the output logits before the final segmentation mask. """ encoder_outputs = inputs[0] backbone_outputs = inputs[1] y = encoder_outputs[str( self._config_dict['encoder_end_level'])] if isinstance( encoder_outputs, dict) else encoder_outputs if isinstance(backbone_outputs, dict): for level, merge_stage in zip( self._decoder_input_levels, self._merge_stages): x = backbone_outputs[str(level)] y = merge_stage([y, x], training=training) else: x = backbone_outputs y = self._merge_stages[0]([y, x], training=training) if self._use_additional_classifier_layer: y = self._pixelwise_classifier(y) y = self._activation_fn(y) return y def get_config(self) -> Dict[str, Any]: """Returns a config dictionary for initialization from serialization.""" base_config = super().get_config() base_config.update(self._config_dict) return base_config @classmethod def from_config(cls, config: Dict[str, Any]): return cls(**config)
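A minimal usage sketch of `MosaicDecoderHead` may help here. The batch size, spatial sizes, and channel counts below are illustrative assumptions (any feature pyramid where level k has stride 2**k would do); they are not taken from this file:

import tensorflow as tf
from official.projects.mosaic.modeling import mosaic_head

# Default two-stage decoder: concat-merge with backbone level '3', then
# sum-merge with level '2'. The last projected filter equals num_classes,
# so no additional classifier layer is required.
head = mosaic_head.MosaicDecoderHead(
    num_classes=32,
    decoder_input_levels=['3', '2'],
    decoder_stage_merge_styles=['concat_merge', 'sum_merge'],
    decoder_filters=[64, 64],
    decoder_projected_filters=[32, 32],
    encoder_end_level=4)
# Assumed shapes: level-4 encoder output and level-3/2 backbone outputs.
encoder_outputs = {'4': tf.random.normal([2, 8, 8, 128])}
backbone_outputs = {'3': tf.random.normal([2, 16, 16, 64]),
                    '2': tf.random.normal([2, 32, 32, 32])}
logits = head([encoder_outputs, backbone_outputs], training=False)
# logits.shape == [2, 32, 32, 32]: the resolution of the last merged level.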
11,284
45.440329
80
py
models
models-master/official/projects/mosaic/modeling/mosaic_model_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the overall MOSAIC segmentation network modeling.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from official.projects.mosaic.modeling import mosaic_blocks from official.projects.mosaic.modeling import mosaic_head from official.projects.mosaic.modeling import mosaic_model from official.vision.modeling import backbones from official.vision.modeling.heads import segmentation_heads class SegmentationNetworkTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (128, [4, 8], [3, 2], ['concat_merge', 'sum_merge']), (128, [1, 4, 8], [3, 2], ['concat_merge', 'sum_merge']), (128, [1, 4, 8], [3, 2], ['sum_merge', 'sum_merge']), (128, [1, 4, 8], [3, 2], ['concat_merge', 'concat_merge']), (512, [1, 4, 8, 16], [3, 2], ['concat_merge', 'sum_merge']), (256, [4, 8], [3, 2], ['concat_merge', 'sum_merge']), (256, [1, 4, 8], [3, 2], ['concat_merge', 'sum_merge']), (256, [1, 4, 8, 16], [3, 2], ['concat_merge', 'sum_merge']), ) def test_mosaic_segmentation_model(self, input_size, pyramid_pool_bin_nums, decoder_input_levels, decoder_stage_merge_styles): """Test for building and calling of a MOSAIC segmentation network.""" num_classes = 32 inputs = np.random.rand(2, input_size, input_size, 3) tf.keras.backend.set_image_data_format('channels_last') backbone = backbones.MobileNet(model_id='MobileNetMultiAVGSeg') encoder_input_level = 4 neck = mosaic_blocks.MosaicEncoderBlock( encoder_input_level=encoder_input_level, branch_filter_depths=[64, 64], conv_kernel_sizes=[3, 5], pyramid_pool_bin_nums=pyramid_pool_bin_nums) head = mosaic_head.MosaicDecoderHead( num_classes=num_classes, decoder_input_levels=decoder_input_levels, decoder_stage_merge_styles=decoder_stage_merge_styles, decoder_filters=[64, 64], decoder_projected_filters=[32, 32]) mask_scoring_head = segmentation_heads.MaskScoring( num_classes=num_classes, fc_input_size=[4, 4], num_convs=1, num_filters=32, fc_dims=32, num_fcs=1) model = mosaic_model.MosaicSegmentationModel( backbone=backbone, head=head, neck=neck, mask_scoring_head=mask_scoring_head, ) # Calls the MOSAIC model. 
outputs = model(inputs) level = min(decoder_input_levels) self.assertAllEqual( [2, input_size // (2**level), input_size // (2**level), num_classes], outputs['logits'].numpy().shape) self.assertAllEqual( [2, num_classes], outputs['mask_scores'].numpy().shape) def test_serialize_deserialize(self): """Validate the mosaic network can be serialized and deserialized.""" num_classes = 8 backbone = backbones.ResNet(model_id=50) neck = mosaic_blocks.MosaicEncoderBlock( encoder_input_level=4, branch_filter_depths=[64, 64], conv_kernel_sizes=[3, 5], pyramid_pool_bin_nums=[1, 4, 8, 16]) head = mosaic_head.MosaicDecoderHead( num_classes=num_classes, decoder_input_levels=[3, 2], decoder_stage_merge_styles=['concat_merge', 'sum_merge'], decoder_filters=[64, 64], decoder_projected_filters=[32, 8]) mask_scoring_head = segmentation_heads.MaskScoring( num_classes=num_classes, fc_input_size=[4, 4], num_convs=1, num_filters=32, fc_dims=32, num_fcs=1) model = mosaic_model.MosaicSegmentationModel( backbone=backbone, head=head, neck=neck, mask_scoring_head=mask_scoring_head, ) config = model.get_config() new_model = mosaic_model.MosaicSegmentationModel.from_config(config) # Validate that the config can be forced to JSON. _ = new_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(model.get_config(), new_model.get_config()) if __name__ == '__main__': tf.test.main()
4,825
36.123077
79
py
models
models-master/official/projects/mosaic/modeling/mosaic_blocks.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Definitions of building blocks for MOSAIC model.

Reference:
  [MOSAIC: Mobile Segmentation via decoding Aggregated Information and
  encoded Context](https://arxiv.org/pdf/2112.11623.pdf)
"""

from typing import Any, Dict, List, Optional, Tuple, Union

import tensorflow as tf

from official.modeling import tf_utils


@tf.keras.utils.register_keras_serializable(package='Vision')
class MultiKernelGroupConvBlock(tf.keras.layers.Layer):
  """A multi-kernel grouped convolution block.

  This block is used in the segmentation neck introduced in MOSAIC.

  Reference:
    [MOSAIC: Mobile Segmentation via decoding Aggregated Information and
    encoded Context](https://arxiv.org/pdf/2112.11623.pdf)
  """

  def __init__(
      self,
      output_filter_depths: Optional[List[int]] = None,
      kernel_sizes: Optional[List[int]] = None,
      use_sync_bn: bool = False,
      batchnorm_momentum: float = 0.99,
      batchnorm_epsilon: float = 0.001,
      activation: str = 'relu',
      kernel_initializer: str = 'GlorotUniform',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      use_depthwise_convolution: bool = True,
      **kwargs):
    """Initializes a Multi-kernel Grouped Convolution Block.

    Args:
      output_filter_depths: A list of integers representing the numbers of
        output channels or filter depths of the convolution groups.
      kernel_sizes: A list of integers denoting the convolution kernel sizes
        in each convolution group.
      use_sync_bn: A bool, whether or not to use sync batch normalization.
      batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to
        0.99.
      batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults
        to 0.001.
      activation: A `str` for the activation function type. Defaults to
        'relu'.
      kernel_initializer: Kernel initializer for conv layers. Defaults to
        `glorot_uniform`.
      kernel_regularizer: Kernel regularizer for conv layers. Defaults to
        None.
      use_depthwise_convolution: Allows spatial pooling to use separable
        depthwise convolutions.
      **kwargs: Other keyword arguments for the layer.
    """
    super(MultiKernelGroupConvBlock, self).__init__(**kwargs)
    if output_filter_depths is None:
      output_filter_depths = [64, 64]
    if kernel_sizes is None:
      kernel_sizes = [3, 5]
    if len(output_filter_depths) != len(kernel_sizes):
      raise ValueError('The number of output groups must match #kernels.')
    self._output_filter_depths = output_filter_depths
    self._kernel_sizes = kernel_sizes
    self._num_groups = len(self._kernel_sizes)
    self._use_sync_bn = use_sync_bn
    self._batchnorm_momentum = batchnorm_momentum
    self._batchnorm_epsilon = batchnorm_epsilon
    self._activation = activation
    self._kernel_initializer = kernel_initializer
    self._kernel_regularizer = kernel_regularizer
    self._use_depthwise_convolution = use_depthwise_convolution
    # To apply BN before activation. Putting BN between conv and activation
    # also helps quantization where conv+bn+activation are fused into a
    # single op.
self._activation_fn = tf_utils.get_activation(activation) if self._use_sync_bn: self._bn_op = tf.keras.layers.experimental.SyncBatchNormalization else: self._bn_op = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 self._group_split_axis = -1 else: self._bn_axis = 1 self._group_split_axis = 1 def build(self, input_shape: tf.TensorShape) -> None: """Builds the block with the given input shape.""" input_channels = input_shape[self._group_split_axis] if input_channels % self._num_groups != 0: raise ValueError('The number of input channels must be divisible by ' 'the number of groups for evenly group split.') self._conv_branches = [] if self._use_depthwise_convolution: for i, conv_kernel_size in enumerate(self._kernel_sizes): depthwise_conv = tf.keras.layers.DepthwiseConv2D( kernel_size=(conv_kernel_size, conv_kernel_size), depth_multiplier=1, padding='same', depthwise_regularizer=self._kernel_regularizer, depthwise_initializer=self._kernel_initializer, use_bias=False) # Add BN->RELU after depthwise convolution. batchnorm_op_depthwise = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) activation_depthwise = self._activation_fn feature_conv = tf.keras.layers.Conv2D( filters=self._output_filter_depths[i], kernel_size=(1, 1), padding='same', kernel_regularizer=self._kernel_regularizer, kernel_initializer=self._kernel_initializer, activation=None, use_bias=False) batchnorm_op = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) # Use list manually as current QAT API does not support sequential model # within a tf.keras.Sequential block, e.g. conv_branch = # tf.keras.Sequential([depthwise_conv, feature_conv, batchnorm_op,]) conv_branch = [ depthwise_conv, batchnorm_op_depthwise, activation_depthwise, feature_conv, batchnorm_op, ] self._conv_branches.append(conv_branch) else: for i, conv_kernel_size in enumerate(self._kernel_sizes): norm_conv = tf.keras.layers.Conv2D( filters=self._output_filter_depths[i], kernel_size=(conv_kernel_size, conv_kernel_size), padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, activation=None, use_bias=False) batchnorm_op = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) conv_branch = [norm_conv, batchnorm_op] self._conv_branches.append(conv_branch) self._concat_groups = tf.keras.layers.Concatenate( axis=self._group_split_axis) def call(self, inputs: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor: """Calls this group convolution block with the given inputs.""" inputs_splits = tf.split(inputs, num_or_size_splits=self._num_groups, axis=self._group_split_axis) output_branches = [] for i, x in enumerate(inputs_splits): conv_branch = self._conv_branches[i] # Apply layers sequentially and manually. for layer in conv_branch: if isinstance(layer, tf.keras.layers.Layer): x = layer(x, training=training) else: x = layer(x) # Apply activation function after BN, which also helps quantization # where conv+bn+activation are fused into a single op. 
x = self._activation_fn(x) output_branches.append(x) x = self._concat_groups(output_branches) return x def get_config(self) -> Dict[str, Any]: """Returns a config dictionary for initialization from serialization.""" config = { 'output_filter_depths': self._output_filter_depths, 'kernel_sizes': self._kernel_sizes, 'num_groups': self._num_groups, 'use_sync_bn': self._use_sync_bn, 'batchnorm_momentum': self._batchnorm_momentum, 'batchnorm_epsilon': self._batchnorm_epsilon, 'activation': self._activation, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'use_depthwise_convolution': self._use_depthwise_convolution, } base_config = super(MultiKernelGroupConvBlock, self).get_config() base_config.update(config) return base_config @tf.keras.utils.register_keras_serializable(package='Vision') class MosaicEncoderBlock(tf.keras.layers.Layer): """Implements the encoder module/block of MOSAIC model. Spatial Pyramid Pooling and Multi-kernel Conv layer SpatialPyramidPoolingMultiKernelConv References: [MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded context](https://arxiv.org/pdf/2112.11623.pdf) """ def __init__( self, encoder_input_level: Optional[Union[str, int]] = '4', branch_filter_depths: Optional[List[int]] = None, conv_kernel_sizes: Optional[List[int]] = None, pyramid_pool_bin_nums: Optional[List[int]] = None, use_sync_bn: bool = False, batchnorm_momentum: float = 0.99, batchnorm_epsilon: float = 0.001, activation: str = 'relu', dropout_rate: float = 0.1, kernel_initializer: str = 'glorot_uniform', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, interpolation: str = 'bilinear', use_depthwise_convolution: bool = True, **kwargs): """Initializes a MOSAIC encoder block which is deployed after a backbone. Args: encoder_input_level: An optional `str` or integer specifying the level of backbone outputs as the input to the encoder. branch_filter_depths: A list of integers for the number of convolution channels in each branch at a pyramid level after SpatialPyramidPooling. conv_kernel_sizes: A list of integers representing the convolution kernel sizes in the Multi-kernel Convolution blocks in the encoder. pyramid_pool_bin_nums: A list of integers for the number of bins at each level of the Spatial Pyramid Pooling. use_sync_bn: A bool, whether or not to use sync batch normalization. batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to 0.99. batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to 0.001. activation: A `str` for the activation function type. Defaults to 'relu'. dropout_rate: A float between 0 and 1. Fraction of the input units to drop out, which will be used directly as the `rate` of the Dropout layer at the end of the encoder. Defaults to 0.1. kernel_initializer: Kernel initializer for conv layers. Defaults to `glorot_uniform`. kernel_regularizer: Kernel regularizer for conv layers. Defaults to None. interpolation: The interpolation method for upsampling. Defaults to `bilinear`. use_depthwise_convolution: Use depthwise separable convolusions in the Multi-kernel Convolution blocks in the encoder. **kwargs: Other keyword arguments for the layer. 
""" super().__init__(**kwargs) self._encoder_input_level = str(encoder_input_level) if branch_filter_depths is None: branch_filter_depths = [64, 64] self._branch_filter_depths = branch_filter_depths if conv_kernel_sizes is None: conv_kernel_sizes = [3, 5] self._conv_kernel_sizes = conv_kernel_sizes if pyramid_pool_bin_nums is None: pyramid_pool_bin_nums = [1, 4, 8, 16] self._pyramid_pool_bin_nums = pyramid_pool_bin_nums self._use_sync_bn = use_sync_bn self._batchnorm_momentum = batchnorm_momentum self._batchnorm_epsilon = batchnorm_epsilon self._activation = activation self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._interpolation = interpolation self._use_depthwise_convolution = use_depthwise_convolution self._activation_fn = tf_utils.get_activation(activation) if self._use_sync_bn: self._bn_op = tf.keras.layers.experimental.SyncBatchNormalization else: self._bn_op = tf.keras.layers.BatchNormalization self._dropout_rate = dropout_rate if dropout_rate: self._encoder_end_dropout_layer = tf.keras.layers.Dropout( rate=dropout_rate) else: self._encoder_end_dropout_layer = None if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 self._channel_axis = -1 else: self._bn_axis = 1 self._channel_axis = 1 def _get_bin_pool_kernel_and_stride( self, input_size: int, num_of_bin: int) -> Tuple[int, int]: """Calculates the kernel size and stride for spatial bin pooling. Args: input_size: Input dimension (a scalar). num_of_bin: The number of bins used for spatial bin pooling. Returns: The Kernel and Stride for spatial bin pooling (a scalar). """ bin_overlap = int(input_size % num_of_bin) pooling_stride = int(input_size // num_of_bin) pooling_kernel = pooling_stride + bin_overlap return pooling_kernel, pooling_stride def build( self, input_shape: Union[tf.TensorShape, Dict[str, tf.TensorShape]]) -> None: """Builds this MOSAIC encoder block with the given single input shape.""" input_shape = ( input_shape[self._encoder_input_level] if isinstance(input_shape, dict) else input_shape) self._data_format = tf.keras.backend.image_data_format() if self._data_format == 'channels_last': height = input_shape[1] width = input_shape[2] else: height = input_shape[2] width = input_shape[3] self._global_pool_branch = None self._spatial_pyramid = [] for pyramid_pool_bin_num in self._pyramid_pool_bin_nums: if pyramid_pool_bin_num == 1: global_pool = tf.keras.layers.GlobalAveragePooling2D( data_format=self._data_format, keepdims=True) global_projection = tf.keras.layers.Conv2D( filters=max(self._branch_filter_depths), kernel_size=(1, 1), padding='same', activation=None, kernel_regularizer=self._kernel_regularizer, kernel_initializer=self._kernel_initializer, use_bias=False) batch_norm_global_branch = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) # Use list manually instead of tf.keras.Sequential([]) self._global_pool_branch = [ global_pool, global_projection, batch_norm_global_branch, ] else: if height < pyramid_pool_bin_num or width < pyramid_pool_bin_num: raise ValueError('The number of pooling bins must be smaller than ' 'input sizes.') assert pyramid_pool_bin_num >= 2, ( 'Except for the gloabl pooling, the number of bins in pyramid ' 'pooling must be at least two.') pool_height, stride_height = self._get_bin_pool_kernel_and_stride( height, pyramid_pool_bin_num) pool_width, stride_width = self._get_bin_pool_kernel_and_stride( width, pyramid_pool_bin_num) bin_pool_level = 
tf.keras.layers.AveragePooling2D( pool_size=(pool_height, pool_width), strides=(stride_height, stride_width), padding='valid', data_format=self._data_format) self._spatial_pyramid.append(bin_pool_level) # Grouped multi-kernel Convolution. self._multi_kernel_group_conv = MultiKernelGroupConvBlock( output_filter_depths=self._branch_filter_depths, kernel_sizes=self._conv_kernel_sizes, use_sync_bn=self._use_sync_bn, batchnorm_momentum=self._batchnorm_momentum, batchnorm_epsilon=self._batchnorm_epsilon, activation=self._activation, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_depthwise_convolution=self._use_depthwise_convolution) # Encoder's final 1x1 feature projection. # Considering the relatively large #channels merged before projection, # enlarge the projection #channels to the sum of the filter depths of # branches. self._output_channels = sum(self._branch_filter_depths) # Use list manually instead of tf.keras.Sequential([]). self._encoder_projection = [ tf.keras.layers.Conv2D( filters=self._output_channels, kernel_size=(1, 1), padding='same', activation=None, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_bias=False), self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon), ] # Use the TF2 default feature alignment rule for bilinear resizing. self._upsample = tf.keras.layers.Resizing( height, width, interpolation=self._interpolation, crop_to_aspect_ratio=False) self._concat_layer = tf.keras.layers.Concatenate(axis=self._channel_axis) def call(self, inputs: Union[tf.Tensor, Dict[str, tf.Tensor]], training: Optional[bool] = None) -> tf.Tensor: """Calls this MOSAIC encoder block with the given input.""" if training is None: training = tf.keras.backend.learning_phase() input_from_backbone_output = ( inputs[self._encoder_input_level] if isinstance(inputs, dict) else inputs) branches = [] # Original features from the final output of the backbone. 
branches.append(input_from_backbone_output) if self._spatial_pyramid: for bin_pool_level in self._spatial_pyramid: x = input_from_backbone_output x = bin_pool_level(x) x = self._multi_kernel_group_conv(x, training=training) x = self._upsample(x) branches.append(x) if self._global_pool_branch is not None: x = input_from_backbone_output for layer in self._global_pool_branch: x = layer(x, training=training) x = self._activation_fn(x) x = self._upsample(x) branches.append(x) x = self._concat_layer(branches) for layer in self._encoder_projection: x = layer(x, training=training) x = self._activation_fn(x) if self._encoder_end_dropout_layer is not None: x = self._encoder_end_dropout_layer(x, training=training) return x def get_config(self) -> Dict[str, Any]: """Returns a config dictionary for initialization from serialization.""" config = { 'encoder_input_level': self._encoder_input_level, 'branch_filter_depths': self._branch_filter_depths, 'conv_kernel_sizes': self._conv_kernel_sizes, 'pyramid_pool_bin_nums': self._pyramid_pool_bin_nums, 'use_sync_bn': self._use_sync_bn, 'batchnorm_momentum': self._batchnorm_momentum, 'batchnorm_epsilon': self._batchnorm_epsilon, 'activation': self._activation, 'dropout_rate': self._dropout_rate, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'interpolation': self._interpolation, 'use_depthwise_convolution': self._use_depthwise_convolution, } base_config = super().get_config() base_config.update(config) return base_config @tf.keras.utils.register_keras_serializable(package='Vision') class DecoderSumMergeBlock(tf.keras.layers.Layer): """Implements the decoder feature sum merge block of MOSAIC model. This block is used in the decoder of segmentation head introduced in MOSAIC. It essentially merges a high-resolution feature map of a low semantic level and a low-resolution feature map of a higher semantic level by 'Sum-Merge'. """ def __init__( self, decoder_projected_depth: int, output_size: Tuple[int, int] = (0, 0), use_sync_bn: bool = False, batchnorm_momentum: float = 0.99, batchnorm_epsilon: float = 0.001, activation: str = 'relu', kernel_initializer: str = 'GlorotUniform', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, interpolation: str = 'bilinear', **kwargs): """Initialize a sum-merge block for one decoder stage. Args: decoder_projected_depth: An integer representing the number of output channels of this sum-merge block in the decoder. output_size: A Tuple of integers representing the output height and width of the feature maps from this sum-merge block. Defaults to (0, 0), where the output size is set the same as the high-resolution branch. use_sync_bn: A bool, whether or not to use sync batch normalization. batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to 0.99. batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to 0.001. activation: A `str` for the activation function type. Defaults to 'relu'. kernel_initializer: Kernel initializer for conv layers. Defaults to `glorot_uniform`. kernel_regularizer: Kernel regularizer for conv layers. Defaults to None. interpolation: The interpolation method for upsampling. Defaults to `bilinear`. **kwargs: Other keyword arguments for the layer. 
""" super(DecoderSumMergeBlock, self).__init__(**kwargs) self._decoder_projected_depth = decoder_projected_depth self._output_size = output_size self._low_res_branch = [] self._upsample_low_res = None self._high_res_branch = [] self._upsample_high_res = None self._use_sync_bn = use_sync_bn self._batchnorm_momentum = batchnorm_momentum self._batchnorm_epsilon = batchnorm_epsilon self._activation = activation self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._interpolation = interpolation # Apply BN before activation. Putting BN between conv and activation also # helps quantization where conv+bn+activation are fused into a single op. self._activation_fn = tf_utils.get_activation(activation) if self._use_sync_bn: self._bn_op = tf.keras.layers.experimental.SyncBatchNormalization else: self._bn_op = tf.keras.layers.BatchNormalization self._bn_axis = ( -1 if tf.keras.backend.image_data_format() == 'channels_last' else 1) self._channel_axis = ( -1 if tf.keras.backend.image_data_format() == 'channels_last' else 1) self._add_layer = tf.keras.layers.Add() def build( self, input_shape: Tuple[tf.TensorShape, tf.TensorShape]) -> None: """Builds the block with the given input shape.""" # Assume backbone features of the same level are concated before input. low_res_input_shape = input_shape[0] high_res_input_shape = input_shape[1] low_res_channels = low_res_input_shape[self._channel_axis] high_res_channels = high_res_input_shape[self._channel_axis] if low_res_channels != self._decoder_projected_depth: low_res_feature_conv = tf.keras.layers.Conv2D( filters=self._decoder_projected_depth, kernel_size=(1, 1), padding='same', kernel_regularizer=self._kernel_regularizer, kernel_initializer=self._kernel_initializer, activation=None, use_bias=False) batchnorm_op = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) self._low_res_branch.extend([ low_res_feature_conv, batchnorm_op, ]) if high_res_channels != self._decoder_projected_depth: high_res_feature_conv = tf.keras.layers.Conv2D( filters=self._decoder_projected_depth, kernel_size=(1, 1), padding='same', kernel_regularizer=self._kernel_regularizer, kernel_initializer=self._kernel_initializer, activation=None, use_bias=False) batchnorm_op_high = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon) self._high_res_branch.extend([ high_res_feature_conv, batchnorm_op_high, ]) # Resize feature maps. 
if tf.keras.backend.image_data_format() == 'channels_last': low_res_height = low_res_input_shape[1] low_res_width = low_res_input_shape[2] high_res_height = high_res_input_shape[1] high_res_width = high_res_input_shape[2] else: low_res_height = low_res_input_shape[2] low_res_width = low_res_input_shape[3] high_res_height = high_res_input_shape[2] high_res_width = high_res_input_shape[3] if (self._output_size[0] == 0 or self._output_size[1] == 0): self._output_size = (high_res_height, high_res_width) if (low_res_height != self._output_size[0] or low_res_width != self._output_size[1]): self._upsample_low_res = tf.keras.layers.Resizing( self._output_size[0], self._output_size[1], interpolation=self._interpolation, crop_to_aspect_ratio=False) if (high_res_height != self._output_size[0] or high_res_width != self._output_size[1]): self._upsample_high_res = tf.keras.layers.Resizing( self._output_size[0], self._output_size[1], interpolation=self._interpolation, crop_to_aspect_ratio=False) def call(self, inputs: Tuple[tf.Tensor, tf.Tensor], training: Optional[bool] = None) -> tf.Tensor: """Calls this decoder sum-merge block with the given input. Args: inputs: A Tuple of tensors consisting of a low-resolution higher-semantic level feature map from the encoder as the first item and a higher resolution lower-level feature map from the backbone as the second item. training: a `bool` indicating whether it is in `training` mode. Note: the first item of the input Tuple takes a lower-resolution feature map and the second item of the input Tuple takes a higher-resolution branch. Returns: A tensor representing the sum-merged decoder feature map. """ if training is None: training = tf.keras.backend.learning_phase() x_low_res = inputs[0] x_high_res = inputs[1] if self._low_res_branch: for layer in self._low_res_branch: x_low_res = layer(x_low_res, training=training) x_low_res = self._activation_fn(x_low_res) if self._high_res_branch: for layer in self._high_res_branch: x_high_res = layer(x_high_res, training=training) x_high_res = self._activation_fn(x_high_res) if self._upsample_low_res is not None: x_low_res = self._upsample_low_res(x_low_res) if self._upsample_high_res is not None: x_high_res = self._upsample_high_res(x_high_res) output = self._add_layer([x_low_res, x_high_res]) return output def get_config(self) -> Dict[str, Any]: """Returns a config dictionary for initialization from serialization.""" config = { 'decoder_projected_depth': self._decoder_projected_depth, 'output_size': self._output_size, 'use_sync_bn': self._use_sync_bn, 'batchnorm_momentum': self._batchnorm_momentum, 'batchnorm_epsilon': self._batchnorm_epsilon, 'activation': self._activation, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'interpolation': self._interpolation, } base_config = super(DecoderSumMergeBlock, self).get_config() base_config.update(config) return base_config @tf.keras.utils.register_keras_serializable(package='Vision') class DecoderConcatMergeBlock(tf.keras.layers.Layer): """Implements the decoder feature concat merge block of MOSAIC model. This block is used in the decoder of segmentation head introduced in MOSAIC. It essentially merges a high-resolution feature map of a low semantic level and a low-resolution feature of a higher semantic level by 'Concat-Merge'. 
""" def __init__( self, decoder_internal_depth: int, decoder_projected_depth: int, output_size: Tuple[int, int] = (0, 0), use_sync_bn: bool = False, batchnorm_momentum: float = 0.99, batchnorm_epsilon: float = 0.001, activation: str = 'relu', kernel_initializer: str = 'GlorotUniform', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, interpolation: str = 'bilinear', **kwargs): """Initializes a concat-merge block for one decoder stage. Args: decoder_internal_depth: An integer representing the number of internal channels of this concat-merge block in the decoder. decoder_projected_depth: An integer representing the number of output channels of this concat-merge block in the decoder. output_size: A Tuple of integers representing the output height and width of the feature maps from this concat-merge block. Defaults to (0, 0), where the output size is set the same as the high-resolution branch. use_sync_bn: A bool, whether or not to use sync batch normalization. batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to 0.99. batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to 0.001. activation: A `str` for the activation function type. Defaults to 'relu'. kernel_initializer: Kernel initializer for conv layers. Defaults to `glorot_uniform`. kernel_regularizer: Kernel regularizer for conv layers. Defaults to None. interpolation: The interpolation method for upsampling. Defaults to `bilinear`. **kwargs: Other keyword arguments for the layer. """ super(DecoderConcatMergeBlock, self).__init__(**kwargs) self._decoder_internal_depth = decoder_internal_depth self._decoder_projected_depth = decoder_projected_depth self._output_size = output_size self._upsample_low_res = None self._upsample_high_res = None self._use_sync_bn = use_sync_bn self._batchnorm_momentum = batchnorm_momentum self._batchnorm_epsilon = batchnorm_epsilon self._activation = activation self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._interpolation = interpolation # Apply BN before activation. Putting BN between conv and activation also # helps quantization where conv+bn+activation are fused into a single op. self._activation_fn = tf_utils.get_activation(activation) if self._use_sync_bn: self._bn_op = tf.keras.layers.experimental.SyncBatchNormalization else: self._bn_op = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 self._channel_axis = -1 else: self._bn_axis = 1 self._channel_axis = 1 def build( self, input_shape: Tuple[tf.TensorShape, tf.TensorShape]) -> None: """Builds this block with the given input shape.""" # Assume backbone features of the same level are concated before input. low_res_input_shape = input_shape[0] high_res_input_shape = input_shape[1] # Set up resizing feature maps before concat. 
    if tf.keras.backend.image_data_format() == 'channels_last':
      low_res_height = low_res_input_shape[1]
      low_res_width = low_res_input_shape[2]
      high_res_height = high_res_input_shape[1]
      high_res_width = high_res_input_shape[2]
    else:
      low_res_height = low_res_input_shape[2]
      low_res_width = low_res_input_shape[3]
      high_res_height = high_res_input_shape[2]
      high_res_width = high_res_input_shape[3]
    if (self._output_size[0] == 0 or self._output_size[1] == 0):
      self._output_size = (high_res_height, high_res_width)
    if (low_res_height != self._output_size[0] or
        low_res_width != self._output_size[1]):
      self._upsample_low_res = tf.keras.layers.Resizing(
          self._output_size[0],
          self._output_size[1],
          interpolation=self._interpolation,
          crop_to_aspect_ratio=False)
    if (high_res_height != self._output_size[0] or
        high_res_width != self._output_size[1]):
      self._upsample_high_res = tf.keras.layers.Resizing(
          self._output_size[0],
          self._output_size[1],
          interpolation=self._interpolation,
          crop_to_aspect_ratio=False)
    # Set up a 3-layer separable convolution block, i.e.
    # 1x1->BN->RELU + Depthwise->BN->RELU + 1x1->BN->RELU.
    initial_feature_conv = tf.keras.layers.Conv2D(
        filters=self._decoder_internal_depth,
        kernel_size=(1, 1),
        padding='same',
        kernel_regularizer=self._kernel_regularizer,
        kernel_initializer=self._kernel_initializer,
        activation=None,
        use_bias=False)
    batchnorm_op1 = self._bn_op(
        axis=self._bn_axis,
        momentum=self._batchnorm_momentum,
        epsilon=self._batchnorm_epsilon)
    activation1 = self._activation_fn
    depthwise_conv = tf.keras.layers.DepthwiseConv2D(
        kernel_size=(3, 3),
        depth_multiplier=1,
        padding='same',
        depthwise_regularizer=self._kernel_regularizer,
        depthwise_initializer=self._kernel_initializer,
        use_bias=False)
    batchnorm_op2 = self._bn_op(
        axis=self._bn_axis,
        momentum=self._batchnorm_momentum,
        epsilon=self._batchnorm_epsilon)
    activation2 = self._activation_fn
    project_feature_conv = tf.keras.layers.Conv2D(
        filters=self._decoder_projected_depth,
        kernel_size=(1, 1),
        padding='same',
        kernel_regularizer=self._kernel_regularizer,
        kernel_initializer=self._kernel_initializer,
        activation=None,
        use_bias=False)
    batchnorm_op3 = self._bn_op(
        axis=self._bn_axis,
        momentum=self._batchnorm_momentum,
        epsilon=self._batchnorm_epsilon)
    activation3 = self._activation_fn
    self._feature_fusion_block = [
        initial_feature_conv,
        batchnorm_op1,
        activation1,
        depthwise_conv,
        batchnorm_op2,
        activation2,
        project_feature_conv,
        batchnorm_op3,
        activation3,
    ]
    self._concat_layer = tf.keras.layers.Concatenate(axis=self._channel_axis)

  def call(self,
           inputs: Tuple[tf.Tensor, tf.Tensor],
           training: Optional[bool] = None) -> tf.Tensor:
    """Calls this concat-merge block with the given inputs.

    Args:
      inputs: A Tuple of tensors consisting of a low-resolution higher-level
        feature map from the encoder as the first item and a
        higher-resolution lower-level feature map from the backbone as the
        second item.
      training: a `Boolean` indicating whether it is in `training` mode.

    Returns:
      A tensor representing the concat-merged decoder feature map.
""" low_res_input = inputs[0] high_res_input = inputs[1] if self._upsample_low_res is not None: low_res_input = self._upsample_low_res(low_res_input) if self._upsample_high_res is not None: high_res_input = self._upsample_high_res(high_res_input) decoder_feature_list = [low_res_input, high_res_input] x = self._concat_layer(decoder_feature_list) for layer in self._feature_fusion_block: if isinstance(layer, tf.keras.layers.Layer): x = layer(x, training=training) else: x = layer(x) return x def get_config(self) -> Dict[str, Any]: """Returns a config dictionary for initialization from serialization.""" config = { 'decoder_internal_depth': self._decoder_internal_depth, 'decoder_projected_depth': self._decoder_projected_depth, 'output_size': self._output_size, 'use_sync_bn': self._use_sync_bn, 'batchnorm_momentum': self._batchnorm_momentum, 'batchnorm_epsilon': self._batchnorm_epsilon, 'activation': self._activation, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'interpolation': self._interpolation, } base_config = super(DecoderConcatMergeBlock, self).get_config() base_config.update(config) return base_config
36,947
40.702032
80
py
models
models-master/official/projects/mosaic/modeling/mosaic_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Builds the overall MOSAIC segmentation models."""
from typing import Any, Dict, Optional, Union

import tensorflow as tf

from official.projects.mosaic.configs import mosaic_config
from official.projects.mosaic.modeling import mosaic_blocks
from official.projects.mosaic.modeling import mosaic_head
from official.vision.modeling import backbones
from official.vision.modeling.heads import segmentation_heads


@tf.keras.utils.register_keras_serializable(package='Vision')
class MosaicSegmentationModel(tf.keras.Model):
  """A model class for segmentation using MOSAIC.

  Input images are passed through a backbone first. A MOSAIC neck encoder
  network is then applied, and finally a MOSAIC segmentation head is applied
  on the outputs of the backbone and neck encoder network. Feature fusion and
  decoding are done in the segmentation head.

  Reference:
    [MOSAIC: Mobile Segmentation via decoding Aggregated Information and
    encoded Context](https://arxiv.org/pdf/2112.11623.pdf)
  """

  def __init__(self,
               backbone: tf.keras.Model,
               head: tf.keras.layers.Layer,
               neck: Optional[tf.keras.layers.Layer] = None,
               mask_scoring_head: Optional[tf.keras.layers.Layer] = None,
               **kwargs):
    """Segmentation initialization function.

    Args:
      backbone: A backbone network.
      head: A segmentation head, e.g. MOSAIC decoder.
      neck: An optional neck encoder network, e.g. MOSAIC encoder. If it is
        not provided, the decoder head will be connected directly with the
        backbone.
      mask_scoring_head: An optional mask scoring head.
      **kwargs: keyword arguments to be passed.
""" super(MosaicSegmentationModel, self).__init__(**kwargs) self._config_dict = { 'backbone': backbone, 'neck': neck, 'head': head, 'mask_scoring_head': mask_scoring_head, } self.backbone = backbone self.neck = neck self.head = head self.mask_scoring_head = mask_scoring_head def call(self, # pytype: disable=signature-mismatch # overriding-parameter-count-checks inputs: tf.Tensor, training: bool = None) -> Dict[str, tf.Tensor]: backbone_features = self.backbone(inputs) if self.neck is not None: neck_features = self.neck(backbone_features, training=training) else: neck_features = backbone_features logits = self.head([neck_features, backbone_features], training=training) outputs = {'logits': logits} if self.mask_scoring_head: mask_scores = self.mask_scoring_head(logits) outputs.update({'mask_scores': mask_scores}) return outputs @property def checkpoint_items( self) -> Dict[str, Union[tf.keras.Model, tf.keras.layers.Layer]]: """Returns a dictionary of items to be additionally checkpointed.""" items = dict(backbone=self.backbone, head=self.head) if self.neck is not None: items.update(neck=self.neck) if self.mask_scoring_head is not None: items.update(mask_scoring_head=self.mask_scoring_head) return items def get_config(self) -> Dict[str, Any]: """Returns a config dictionary for initialization from serialization.""" base_config = super().get_config() model_config = base_config model_config.update(self._config_dict) return model_config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) def build_mosaic_segmentation_model( input_specs: tf.keras.layers.InputSpec, model_config: mosaic_config.MosaicSemanticSegmentationModel, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, backbone: Optional[tf.keras.Model] = None, neck: Optional[tf.keras.layers.Layer] = None ) -> tf.keras.Model: """Builds MOSAIC Segmentation model.""" norm_activation_config = model_config.norm_activation if backbone is None: backbone = backbones.factory.build_backbone( input_specs=input_specs, backbone_config=model_config.backbone, norm_activation_config=norm_activation_config, l2_regularizer=l2_regularizer) if neck is None: neck_config = model_config.neck neck = mosaic_blocks.MosaicEncoderBlock( encoder_input_level=neck_config.encoder_input_level, branch_filter_depths=neck_config.branch_filter_depths, conv_kernel_sizes=neck_config.conv_kernel_sizes, pyramid_pool_bin_nums=neck_config.pyramid_pool_bin_nums, use_sync_bn=norm_activation_config.use_sync_bn, batchnorm_momentum=norm_activation_config.norm_momentum, batchnorm_epsilon=norm_activation_config.norm_epsilon, activation=neck_config.activation, dropout_rate=neck_config.dropout_rate, kernel_initializer=neck_config.kernel_initializer, kernel_regularizer=l2_regularizer, interpolation=neck_config.interpolation, use_depthwise_convolution=neck_config.use_depthwise_convolution) head_config = model_config.head head = mosaic_head.MosaicDecoderHead( num_classes=model_config.num_classes, decoder_input_levels=head_config.decoder_input_levels, decoder_stage_merge_styles=head_config.decoder_stage_merge_styles, decoder_filters=head_config.decoder_filters, decoder_projected_filters=head_config.decoder_projected_filters, encoder_end_level=head_config.encoder_end_level, use_additional_classifier_layer=head_config .use_additional_classifier_layer, classifier_kernel_size=head_config.classifier_kernel_size, activation=head_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, batchnorm_momentum=norm_activation_config.norm_momentum, 
batchnorm_epsilon=norm_activation_config.norm_epsilon, kernel_initializer=head_config.kernel_initializer, kernel_regularizer=l2_regularizer, interpolation=head_config.interpolation) mask_scoring_head = None if model_config.mask_scoring_head: mask_scoring_head = segmentation_heads.MaskScoring( num_classes=model_config.num_classes, **model_config.mask_scoring_head.as_dict(), activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer) model = MosaicSegmentationModel( backbone=backbone, neck=neck, head=head, mask_scoring_head=mask_scoring_head) return model
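A direct-assembly sketch, mirroring one parameterization of the unit test earlier in this section (the 128x128 input and the channel counts are illustrative):

import numpy as np
from official.projects.mosaic.modeling import mosaic_blocks
from official.projects.mosaic.modeling import mosaic_head
from official.projects.mosaic.modeling import mosaic_model
from official.vision.modeling import backbones

backbone = backbones.MobileNet(model_id='MobileNetMultiAVGSeg')
neck = mosaic_blocks.MosaicEncoderBlock(
    encoder_input_level=4,
    branch_filter_depths=[64, 64],
    conv_kernel_sizes=[3, 5],
    pyramid_pool_bin_nums=[1, 4, 8])
head = mosaic_head.MosaicDecoderHead(
    num_classes=32,
    decoder_input_levels=[3, 2],
    decoder_stage_merge_styles=['concat_merge', 'sum_merge'],
    decoder_filters=[64, 64],
    decoder_projected_filters=[32, 32])
model = mosaic_model.MosaicSegmentationModel(
    backbone=backbone, head=head, neck=neck)
outputs = model(np.random.rand(2, 128, 128, 3))
# Per the test's shape assertion, outputs['logits'] has shape
# [2, 32, 32, 32]: level-2 resolution (128 / 2**2) with num_classes channels.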
7,164
38.805556
91
py
models
models-master/official/projects/centernet/utils/tf2_centernet_checkpoint_converter.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A converter from a tf1 OD API checkpoint to a tf2 checkpoint."""

from absl import app
from absl import flags
from absl import logging
import tensorflow as tf

from official.projects.centernet.common import registry_imports  # pylint: disable=unused-import
from official.projects.centernet.configs import backbones
from official.projects.centernet.configs import centernet
from official.projects.centernet.modeling import centernet_model
from official.projects.centernet.modeling.heads import centernet_head
from official.projects.centernet.modeling.layers import detection_generator
from official.projects.centernet.utils.checkpoints import load_weights
from official.projects.centernet.utils.checkpoints import read_checkpoints
from official.vision.modeling.backbones import factory

FLAGS = flags.FLAGS

flags.DEFINE_string("checkpoint_to_convert", None,
                    "Initial checkpoint from a pretrained model.")
flags.DEFINE_string("checkpoint_backbone_name", "hourglass104_512",
                    "Indicate the desired backbone configuration.")
flags.DEFINE_string("checkpoint_head_name", "detection_2d",
                    "Indicate the desired head configuration.")
flags.DEFINE_string("converted_checkpoint_path", None,
                    "Output path of converted checkpoint.")
flags.DEFINE_integer("hourglass_id", 52,
                     "Model id of hourglass backbone.")
flags.DEFINE_integer("num_hourglasses", 2,
                     "Number of hourglass blocks in backbone.")


def _create_centernet_model(model_id: int = 52,
                            num_hourglasses: int = 2
                           ) -> centernet_model.CenterNetModel:
  """Create centernet model to load TF1 weights."""
  task_config = centernet.CenterNetTask(
      model=centernet.CenterNetModel(
          backbone=backbones.Backbone(
              type="hourglass",
              hourglass=backbones.Hourglass(
                  model_id=model_id, num_hourglasses=num_hourglasses))))
  model_config = task_config.model

  backbone = factory.build_backbone(
      input_specs=tf.keras.layers.InputSpec(shape=[1, 512, 512, 3]),
      backbone_config=model_config.backbone,
      norm_activation_config=model_config.norm_activation)

  task_outputs = task_config.get_output_length_dict()
  head = centernet_head.CenterNetHead(
      input_specs=backbone.output_specs,
      task_outputs=task_outputs,
      input_levels=model_config.head.input_levels)

  detect_generator_obj = detection_generator.CenterNetDetectionGenerator()

  model = centernet_model.CenterNetModel(
      backbone=backbone, head=head, detection_generator=detect_generator_obj)
  logging.info("Successfully created centernet model.")
  return model


def _load_weights(model: centernet_model.CenterNetModel,
                  ckpt_dir_or_file: str,
                  ckpt_backbone_name: str,
                  ckpt_head_name: str):
  """Read TF1 checkpoint and load the weights to centernet model."""
  weights_dict, _ = read_checkpoints.get_ckpt_weights_as_dict(
      ckpt_dir_or_file)
  load_weights.load_weights_model(
      model=model,
      weights_dict=weights_dict,
      backbone_name=ckpt_backbone_name,
      head_name=ckpt_head_name)


def _save_checkpoint(model: centernet_model.CenterNetModel, ckpt_dir: str):
  """Save the TF2 centernet model checkpoint."""
checkpoint = tf.train.Checkpoint(model=model, **model.checkpoint_items) manager = tf.train.CheckpointManager(checkpoint, directory=ckpt_dir, max_to_keep=3) manager.save() logging.info("Save checkpoint to %s.", ckpt_dir) def convert_checkpoint(model_id: int, num_hourglasses: int, ckpt_dir_or_file: str, ckpt_backbone_name: str, ckpt_head_name: str, output_ckpt_dir: str): """Convert the TF1 OD API checkpoint to a tf2 checkpoint.""" model = _create_centernet_model( model_id=model_id, num_hourglasses=num_hourglasses) _load_weights( model=model, ckpt_dir_or_file=ckpt_dir_or_file, ckpt_backbone_name=ckpt_backbone_name, ckpt_head_name=ckpt_head_name) _save_checkpoint( model=model, ckpt_dir=output_ckpt_dir) def main(_): convert_checkpoint( model_id=FLAGS.hourglass_id, num_hourglasses=FLAGS.num_hourglasses, ckpt_dir_or_file=FLAGS.checkpoint_to_convert, ckpt_backbone_name=FLAGS.checkpoint_backbone_name, ckpt_head_name=FLAGS.checkpoint_head_name, output_ckpt_dir=FLAGS.converted_checkpoint_path) if __name__ == "__main__": app.run(main)
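A sketch of how this converter might be invoked from the command line, using the flags defined above (the checkpoint paths are placeholders):

# python tf2_centernet_checkpoint_converter.py \
#   --checkpoint_to_convert=/path/to/tf1_odapi_checkpoint \
#   --checkpoint_backbone_name=hourglass104_512 \
#   --checkpoint_head_name=detection_2d \
#   --hourglass_id=52 \
#   --num_hourglasses=2 \
#   --converted_checkpoint_path=/path/to/output_dir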
5,327
37.890511
96
py
models
models-master/official/projects/centernet/utils/checkpoints/load_weights.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Functions used to load the ODAPI CenterNet checkpoint."""

from official.projects.centernet.modeling.layers import cn_nn_blocks
from official.projects.centernet.utils.checkpoints import config_classes
from official.projects.centernet.utils.checkpoints import config_data
from official.vision.modeling.backbones import mobilenet
from official.vision.modeling.layers import nn_blocks

Conv2DBNCFG = config_classes.Conv2DBNCFG
HeadConvCFG = config_classes.HeadConvCFG
ResidualBlockCFG = config_classes.ResidualBlockCFG
HourglassCFG = config_classes.HourglassCFG

BackboneConfigData = config_data.BackboneConfigData
HeadConfigData = config_data.HeadConfigData


def get_backbone_layer_cfgs(weights_dict, backbone_name):
  """Fetches the config classes for the backbone.

  This function generates a list of config classes corresponding to each
  building block in the backbone.

  Args:
    weights_dict: Dictionary that stores the backbone model weights.
    backbone_name: String, indicating the desired backbone configuration.

  Returns:
    A list containing the config classes of the backbone building blocks.
  """
  print("Fetching backbone config classes for {}\n".format(backbone_name))
  cfgs = BackboneConfigData(weights_dict=weights_dict).get_cfg_list(
      backbone_name)
  return cfgs


def load_weights_backbone(backbone, weights_dict, backbone_name):
  """Loads the weights defined in the weights_dict into the backbone.

  This function loads the backbone weights by first fetching the necessary
  config classes for the backbone, then loads them in one by one for
  each layer that has weights associated with it.

  Args:
    backbone: keras.Model backbone.
    weights_dict: Dictionary that stores the backbone model weights.
    backbone_name: String, indicating the desired backbone configuration.

  Returns:
    The number of weights loaded.
  """
  print("Loading backbone weights\n")
  backbone_layers = backbone.layers
  cfgs = get_backbone_layer_cfgs(weights_dict, backbone_name)
  n_weights_total = 0

  cfg = cfgs.pop(0)
  for i in range(len(backbone_layers)):
    layer = backbone_layers[i]
    if isinstance(layer, (mobilenet.Conv2DBNBlock,
                          cn_nn_blocks.HourglassBlock,
                          nn_blocks.ResidualBlock)):
      n_weights = cfg.load_weights(layer)
      print("Loading weights for: {}, weights loaded: {}".format(
          cfg, n_weights))
      n_weights_total += n_weights
      # pylint: disable=g-explicit-length-test
      if len(cfgs) == 0:
        print("{} Weights have been loaded for {} / {} layers\n".format(
            n_weights_total, i + 1, len(backbone_layers)))
        return n_weights_total
      cfg = cfgs.pop(0)
  return n_weights_total


def get_head_layer_cfgs(weights_dict, head_name):
  """Fetches the config classes for the head.

  This function generates a list of config classes corresponding to each
  building block in the head.

  Args:
    weights_dict: Dictionary that stores the head model weights.
    head_name: String, indicating the desired head configuration.
  Returns:
    A list containing the config classes of the head building blocks.
  """
  print("Fetching head config classes for {}\n".format(head_name))

  cfgs = HeadConfigData(weights_dict=weights_dict).get_cfg_list(head_name)
  return cfgs


def load_weights_head(head, weights_dict, head_name):
  """Loads the weights defined in the weights_dict into the head.

  This function loads the head weights by first fetching the necessary
  config classes for the head, then loads them in one by one for
  each layer that has weights associated with it.

  Args:
    head: keras.Model head.
    weights_dict: Dictionary that stores the head model weights.
    head_name: String, indicating the desired head configuration.

  Returns:
    The number of weights loaded.
  """
  print("Loading head weights\n")
  head_layers = head.layers
  cfgs = get_head_layer_cfgs(weights_dict, head_name)
  n_weights_total = 0

  cfg = cfgs.pop(0)
  for i in range(len(head_layers)):
    layer = head_layers[i]
    if isinstance(layer, cn_nn_blocks.CenterNetHeadConv):
      n_weights = cfg.load_weights(layer)
      print("Loading weights for: {}, weights loaded: {}".format(
          cfg, n_weights))
      n_weights_total += n_weights
      # pylint: disable=g-explicit-length-test
      if len(cfgs) == 0:
        print("{} Weights have been loaded for {} / {} layers\n".format(
            n_weights_total, i + 1, len(head_layers)))
        return n_weights_total
      cfg = cfgs.pop(0)
  return n_weights_total


def load_weights_model(model, weights_dict, backbone_name, head_name):
  """Loads weights into the model.

  Args:
    model: keras.Model to load weights into.
    weights_dict: Dictionary that stores the weights of the model.
    backbone_name: String, indicating the desired backbone configuration.
    head_name: String, indicating the desired head configuration.

  Returns:
    The model with the loaded weights.
  """
  print("Loading model weights\n")
  n_weights = 0
  if backbone_name:
    n_weights += load_weights_backbone(
        model.backbone,
        weights_dict["model"]["_feature_extractor"]["_network"],
        backbone_name)
  if head_name:
    n_weights += load_weights_head(
        model.head,
        weights_dict["model"]["_prediction_head_dict"],
        head_name)
  print("Successfully loaded {} model weights.\n".format(n_weights))
  return model
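A sketch of using these helpers directly, mirroring the converter script earlier in this section. The checkpoint path is a placeholder, and `model` is assumed to be an already-built centernet_model.CenterNetModel:

from official.projects.centernet.utils.checkpoints import load_weights
from official.projects.centernet.utils.checkpoints import read_checkpoints

# Read the TF1 ODAPI checkpoint into a nested dictionary of numpy weights.
weights_dict, _ = read_checkpoints.get_ckpt_weights_as_dict(
    '/path/to/tf1_odapi_checkpoint')
# Load backbone and head weights into the (pre-built) CenterNet model.
model = load_weights.load_weights_model(
    model=model,
    weights_dict=weights_dict,
    backbone_name='hourglass104_512',
    head_name='detection_2d')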
6,042
33.729885
74
py
models
models-master/official/projects/centernet/utils/checkpoints/config_classes.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Layer config for parsing ODAPI checkpoint.

This file contains the layers (Config objects) that are used for parsing the
ODAPI checkpoint weights for CenterNet.

Currently, the parser is incomplete and has only been tested on
CenterNet Hourglass-104 512x512.
"""

import abc
import dataclasses
from typing import Dict, Optional

import numpy as np
import tensorflow as tf


class Config(abc.ABC):
  """Base config class."""

  def get_weights(self):
    """Generates the weights needed to be loaded into the layer."""
    raise NotImplementedError

  def load_weights(self, layer: tf.keras.layers.Layer) -> int:
    """Assigns weights to the given layer.

    Given a layer, this function retrieves the weights for that layer in an
    appropriate format and order, and loads them into the layer. Additionally,
    the number of weights loaded is returned. If the weights are in an
    incorrect format, a ValueError will be raised by set_weights().

    Args:
      layer: A `tf.keras.layers.Layer`.

    Returns:
      The number of weights loaded into the layer.
    """
    weights = self.get_weights()
    layer.set_weights(weights)
    n_weights = 0
    for w in weights:
      n_weights += w.size
    return n_weights


@dataclasses.dataclass
class Conv2DBNCFG(Config):
  """Config class for Conv2DBN block."""
  weights_dict: Optional[Dict[str, np.ndarray]] = dataclasses.field(
      repr=False, default=None)

  weights: Optional[np.ndarray] = dataclasses.field(repr=False, default=None)
  beta: Optional[np.ndarray] = dataclasses.field(repr=False, default=None)
  gamma: Optional[np.ndarray] = dataclasses.field(repr=False, default=None)
  moving_mean: Optional[np.ndarray] = dataclasses.field(
      repr=False, default=None)
  moving_variance: Optional[np.ndarray] = dataclasses.field(
      repr=False, default=None)

  def __post_init__(self):
    conv_weights_dict = self.weights_dict['conv']
    norm_weights_dict = self.weights_dict['norm']

    self.weights = conv_weights_dict['kernel']
    self.beta = norm_weights_dict['beta']
    self.gamma = norm_weights_dict['gamma']
    self.moving_mean = norm_weights_dict['moving_mean']
    self.moving_variance = norm_weights_dict['moving_variance']

  def get_weights(self):
    return [
        self.weights,
        self.gamma,
        self.beta,
        self.moving_mean,
        self.moving_variance
    ]


@dataclasses.dataclass
class ResidualBlockCFG(Config):
  """Config class for Residual block."""
  weights_dict: Optional[Dict[str, np.ndarray]] = dataclasses.field(
      repr=False, default=None)

  skip_weights: Optional[np.ndarray] = dataclasses.field(
      repr=False, default=None)
  skip_beta: Optional[np.ndarray] = dataclasses.field(
      repr=False, default=None)
  skip_gamma: Optional[np.ndarray] = dataclasses.field(
      repr=False, default=None)
  skip_moving_mean: Optional[np.ndarray] = dataclasses.field(
      repr=False, default=None)
  skip_moving_variance: Optional[np.ndarray] = dataclasses.field(
      repr=False, default=None)

  conv_weights: Optional[np.ndarray] = dataclasses.field(
      repr=False, default=None)
  norm_beta: Optional[np.ndarray] = dataclasses.field(
      repr=False, default=None)
  norm_gamma: Optional[np.ndarray] = dataclasses.field(
      repr=False,
default=None) norm_moving_mean: Optional[np.ndarray] = dataclasses.field( repr=False, default=None) norm_moving_variance: Optional[np.ndarray] = dataclasses.field( repr=False, default=None) conv_block_weights: Optional[np.ndarray] = dataclasses.field( repr=False, default=None) conv_block_beta: Optional[np.ndarray] = dataclasses.field( repr=False, default=None) conv_block_gamma: Optional[np.ndarray] = dataclasses.field( repr=False, default=None) conv_block_moving_mean: Optional[np.ndarray] = dataclasses.field( repr=False, default=None) conv_block_moving_variance: Optional[np.ndarray] = dataclasses.field( repr=False, default=None) def __post_init__(self): conv_weights_dict = self.weights_dict['conv'] norm_weights_dict = self.weights_dict['norm'] conv_block_weights_dict = self.weights_dict['conv_block'] if 'skip' in self.weights_dict: skip_weights_dict = self.weights_dict['skip'] self.skip_weights = skip_weights_dict['conv']['kernel'] self.skip_beta = skip_weights_dict['norm']['beta'] self.skip_gamma = skip_weights_dict['norm']['gamma'] self.skip_moving_mean = skip_weights_dict['norm']['moving_mean'] self.skip_moving_variance = skip_weights_dict['norm']['moving_variance'] self.conv_weights = conv_weights_dict['kernel'] self.norm_beta = norm_weights_dict['beta'] self.norm_gamma = norm_weights_dict['gamma'] self.norm_moving_mean = norm_weights_dict['moving_mean'] self.norm_moving_variance = norm_weights_dict['moving_variance'] self.conv_block_weights = conv_block_weights_dict['conv']['kernel'] self.conv_block_beta = conv_block_weights_dict['norm']['beta'] self.conv_block_gamma = conv_block_weights_dict['norm']['gamma'] self.conv_block_moving_mean = conv_block_weights_dict['norm']['moving_mean'] self.conv_block_moving_variance = conv_block_weights_dict['norm'][ 'moving_variance'] def get_weights(self): weights = [ self.skip_weights, self.skip_gamma, self.skip_beta, self.conv_block_weights, self.conv_block_gamma, self.conv_block_beta, self.conv_weights, self.norm_gamma, self.norm_beta, self.skip_moving_mean, self.skip_moving_variance, self.conv_block_moving_mean, self.conv_block_moving_variance, self.norm_moving_mean, self.norm_moving_variance, ] weights = [x for x in weights if x is not None] return weights @dataclasses.dataclass class HeadConvCFG(Config): """Config class for HeadConv block.""" weights_dict: Optional[Dict[str, np.ndarray]] = dataclasses.field( repr=False, default=None) conv_1_weights: Optional[np.ndarray] = dataclasses.field( repr=False, default=None) conv_1_bias: Optional[np.ndarray] = dataclasses.field( repr=False, default=None) conv_2_weights: Optional[np.ndarray] = dataclasses.field( repr=False, default=None) conv_2_bias: Optional[np.ndarray] = dataclasses.field( repr=False, default=None) def __post_init__(self): conv_1_weights_dict = self.weights_dict['layer_with_weights-0'] conv_2_weights_dict = self.weights_dict['layer_with_weights-1'] self.conv_1_weights = conv_1_weights_dict['kernel'] self.conv_1_bias = conv_1_weights_dict['bias'] self.conv_2_weights = conv_2_weights_dict['kernel'] self.conv_2_bias = conv_2_weights_dict['bias'] def get_weights(self): return [ self.conv_1_weights, self.conv_1_bias, self.conv_2_weights, self.conv_2_bias ] @dataclasses.dataclass class HourglassCFG(Config): """Config class for Hourglass block.""" weights_dict: Optional[Dict[str, np.ndarray]] = dataclasses.field( repr=False, default=None) is_last_stage: bool = dataclasses.field(repr=False, default=None) def __post_init__(self): self.is_last_stage = False if 'inner_block' in self.weights_dict 
else True def get_weights(self): """It is not used in this class.""" return None def generate_block_weights(self, weights_dict): """Convert weights dict to blocks structure.""" reps = len(weights_dict.keys()) weights = [] n_weights = 0 for i in range(reps): res_config = ResidualBlockCFG(weights_dict=weights_dict[str(i)]) res_weights = res_config.get_weights() weights += res_weights for w in res_weights: n_weights += w.size return weights, n_weights def load_block_weights(self, layer, weight_dict): block_weights, n_weights = self.generate_block_weights(weight_dict) layer.set_weights(block_weights) return n_weights def load_weights(self, layer): n_weights = 0 if not self.is_last_stage: enc_dec_layers = [ layer.submodules[0], layer.submodules[1], layer.submodules[3] ] enc_dec_weight_dicts = [ self.weights_dict['encoder_block1'], self.weights_dict['encoder_block2'], self.weights_dict['decoder_block'] ] for l, weights_dict in zip(enc_dec_layers, enc_dec_weight_dicts): n_weights += self.load_block_weights(l, weights_dict) if len(self.weights_dict['inner_block']) == 1: # still in an outer hourglass inner_weights_dict = self.weights_dict['inner_block']['0'] else: # inner residual block chain inner_weights_dict = self.weights_dict['inner_block'] inner_hg_layer = layer.submodules[2] inner_hg_cfg = type(self)(weights_dict=inner_weights_dict) n_weights += inner_hg_cfg.load_weights(inner_hg_layer) else: inner_layer = layer.submodules[0] n_weights += self.load_block_weights(inner_layer, self.weights_dict) return n_weights
9,665
31.436242
80
py
models
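To make the calling convention of these Config classes concrete, here is a minimal sketch (not part of the checkpoint parser itself) that drives Conv2DBNCFG with a hand-built ODAPI-style weights dict. The 'conv'/'norm' keys follow __post_init__ above; the array shapes and the conv+batch-norm layer pair are illustrative assumptions, chosen so the layer's variable order matches get_weights().

import numpy as np
import tensorflow as tf

from official.projects.centernet.utils.checkpoints.config_classes import Conv2DBNCFG

# Illustrative ODAPI-style nested weights dict; shapes are made up.
weights_dict = {
    'conv': {'kernel': np.random.rand(3, 3, 3, 16).astype(np.float32)},
    'norm': {
        'beta': np.zeros(16, np.float32),
        'gamma': np.ones(16, np.float32),
        'moving_mean': np.zeros(16, np.float32),
        'moving_variance': np.ones(16, np.float32),
    },
}
cfg = Conv2DBNCFG(weights_dict=weights_dict)

# A conv + batch-norm pair whose variable order matches get_weights():
# [kernel, gamma, beta, moving_mean, moving_variance].
layer = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16, 3, use_bias=False, input_shape=(32, 32, 3)),
    tf.keras.layers.BatchNormalization(),
])
print(cfg.load_weights(layer))  # 3*3*3*16 + 4*16 = 496 weights loaded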
models-master/official/projects/centernet/modeling/centernet_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Centernet detection models.""" from typing import Mapping, Union, Any import tensorflow as tf class CenterNetModel(tf.keras.Model): """CenterNet Model.""" def __init__(self, backbone: tf.keras.Model, head: tf.keras.Model, detection_generator: tf.keras.layers.Layer, **kwargs): """CenterNet Model. Args: backbone: a backbone network. head: a projection head for centernet. detection_generator: a detection generator for centernet. **kwargs: keyword arguments to be passed. """ super(CenterNetModel, self).__init__(**kwargs) # model components self._backbone = backbone self._detection_generator = detection_generator self._head = head def call(self, # pytype: disable=signature-mismatch # overriding-parameter-count-checks inputs: tf.Tensor, training: bool = None, **kwargs) -> Mapping[str, tf.Tensor]: features = self._backbone(inputs) raw_outputs = self._head(features) model_outputs = {'raw_output': raw_outputs} if not training: predictions = self._detection_generator(raw_outputs) model_outputs.update(predictions) return model_outputs @property def checkpoint_items( self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]: """Returns a dictionary of items to be additionally checkpointed.""" items = dict(backbone=self.backbone, head=self.head) return items @property def backbone(self): return self._backbone @property def detection_generator(self): return self._detection_generator @property def head(self): return self._head def get_config(self) -> Mapping[str, Any]: config_dict = { 'backbone': self._backbone, 'head': self._head, 'detection_generator': self._detection_generator, } return config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
2,616
29.08046
91
py
models
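A short sketch of the training/inference split in call(): the components are built exactly as in the unit test that follows, and the only new point shown is that decoded detections appear in the output dict only when training=False.

import tensorflow as tf

from official.projects.centernet.configs import backbones
from official.projects.centernet.modeling import centernet_model
from official.projects.centernet.modeling.backbones import hourglass
from official.projects.centernet.modeling.heads import centernet_head
from official.projects.centernet.modeling.layers import detection_generator
from official.vision.configs import common

backbone = hourglass.build_hourglass(
    input_specs=tf.keras.layers.InputSpec(shape=[None, 512, 512, 3]),
    backbone_config=backbones.Backbone(type='hourglass'),
    norm_activation_config=common.NormActivation(use_sync_bn=True))
head = centernet_head.CenterNetHead(
    task_outputs={'ct_heatmaps': 90, 'ct_offset': 2, 'ct_size': 2},
    input_specs=backbone.output_specs,
    input_levels=['2_0', '2'])
model = centernet_model.CenterNetModel(
    backbone=backbone, head=head,
    detection_generator=detection_generator.CenterNetDetectionGenerator())

images = tf.zeros((1, 512, 512, 3))
train_out = model(images, training=True)   # only {'raw_output': {...}}
eval_out = model(images, training=False)   # generator adds decoded boxes
assert 'boxes' not in train_out
assert {'boxes', 'classes', 'confidence', 'num_detections'} <= set(eval_out)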
models-master/official/projects/centernet/modeling/centernet_model_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test for centernet detection model.""" from absl.testing import parameterized import tensorflow as tf from official.projects.centernet.configs import backbones from official.projects.centernet.modeling import centernet_model from official.projects.centernet.modeling.backbones import hourglass from official.projects.centernet.modeling.heads import centernet_head from official.projects.centernet.modeling.layers import detection_generator from official.vision.configs import common class CenterNetTest(parameterized.TestCase, tf.test.TestCase): def testBuildCenterNet(self): backbone = hourglass.build_hourglass( input_specs=tf.keras.layers.InputSpec(shape=[None, 512, 512, 3]), backbone_config=backbones.Backbone(type='hourglass'), norm_activation_config=common.NormActivation(use_sync_bn=True) ) task_config = { 'ct_heatmaps': 90, 'ct_offset': 2, 'ct_size': 2, } input_levels = ['2_0', '2'] head = centernet_head.CenterNetHead( task_outputs=task_config, input_specs=backbone.output_specs, input_levels=input_levels) detection_ge = detection_generator.CenterNetDetectionGenerator() model = centernet_model.CenterNetModel( backbone=backbone, head=head, detection_generator=detection_ge ) outputs = model(tf.zeros((5, 512, 512, 3))) self.assertLen(outputs['raw_output'], 3) self.assertLen(outputs['raw_output']['ct_heatmaps'], 2) self.assertLen(outputs['raw_output']['ct_offset'], 2) self.assertLen(outputs['raw_output']['ct_size'], 2) self.assertEqual(outputs['raw_output']['ct_heatmaps'][0].shape, (5, 128, 128, 90)) self.assertEqual(outputs['raw_output']['ct_offset'][0].shape, (5, 128, 128, 2)) self.assertEqual(outputs['raw_output']['ct_size'][0].shape, (5, 128, 128, 2)) if __name__ == '__main__': tf.test.main()
2,575
34.287671
75
py
models
models-master/official/projects/centernet/modeling/layers/detection_generator.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Detection generator for centernet. Parses predictions from the CenterNet head into the final bounding boxes, confidences, and classes. This class contains repurposed methods from the TensorFlow Object Detection API in: https://github.com/tensorflow/models/blob/master/research/object_detection /meta_architectures/center_net_meta_arch.py """ from typing import Any, Mapping import tensorflow as tf from official.projects.centernet.ops import loss_ops from official.projects.centernet.ops import nms_ops from official.vision.ops import box_ops class CenterNetDetectionGenerator(tf.keras.layers.Layer): """CenterNet Detection Generator.""" def __init__(self, input_image_dims: int = 512, net_down_scale: int = 4, max_detections: int = 100, peak_error: float = 1e-6, peak_extract_kernel_size: int = 3, class_offset: int = 1, use_nms: bool = False, nms_pre_thresh: float = 0.1, nms_thresh: float = 0.4, **kwargs): """Initialize CenterNet Detection Generator. Args: input_image_dims: An `int` that specifies the input image size. net_down_scale: An `int` that specifies stride of the output. max_detections: An `int` specifying the maximum number of bounding boxes generated. This is an upper bound, so the number of generated boxes may be less than this due to thresholding/non-maximum suppression. peak_error: A `float` for determining non-valid heatmap locations to mask. peak_extract_kernel_size: An `int` indicating the kernel size used when performing max-pool over the heatmaps to detect valid center locations from its neighbors. From the paper, set this to 3 to detect valid. locations that have responses greater than its 8-connected neighbors class_offset: An `int` indicating to add an offset to the class prediction if the dataset labels have been shifted. use_nms: A `bool` for whether or not to use non-maximum suppression to filter the bounding boxes. nms_pre_thresh: A `float` for pre-nms threshold. nms_thresh: A `float` for nms threshold. **kwargs: Additional keyword arguments to be passed. """ super(CenterNetDetectionGenerator, self).__init__(**kwargs) # Object center selection parameters self._max_detections = max_detections self._peak_error = peak_error self._peak_extract_kernel_size = peak_extract_kernel_size # Used for adjusting class prediction self._class_offset = class_offset # Box normalization parameters self._net_down_scale = net_down_scale self._input_image_dims = input_image_dims self._use_nms = use_nms self._nms_pre_thresh = nms_pre_thresh self._nms_thresh = nms_thresh def process_heatmap(self, feature_map: tf.Tensor, kernel_size: int) -> tf.Tensor: """Processes the heatmap into peaks for box selection. Given a heatmap, this function first masks out nearby heatmap locations of the same class using max-pooling such that, ideally, only one center for the object remains. Then, center locations are masked according to their scores in comparison to a threshold. NOTE: Repurposed from Google OD API. 
Args: feature_map: A Tensor with shape [batch_size, height, width, num_classes] which is the center heatmap predictions. kernel_size: An integer value for max-pool kernel size. Returns: A Tensor with the same shape as the input but with non-valid center prediction locations masked out. """ feature_map = tf.math.sigmoid(feature_map) if not kernel_size or kernel_size == 1: feature_map_peaks = feature_map else: feature_map_max_pool = tf.nn.max_pool( feature_map, ksize=kernel_size, strides=1, padding='SAME') feature_map_peak_mask = tf.math.abs( feature_map - feature_map_max_pool) < self._peak_error # Zero out everything that is not a peak. feature_map_peaks = ( feature_map * tf.cast(feature_map_peak_mask, feature_map.dtype)) return feature_map_peaks def get_top_k_peaks(self, feature_map_peaks: tf.Tensor, batch_size: int, width: int, num_classes: int, k: int = 100): """Gets the scores and indices of the top-k peaks from the feature map. This function flattens the feature map in order to retrieve the top-k peaks, then computes the x, y, and class indices for those scores. NOTE: Repurposed from Google OD API. Args: feature_map_peaks: A `Tensor` with shape [batch_size, height, width, num_classes] which is the processed center heatmap peaks. batch_size: An `int` that indicates the batch size of the input. width: An `int` that indicates the width (and also height) of the input. num_classes: An `int` for the number of possible classes. This is also the channel depth of the input. k: `int`` that controls how many peaks to select. Returns: top_scores: A Tensor with shape [batch_size, k] containing the top-k scores. y_indices: A Tensor with shape [batch_size, k] containing the top-k y-indices corresponding to top_scores. x_indices: A Tensor with shape [batch_size, k] containing the top-k x-indices corresponding to top_scores. channel_indices: A Tensor with shape [batch_size, k] containing the top-k channel indices corresponding to top_scores. """ # Flatten the entire prediction per batch feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1]) # top_scores and top_indices have shape [batch_size, k] top_scores, top_indices = tf.math.top_k(feature_map_peaks_flat, k=k) # Get x, y and channel indices corresponding to the top indices in the flat # array. y_indices, x_indices, channel_indices = ( loss_ops.get_row_col_channel_indices_from_flattened_indices( top_indices, width, num_classes)) return top_scores, y_indices, x_indices, channel_indices def get_boxes(self, y_indices: tf.Tensor, x_indices: tf.Tensor, channel_indices: tf.Tensor, height_width_predictions: tf.Tensor, offset_predictions: tf.Tensor, num_boxes: int): """Organizes prediction information into the final bounding boxes. NOTE: Repurposed from Google OD API. Args: y_indices: A Tensor with shape [batch_size, k] containing the top-k y-indices corresponding to top_scores. x_indices: A Tensor with shape [batch_size, k] containing the top-k x-indices corresponding to top_scores. channel_indices: A Tensor with shape [batch_size, k] containing the top-k channel indices corresponding to top_scores. height_width_predictions: A Tensor with shape [batch_size, height, width, 2] containing the object size predictions. offset_predictions: A Tensor with shape [batch_size, height, width, 2] containing the object local offset predictions. num_boxes: `int`, the number of boxes. Returns: boxes: A Tensor with shape [batch_size, num_boxes, 4] that contains the bounding box coordinates in [y_min, x_min, y_max, x_max] format. 
detection_classes: A Tensor with shape [batch_size, num_boxes] that gives the class prediction for each box. num_detections: Number of non-zero confidence detections made. """ # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. # shapes of heatmap output shape = tf.shape(height_width_predictions) batch_size, height, width = shape[0], shape[1], shape[2] # combined indices dtype=int32 combined_indices = tf.stack([ loss_ops.multi_range(batch_size, value_repetitions=num_boxes), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]) ], axis=1) new_height_width = tf.gather_nd(height_width_predictions, combined_indices) new_height_width = tf.reshape(new_height_width, [batch_size, num_boxes, 2]) height_width = tf.maximum(new_height_width, 0.0) # height and widths dtype=float32 heights = height_width[..., 0] widths = height_width[..., 1] # Get the offsets of center points new_offsets = tf.gather_nd(offset_predictions, combined_indices) offsets = tf.reshape(new_offsets, [batch_size, num_boxes, 2]) # offsets are dtype=float32 y_offsets = offsets[..., 0] x_offsets = offsets[..., 1] y_indices = tf.cast(y_indices, dtype=heights.dtype) x_indices = tf.cast(x_indices, dtype=widths.dtype) detection_classes = channel_indices + self._class_offset ymin = y_indices + y_offsets - heights / 2.0 xmin = x_indices + x_offsets - widths / 2.0 ymax = y_indices + y_offsets + heights / 2.0 xmax = x_indices + x_offsets + widths / 2.0 ymin = tf.clip_by_value(ymin, 0., tf.cast(height, ymin.dtype)) xmin = tf.clip_by_value(xmin, 0., tf.cast(width, xmin.dtype)) ymax = tf.clip_by_value(ymax, 0., tf.cast(height, ymax.dtype)) xmax = tf.clip_by_value(xmax, 0., tf.cast(width, xmax.dtype)) boxes = tf.stack([ymin, xmin, ymax, xmax], axis=2) return boxes, detection_classes def convert_strided_predictions_to_normalized_boxes(self, boxes: tf.Tensor): boxes = boxes * tf.cast(self._net_down_scale, boxes.dtype) boxes = boxes / tf.cast(self._input_image_dims, boxes.dtype) boxes = tf.clip_by_value(boxes, 0.0, 1.0) return boxes def __call__(self, inputs): # Get heatmaps from decoded outputs via final hourglass stack output all_ct_heatmaps = inputs['ct_heatmaps'] all_ct_sizes = inputs['ct_size'] all_ct_offsets = inputs['ct_offset'] ct_heatmaps = all_ct_heatmaps[-1] ct_sizes = all_ct_sizes[-1] ct_offsets = all_ct_offsets[-1] shape = tf.shape(ct_heatmaps) _, width = shape[1], shape[2] batch_size, num_channels = shape[0], shape[3] # Process heatmaps using 3x3 max pool and applying sigmoid peaks = self.process_heatmap( feature_map=ct_heatmaps, kernel_size=self._peak_extract_kernel_size) # Get top scores along with their x, y, and class # Each has size [batch_size, k] scores, y_indices, x_indices, channel_indices = self.get_top_k_peaks( feature_map_peaks=peaks, batch_size=batch_size, width=width, num_classes=num_channels, k=self._max_detections) # Parse the score and indices into bounding boxes boxes, classes = self.get_boxes( y_indices=y_indices, x_indices=x_indices, channel_indices=channel_indices, height_width_predictions=ct_sizes, offset_predictions=ct_offsets, num_boxes=self._max_detections) # Normalize bounding boxes boxes = self.convert_strided_predictions_to_normalized_boxes(boxes) # Apply nms if self._use_nms: boxes = tf.expand_dims(boxes, axis=-2) multi_class_scores = tf.gather_nd( peaks, tf.stack([y_indices, x_indices], -1), batch_dims=1) boxes, _, scores = nms_ops.nms( boxes=boxes, classes=multi_class_scores, confidence=scores, 
k=self._max_detections, limit_pre_thresh=True, pre_nms_thresh=0.1, nms_thresh=0.4) num_det = tf.reduce_sum(tf.cast(scores > 0, dtype=tf.int32), axis=1) boxes = box_ops.denormalize_boxes( boxes, [self._input_image_dims, self._input_image_dims]) return { 'boxes': boxes, 'classes': classes, 'confidence': scores, 'num_detections': num_det } def get_config(self) -> Mapping[str, Any]: config = { 'max_detections': self._max_detections, 'peak_error': self._peak_error, 'peak_extract_kernel_size': self._peak_extract_kernel_size, 'class_offset': self._class_offset, 'net_down_scale': self._net_down_scale, 'input_image_dims': self._input_image_dims, 'use_nms': self._use_nms, 'nms_pre_thresh': self._nms_pre_thresh, 'nms_thresh': self._nms_thresh } base_config = super(CenterNetDetectionGenerator, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): return cls(**config)
13,299
38.117647
80
py
models
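A hedged end-to-end sketch of the generator on fabricated head outputs: the shapes mirror the 512x512, stride-4, 90-class Hourglass setup described above, and the random tensors are stand-ins for the final hourglass stack's predictions.

import tensorflow as tf

from official.projects.centernet.modeling.layers import detection_generator

generator = detection_generator.CenterNetDetectionGenerator(
    input_image_dims=512, net_down_scale=4, max_detections=100)

# Random stand-ins for the final stack's head outputs (batch of 2,
# 128x128 stride-4 feature maps, 90 classes); each task is a list of
# per-stack tensors and only the last entry is decoded.
raw_output = {
    'ct_heatmaps': [tf.random.uniform((2, 128, 128, 90))],
    'ct_size': [tf.random.uniform((2, 128, 128, 2))],
    'ct_offset': [tf.random.uniform((2, 128, 128, 2))],
}
detections = generator(raw_output)
print(detections['boxes'].shape)     # (2, 100, 4), absolute pixel coords
print(detections['num_detections'])  # per-image count of scores > 0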
models-master/official/projects/centernet/modeling/layers/cn_nn_blocks.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains common building blocks for centernet neural networks.""" from typing import List, Optional import tensorflow as tf from official.vision.modeling.layers import nn_blocks def _apply_blocks(inputs, blocks): """Apply blocks to inputs.""" net = inputs for block in blocks: net = block(net) return net def _make_repeated_residual_blocks( reps: int, out_channels: int, use_sync_bn: bool = True, norm_momentum: float = 0.1, norm_epsilon: float = 1e-5, residual_channels: Optional[int] = None, initial_stride: int = 1, initial_skip_conv: bool = False, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, ): """Stack Residual blocks one after the other. Args: reps: `int` for desired number of residual blocks out_channels: `int`, filter depth of the final residual block use_sync_bn: A `bool`, if True, use synchronized batch normalization. norm_momentum: `float`, momentum for the batch normalization layers norm_epsilon: `float`, epsilon for the batch normalization layers residual_channels: `int`, filter depth for the first reps - 1 residual blocks. If None, defaults to the same value as out_channels. If not equal to out_channels, then uses a projection shortcut in the final residual block initial_stride: `int`, stride for the first residual block initial_skip_conv: `bool`, if set, the first residual block uses a skip convolution. This is useful when the number of channels in the input are not the same as residual_channels. kernel_initializer: A `str` for kernel initializer of convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. Returns: blocks: A list of residual blocks to be applied in sequence. """ blocks = [] if residual_channels is None: residual_channels = out_channels for i in range(reps - 1): # Only use the stride at the first block so we don't repeatedly downsample # the input stride = initial_stride if i == 0 else 1 # If the stride is more than 1, we cannot use an identity layer for the # skip connection and are forced to use a conv for the skip connection. 
skip_conv = stride > 1 if i == 0 and initial_skip_conv: skip_conv = True blocks.append(nn_blocks.ResidualBlock( filters=residual_channels, strides=stride, use_explicit_padding=True, use_projection=skip_conv, use_sync_bn=use_sync_bn, norm_momentum=norm_momentum, norm_epsilon=norm_epsilon, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)) if reps == 1: # If there is only 1 block, the `for` loop above is not run, # therefore we honor the requested stride in the last residual block stride = initial_stride # We are forced to use a conv in the skip connection if stride > 1 skip_conv = stride > 1 else: stride = 1 skip_conv = residual_channels != out_channels blocks.append(nn_blocks.ResidualBlock( filters=out_channels, strides=stride, use_explicit_padding=True, use_projection=skip_conv, use_sync_bn=use_sync_bn, norm_momentum=norm_momentum, norm_epsilon=norm_epsilon, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)) return tf.keras.Sequential(blocks) class HourglassBlock(tf.keras.layers.Layer): """Hourglass module: an encoder-decoder block.""" def __init__( self, channel_dims_per_stage: List[int], blocks_per_stage: List[int], strides: int = 1, use_sync_bn: bool = True, norm_momentum: float = 0.1, norm_epsilon: float = 1e-5, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initialize Hourglass module. Args: channel_dims_per_stage: List[int], list of filter sizes for Residual blocks. the output channels dimensions of stages in the network. `channel_dims[0]` is used to define the number of channels in the first encoder block and `channel_dims[1]` is used to define the number of channels in the second encoder block. The channels in the recursive inner layers are defined using `channel_dims[1:]`. For example, [nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4] where nc is the input_channel_dimension. blocks_per_stage: List[int], list of residual block repetitions per down/upsample. `blocks_per_stage[0]` defines the number of blocks at the current stage and `blocks_per_stage[1:]` is used at further stages. For example, [2, 2, 2, 2, 2, 4]. strides: `int`, stride parameter to the Residual block. use_sync_bn: A `bool`, if True, use synchronized batch normalization. norm_momentum: `float`, momentum for the batch normalization layers. norm_epsilon: `float`, epsilon for the batch normalization layers. kernel_initializer: A `str` for kernel initializer of conv layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. **kwargs: Additional keyword arguments to be passed. 
""" super(HourglassBlock, self).__init__(**kwargs) if len(channel_dims_per_stage) != len(blocks_per_stage): raise ValueError('filter size and residual block repetition ' 'lists must have the same length') self._num_stages = len(channel_dims_per_stage) - 1 self._channel_dims_per_stage = channel_dims_per_stage self._blocks_per_stage = blocks_per_stage self._strides = strides self._use_sync_bn = use_sync_bn self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._filters = channel_dims_per_stage[0] if self._num_stages > 0: self._filters_downsampled = channel_dims_per_stage[1] self._reps = blocks_per_stage[0] def build(self, input_shape): if self._num_stages == 0: # base case, residual block repetitions in most inner part of hourglass self.blocks = _make_repeated_residual_blocks( reps=self._reps, out_channels=self._filters, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, bias_regularizer=self._bias_regularizer, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer) else: # outer hourglass structures self.encoder_block1 = _make_repeated_residual_blocks( reps=self._reps, out_channels=self._filters, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, bias_regularizer=self._bias_regularizer, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer) self.encoder_block2 = _make_repeated_residual_blocks( reps=self._reps, out_channels=self._filters_downsampled, initial_stride=2, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, bias_regularizer=self._bias_regularizer, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, initial_skip_conv=self._filters != self._filters_downsampled) # recursively define inner hourglasses self.inner_hg = type(self)( channel_dims_per_stage=self._channel_dims_per_stage[1:], blocks_per_stage=self._blocks_per_stage[1:], strides=self._strides) # outer hourglass structures self.decoder_block = _make_repeated_residual_blocks( reps=self._reps, residual_channels=self._filters_downsampled, out_channels=self._filters, use_sync_bn=self._use_sync_bn, norm_epsilon=self._norm_epsilon, bias_regularizer=self._bias_regularizer, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer) self.upsample_layer = tf.keras.layers.UpSampling2D( size=2, interpolation='nearest') super(HourglassBlock, self).build(input_shape) def call(self, x, training=None): if self._num_stages == 0: return self.blocks(x) else: encoded_outputs = self.encoder_block1(x) encoded_downsampled_outputs = self.encoder_block2(x) inner_outputs = self.inner_hg(encoded_downsampled_outputs) hg_output = self.decoder_block(inner_outputs) return self.upsample_layer(hg_output) + encoded_outputs def get_config(self): config = { 'channel_dims_per_stage': self._channel_dims_per_stage, 'blocks_per_stage': self._blocks_per_stage, 'strides': self._strides, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, } config.update(super(HourglassBlock, self).get_config()) return config class 
CenterNetHeadConv(tf.keras.layers.Layer): """Convolution block for the CenterNet head.""" def __init__(self, output_filters: int, bias_init: float, name: str, **kwargs): """Initialize CenterNet head. Args: output_filters: `int`, channel depth of layer output bias_init: `float`, value to initialize the bias vector for the final convolution layer name: `string`, layer name **kwargs: Additional keyword arguments to be passed. """ super(CenterNetHeadConv, self).__init__(name=name, **kwargs) self._output_filters = output_filters self._bias_init = bias_init def build(self, input_shape): n_channels = input_shape[-1] self.conv1 = tf.keras.layers.Conv2D( filters=n_channels, kernel_size=(3, 3), padding='same') self.relu = tf.keras.layers.ReLU() # Initialize bias to the last Conv2D Layer self.conv2 = tf.keras.layers.Conv2D( filters=self._output_filters, kernel_size=(1, 1), padding='valid', bias_initializer=tf.constant_initializer(self._bias_init)) super(CenterNetHeadConv, self).build(input_shape) def call(self, x, training=None): x = self.conv1(x) x = self.relu(x) x = self.conv2(x) return x def get_config(self): config = { 'output_filters': self._output_filters, 'bias_init': self._bias_init, } config.update(super(CenterNetHeadConv, self).get_config()) return config
12,200
36.198171
80
py
models
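A small shape-check sketch of HourglassBlock and CenterNetHeadConv with toy channel and repetition lists (the real backbone uses the much larger specs in hourglass.py; the names and sizes here are illustrative only):

import numpy as np
import tensorflow as tf

from official.projects.centernet.modeling.layers import cn_nn_blocks

# A two-stage hourglass: encode 8 -> 16 channels at half resolution,
# then decode back; the output keeps the input's spatial shape.
hg = cn_nn_blocks.HourglassBlock(
    channel_dims_per_stage=[8, 16], blocks_per_stage=[2, 2])
feats = hg(np.zeros((1, 32, 32, 8), np.float32))  # -> (1, 32, 32, 8)

# Head conv with the heatmap bias used elsewhere in this project.
head = cn_nn_blocks.CenterNetHeadConv(
    output_filters=90, bias_init=-2.19, name='toy_head')
print(head(feats).shape)                          # (1, 32, 32, 90)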
models-master/official/projects/centernet/modeling/layers/cn_nn_blocks_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Centernet nn_blocks. It is a literal translation of the PyTorch implementation. """ from absl.testing import parameterized import numpy as np import tensorflow as tf from official.projects.centernet.modeling.layers import cn_nn_blocks from official.vision.modeling.layers import nn_blocks class HourglassBlockPyTorch(tf.keras.layers.Layer): """An CornerNet-style implementation of the hourglass block.""" def __init__(self, dims, modules, k=0, **kwargs): """An CornerNet-style implementation of the hourglass block. Args: dims: input sizes of residual blocks modules: number of repetitions of the residual blocks in each hourglass upsampling and downsampling k: recursive parameter **kwargs: Additional keyword arguments to be passed. """ super(HourglassBlockPyTorch).__init__() if len(dims) != len(modules): raise ValueError('dims and modules lists must have the same length') self.n = len(dims) - 1 self.k = k self.modules = modules self.dims = dims self._kwargs = kwargs def build(self, input_shape): modules = self.modules dims = self.dims k = self.k kwargs = self._kwargs curr_mod = modules[k] next_mod = modules[k + 1] curr_dim = dims[k + 0] next_dim = dims[k + 1] self.up1 = self.make_up_layer(3, curr_dim, curr_dim, curr_mod, **kwargs) self.max1 = tf.keras.layers.MaxPool2D(strides=2) self.low1 = self.make_hg_layer(3, curr_dim, next_dim, curr_mod, **kwargs) if self.n - k > 1: self.low2 = type(self)(dims, modules, k=k + 1, **kwargs) else: self.low2 = self.make_low_layer( 3, next_dim, next_dim, next_mod, **kwargs) self.low3 = self.make_hg_layer_revr( 3, next_dim, curr_dim, curr_mod, **kwargs) self.up2 = tf.keras.layers.UpSampling2D(2) self.merge = tf.keras.layers.Add() super(HourglassBlockPyTorch, self).build(input_shape) def call(self, x): up1 = self.up1(x) max1 = self.max1(x) low1 = self.low1(max1) low2 = self.low2(low1) low3 = self.low3(low2) up2 = self.up2(low3) return self.merge([up1, up2]) def make_layer(self, k, inp_dim, out_dim, modules, **kwargs): layers = [ nn_blocks.ResidualBlock(out_dim, 1, use_projection=True, **kwargs)] for _ in range(1, modules): layers.append(nn_blocks.ResidualBlock(out_dim, 1, **kwargs)) return tf.keras.Sequential(layers) def make_layer_revr(self, k, inp_dim, out_dim, modules, **kwargs): layers = [] for _ in range(modules - 1): layers.append( nn_blocks.ResidualBlock(inp_dim, 1, **kwargs)) layers.append( nn_blocks.ResidualBlock(out_dim, 1, use_projection=True, **kwargs)) return tf.keras.Sequential(layers) def make_up_layer(self, k, inp_dim, out_dim, modules, **kwargs): return self.make_layer(k, inp_dim, out_dim, modules, **kwargs) def make_low_layer(self, k, inp_dim, out_dim, modules, **kwargs): return self.make_layer(k, inp_dim, out_dim, modules, **kwargs) def make_hg_layer(self, k, inp_dim, out_dim, modules, **kwargs): return self.make_layer(k, inp_dim, out_dim, modules, **kwargs) def make_hg_layer_revr(self, k, inp_dim, out_dim, modules, **kwargs): return 
self.make_layer_revr(k, inp_dim, out_dim, modules, **kwargs) class NNBlocksTest(parameterized.TestCase, tf.test.TestCase): def test_hourglass_block(self): dims = [256, 256, 384, 384, 384, 512] modules = [2, 2, 2, 2, 2, 4] model = cn_nn_blocks.HourglassBlock(dims, modules) test_input = tf.keras.Input((512, 512, 256)) _ = model(test_input) filter_sizes = [256, 256, 384, 384, 384, 512] rep_sizes = [2, 2, 2, 2, 2, 4] hg_test_input_shape = (1, 512, 512, 256) x_hg = tf.ones(shape=hg_test_input_shape) hg = cn_nn_blocks.HourglassBlock( channel_dims_per_stage=filter_sizes, blocks_per_stage=rep_sizes) hg.build(input_shape=hg_test_input_shape) out = hg(x_hg) self.assertAllEqual( tf.shape(out), hg_test_input_shape, 'Hourglass module output shape and expected shape differ') # ODAPI Test layer = cn_nn_blocks.HourglassBlock( blocks_per_stage=[2, 3, 4, 5, 6], channel_dims_per_stage=[4, 6, 8, 10, 12]) output = layer(np.zeros((2, 64, 64, 4), dtype=np.float32)) self.assertEqual(output.shape, (2, 64, 64, 4)) if __name__ == '__main__': tf.test.main()
5,148
32.219355
77
py
models
models-master/official/projects/centernet/modeling/backbones/hourglass.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Build Hourglass backbone.""" from typing import Optional import tensorflow as tf from official.modeling import hyperparams from official.projects.centernet.modeling.layers import cn_nn_blocks from official.vision.modeling.backbones import factory from official.vision.modeling.backbones import mobilenet from official.vision.modeling.layers import nn_blocks HOURGLASS_SPECS = { 10: { 'blocks_per_stage': [1, 1], 'channel_dims_per_stage': [2, 2] }, 20: { 'blocks_per_stage': [1, 2, 2], 'channel_dims_per_stage': [2, 2, 3] }, 32: { 'blocks_per_stage': [2, 2, 2, 2], 'channel_dims_per_stage': [2, 2, 3, 3] }, 52: { 'blocks_per_stage': [2, 2, 2, 2, 2, 4], 'channel_dims_per_stage': [2, 2, 3, 3, 3, 4] }, 100: { 'blocks_per_stage': [4, 4, 4, 4, 4, 8], 'channel_dims_per_stage': [2, 2, 3, 3, 3, 4] }, } class Hourglass(tf.keras.Model): """CenterNet Hourglass backbone.""" def __init__( self, model_id: int, input_channel_dims: int, input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]), num_hourglasses: int = 1, initial_downsample: bool = True, activation: str = 'relu', use_sync_bn: bool = True, norm_momentum=0.1, norm_epsilon=1e-5, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initialize Hourglass backbone. Args: model_id: An `int` of the scale of Hourglass backbone model. input_channel_dims: `int`, number of filters used to downsample the input image. input_specs: A `tf.keras.layers.InputSpec` of specs of the input tensor. num_hourglasses: `int``, number of hourglass blocks in backbone. For example, hourglass-104 has two hourglass-52 modules. initial_downsample: `bool`, whether or not to downsample the input. activation: A `str` name of the activation function. use_sync_bn: If True, use synchronized batch normalization. norm_momentum: `float`, momentum for the batch normalization layers. norm_epsilon: `float`, epsilon for the batch normalization layers. kernel_initializer: A `str` for kernel initializer of conv layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. **kwargs: Additional keyword arguments to be passed. 
""" self._input_channel_dims = input_channel_dims self._model_id = model_id self._num_hourglasses = num_hourglasses self._initial_downsample = initial_downsample self._activation = activation self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._use_sync_bn = use_sync_bn self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon specs = HOURGLASS_SPECS[model_id] self._blocks_per_stage = specs['blocks_per_stage'] self._channel_dims_per_stage = [item * self._input_channel_dims for item in specs['channel_dims_per_stage']] inputs = tf.keras.layers.Input(shape=input_specs.shape[1:]) inp_filters = self._channel_dims_per_stage[0] # Downsample the input if initial_downsample: prelayer_kernel_size = 7 prelayer_strides = 2 else: prelayer_kernel_size = 3 prelayer_strides = 1 x_downsampled = mobilenet.Conv2DBNBlock( filters=self._input_channel_dims, kernel_size=prelayer_kernel_size, strides=prelayer_strides, use_explicit_padding=True, activation=self._activation, bias_regularizer=self._bias_regularizer, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)(inputs) x_downsampled = nn_blocks.ResidualBlock( filters=inp_filters, use_projection=True, use_explicit_padding=True, strides=prelayer_strides, bias_regularizer=self._bias_regularizer, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)(x_downsampled) all_heatmaps = {} for i in range(num_hourglasses): # Create an hourglass stack x_hg = cn_nn_blocks.HourglassBlock( channel_dims_per_stage=self._channel_dims_per_stage, blocks_per_stage=self._blocks_per_stage, )(x_downsampled) x_hg = mobilenet.Conv2DBNBlock( filters=inp_filters, kernel_size=3, strides=1, use_explicit_padding=True, activation=self._activation, bias_regularizer=self._bias_regularizer, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon )(x_hg) # Given two down-sampling blocks above, the starting level is set to 2 # To make it compatible with implementation of remaining backbones, the # output of hourglass backbones is organized as # '2' -> the last layer of output # '2_0' -> the first layer of output # ...... 
# '2_{num_hourglasses-2}' -> the second to last layer of output if i < num_hourglasses - 1: all_heatmaps['2_{}'.format(i)] = x_hg else: all_heatmaps['2'] = x_hg # Intermediate conv and residual layers between hourglasses if i < num_hourglasses - 1: inter_hg_conv1 = mobilenet.Conv2DBNBlock( filters=inp_filters, kernel_size=1, strides=1, activation='identity', bias_regularizer=self._bias_regularizer, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon )(x_downsampled) inter_hg_conv2 = mobilenet.Conv2DBNBlock( filters=inp_filters, kernel_size=1, strides=1, activation='identity', bias_regularizer=self._bias_regularizer, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon )(x_hg) x_downsampled = tf.keras.layers.Add()([inter_hg_conv1, inter_hg_conv2]) x_downsampled = tf.keras.layers.ReLU()(x_downsampled) x_downsampled = nn_blocks.ResidualBlock( filters=inp_filters, use_projection=False, use_explicit_padding=True, strides=1, bias_regularizer=self._bias_regularizer, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon )(x_downsampled) self._output_specs = {l: all_heatmaps[l].get_shape() for l in all_heatmaps} super().__init__(inputs=inputs, outputs=all_heatmaps, **kwargs) def get_config(self): config = { 'model_id': self._model_id, 'input_channel_dims': self._input_channel_dims, 'num_hourglasses': self._num_hourglasses, 'initial_downsample': self._initial_downsample, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon } config.update(super(Hourglass, self).get_config()) return config @property def num_hourglasses(self): return self._num_hourglasses @property def output_specs(self): return self._output_specs @factory.register_backbone_builder('hourglass') def build_hourglass( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None ) -> tf.keras.Model: """Builds Hourglass backbone from a configuration.""" backbone_type = backbone_config.type backbone_cfg = backbone_config.get() assert backbone_type == 'hourglass', (f'Inconsistent backbone type ' f'{backbone_type}') return Hourglass( model_id=backbone_cfg.model_id, input_channel_dims=backbone_cfg.input_channel_dims, num_hourglasses=backbone_cfg.num_hourglasses, input_specs=input_specs, initial_downsample=backbone_cfg.initial_downsample, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer, )
10,285
36.268116
80
py
models
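A quick sketch building the backbone directly with the smallest spec (model_id=10) so it is cheap to instantiate; input_channel_dims=4, the 64x64 input, and the two-stack setting are toy values chosen to exercise both the '2_0' and '2' output levels.

import numpy as np
import tensorflow as tf

from official.projects.centernet.modeling.backbones import hourglass

backbone = hourglass.Hourglass(
    model_id=10,
    input_channel_dims=4,
    num_hourglasses=2,
    input_specs=tf.keras.layers.InputSpec(shape=[None, 64, 64, 3]))
outs = backbone(np.zeros((1, 64, 64, 3), np.float32))
# Two stacks -> '2_0' (first stack) and '2' (last stack), both at
# stride 4 of the input: (1, 16, 16, 8) for this toy configuration.
print({k: v.shape for k, v in outs.items()})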
models-master/official/projects/centernet/modeling/backbones/hourglass_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for hourglass module.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from official.projects.centernet.common import registry_imports # pylint: disable=unused-import from official.projects.centernet.configs import backbones from official.projects.centernet.modeling.backbones import hourglass from official.vision.configs import common class HourglassTest(tf.test.TestCase, parameterized.TestCase): def test_hourglass(self): backbone = hourglass.build_hourglass( input_specs=tf.keras.layers.InputSpec(shape=[None, 512, 512, 3]), backbone_config=backbones.Backbone(type='hourglass'), norm_activation_config=common.NormActivation(use_sync_bn=True) ) inputs = np.zeros((2, 512, 512, 3), dtype=np.float32) outputs = backbone(inputs) self.assertEqual(outputs['2_0'].shape, (2, 128, 128, 256)) self.assertEqual(outputs['2'].shape, (2, 128, 128, 256)) if __name__ == '__main__': tf.test.main()
1,602
36.27907
96
py
models
models-master/official/projects/centernet/modeling/heads/centernet_head_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Centernet Head.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from official.projects.centernet.modeling.heads import centernet_head class CenterNetHeadTest(tf.test.TestCase, parameterized.TestCase): def test_decoder_shape(self): task_config = { 'ct_heatmaps': 90, 'ct_offset': 2, 'ct_size': 2, } input_specs = { '2_0': tf.keras.layers.InputSpec(shape=(None, 128, 128, 256)).shape, '2': tf.keras.layers.InputSpec(shape=(None, 128, 128, 256)).shape, } input_levels = ['2', '2_0'] head = centernet_head.CenterNetHead( task_outputs=task_config, input_specs=input_specs, input_levels=input_levels) config = head.get_config() self.assertEqual(config['heatmap_bias'], -2.19) # Output shape tests outputs = head([np.zeros((2, 128, 128, 256), dtype=np.float32), np.zeros((2, 128, 128, 256), dtype=np.float32)]) self.assertLen(outputs, 3) self.assertEqual(outputs['ct_heatmaps'][0].shape, (2, 128, 128, 90)) self.assertEqual(outputs['ct_offset'][0].shape, (2, 128, 128, 2)) self.assertEqual(outputs['ct_size'][0].shape, (2, 128, 128, 2)) # Weight initialization tests hm_bias_vector = np.asarray(head.layers[2].weights[-1]) off_bias_vector = np.asarray(head.layers[4].weights[-1]) size_bias_vector = np.asarray(head.layers[6].weights[-1]) self.assertArrayNear(hm_bias_vector, np.repeat(-2.19, repeats=90), err=1.00e-6) self.assertArrayNear(off_bias_vector, np.repeat(0, repeats=2), err=1.00e-6) self.assertArrayNear(size_bias_vector, np.repeat(0, repeats=2), err=1.00e-6) if __name__ == '__main__': tf.test.main()
2,425
33.657143
76
py
models
models-master/official/projects/centernet/modeling/heads/centernet_head.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains the definitions of head for CenterNet.""" from typing import Any, Dict, List, Mapping import tensorflow as tf from official.projects.centernet.modeling.layers import cn_nn_blocks class CenterNetHead(tf.keras.Model): """CenterNet Head.""" def __init__(self, input_specs: Dict[str, tf.TensorShape], task_outputs: Mapping[str, int], input_levels: List[str], heatmap_bias: float = -2.19, **kwargs): """CenterNet Head Initialization. Args: input_specs: A `dict` of input specifications. task_outputs: A `dict`, with key-value pairs denoting the names of the outputs and the desired channel depth of each output. input_levels: list of str representing the level used as input to the CenternetHead from the backbone. For example, ['2_0', '2'] should be set for hourglass-104 has two hourglass-52 modules, since the output of hourglass backbones is organized as: '2' -> the last layer of output '2_0' -> the first layer of output ...... '2_{num_hourglasses-2}' -> the second to last layer of output. heatmap_bias: `float`, constant value to initialize the convolution layer bias vector if it is responsible for generating a heatmap (not for regressed predictions). **kwargs: Additional keyword arguments to be passed. Returns: dictionary where the keys-value pairs denote the names of the output and the respective output tensor """ assert input_levels, f'Please specify input levels: {input_levels}' self._input_specs = input_specs self._task_outputs = task_outputs self._input_levels = input_levels self._heatmap_bias = heatmap_bias self._num_inputs = len(input_levels) inputs = {level: tf.keras.layers.Input(shape=self._input_specs[level][1:]) for level in input_levels} outputs = {} for key in self._task_outputs: # pylint: disable=g-complex-comprehension outputs[key] = [ cn_nn_blocks.CenterNetHeadConv( output_filters=self._task_outputs[key], bias_init=self._heatmap_bias if 'heatmaps' in key else 0, name=key + str(i), )(inputs[i]) for i in input_levels ] self._output_specs = { key: [value[i].get_shape() for i in range(self._num_inputs)] for key, value in outputs.items() } super().__init__(inputs=inputs, outputs=outputs, name='CenterNetHead', **kwargs) def get_config(self) -> Mapping[str, Any]: config = { 'input_spec': self._input_specs, 'task_outputs': self._task_outputs, 'heatmap_bias': self._heatmap_bias, 'input_levels': self._input_levels, } base_config = super(CenterNetHead, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def output_specs(self) -> Mapping[str, tf.TensorShape]: """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs
3,833
35.169811
79
py
models
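A single-level usage sketch of the head: the '2' level name and 256-channel input spec mirror the hourglass output convention above, while the dummy input tensor is illustrative.

import numpy as np
import tensorflow as tf

from official.projects.centernet.modeling.heads import centernet_head

head = centernet_head.CenterNetHead(
    input_specs={'2': tf.TensorShape([None, 128, 128, 256])},
    task_outputs={'ct_heatmaps': 90, 'ct_offset': 2, 'ct_size': 2},
    input_levels=['2'])
outputs = head({'2': np.zeros((1, 128, 128, 256), np.float32)})
# One output tensor per input level for each task.
print(outputs['ct_heatmaps'][0].shape)  # (1, 128, 128, 90)
print(head.output_specs['ct_size'])     # [TensorShape([1, 128, 128, 2])]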
models-master/official/projects/centernet/tasks/centernet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Centernet task definition.""" from typing import Any, List, Optional, Tuple from absl import logging import tensorflow as tf from official.core import base_task from official.core import input_reader from official.core import task_factory from official.projects.centernet.configs import centernet as exp_cfg from official.projects.centernet.dataloaders import centernet_input from official.projects.centernet.losses import centernet_losses from official.projects.centernet.modeling import centernet_model from official.projects.centernet.modeling.heads import centernet_head from official.projects.centernet.modeling.layers import detection_generator from official.projects.centernet.ops import loss_ops from official.projects.centernet.ops import target_assigner from official.vision.dataloaders import tf_example_decoder from official.vision.dataloaders import tfds_factory from official.vision.dataloaders import tf_example_label_map_decoder from official.vision.evaluation import coco_evaluator from official.vision.modeling.backbones import factory @task_factory.register_task_cls(exp_cfg.CenterNetTask) class CenterNetTask(base_task.Task): """Task definition for centernet.""" def build_inputs(self, params: exp_cfg.DataConfig, input_context: Optional[tf.distribute.InputContext] = None): """Build input dataset.""" if params.tfds_name: decoder = tfds_factory.get_detection_decoder(params.tfds_name) else: decoder_cfg = params.decoder.get() if params.decoder.type == 'simple_decoder': decoder = tf_example_decoder.TfExampleDecoder( regenerate_source_id=decoder_cfg.regenerate_source_id) elif params.decoder.type == 'label_map_decoder': decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap( label_map=decoder_cfg.label_map, regenerate_source_id=decoder_cfg.regenerate_source_id) else: raise ValueError('Unknown decoder type: {}!'.format( params.decoder.type)) parser = centernet_input.CenterNetParser( output_height=self.task_config.model.input_size[0], output_width=self.task_config.model.input_size[1], max_num_instances=self.task_config.model.max_num_instances, bgr_ordering=params.parser.bgr_ordering, channel_means=params.parser.channel_means, channel_stds=params.parser.channel_stds, aug_rand_hflip=params.parser.aug_rand_hflip, aug_scale_min=params.parser.aug_scale_min, aug_scale_max=params.parser.aug_scale_max, aug_rand_hue=params.parser.aug_rand_hue, aug_rand_brightness=params.parser.aug_rand_brightness, aug_rand_contrast=params.parser.aug_rand_contrast, aug_rand_saturation=params.parser.aug_rand_saturation, odapi_augmentation=params.parser.odapi_augmentation, dtype=params.dtype) reader = input_reader.InputReader( params, dataset_fn=tf.data.TFRecordDataset, decoder_fn=decoder.decode, parser_fn=parser.parse_fn(params.is_training)) dataset = reader.read(input_context=input_context) return dataset def build_model(self): """get an instance of CenterNet.""" model_config = self.task_config.model input_specs = 
tf.keras.layers.InputSpec( shape=[None] + model_config.input_size) l2_weight_decay = self.task_config.weight_decay # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss. # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2) # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss) l2_regularizer = (tf.keras.regularizers.l2( l2_weight_decay / 2.0) if l2_weight_decay else None) backbone = factory.build_backbone( input_specs=input_specs, backbone_config=model_config.backbone, norm_activation_config=model_config.norm_activation, l2_regularizer=l2_regularizer) task_outputs = self.task_config.get_output_length_dict() head_config = model_config.head head = centernet_head.CenterNetHead( input_specs=backbone.output_specs, task_outputs=task_outputs, input_levels=head_config.input_levels, heatmap_bias=head_config.heatmap_bias) # output_specs is a dict backbone_output_spec = backbone.output_specs[head_config.input_levels[-1]] if len(backbone_output_spec) == 4: bb_output_height = backbone_output_spec[1] elif len(backbone_output_spec) == 3: bb_output_height = backbone_output_spec[0] else: raise ValueError self._net_down_scale = int(model_config.input_size[0] / bb_output_height) dg_config = model_config.detection_generator detect_generator_obj = detection_generator.CenterNetDetectionGenerator( max_detections=dg_config.max_detections, peak_error=dg_config.peak_error, peak_extract_kernel_size=dg_config.peak_extract_kernel_size, class_offset=dg_config.class_offset, net_down_scale=self._net_down_scale, input_image_dims=model_config.input_size[0], use_nms=dg_config.use_nms, nms_pre_thresh=dg_config.nms_pre_thresh, nms_thresh=dg_config.nms_thresh) model = centernet_model.CenterNetModel( backbone=backbone, head=head, detection_generator=detect_generator_obj) return model def initialize(self, model: tf.keras.Model): """Loading pretrained checkpoint.""" if not self.task_config.init_checkpoint: return ckpt_dir_or_file = self.task_config.init_checkpoint # Restoring checkpoint. 
if tf.io.gfile.isdir(ckpt_dir_or_file): ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) if self.task_config.init_checkpoint_modules == 'all': ckpt = tf.train.Checkpoint(**model.checkpoint_items) status = ckpt.restore(ckpt_dir_or_file) status.assert_consumed() elif self.task_config.init_checkpoint_modules == 'backbone': ckpt = tf.train.Checkpoint(backbone=model.backbone) status = ckpt.restore(ckpt_dir_or_file) status.expect_partial().assert_existing_objects_matched() else: raise ValueError( "Only 'all' or 'backbone' can be used to initialize the model.") logging.info('Finished loading pretrained checkpoint from %s', ckpt_dir_or_file) def build_losses(self, outputs, labels, aux_losses=None): """Build losses.""" input_size = self.task_config.model.input_size[0:2] output_size = outputs['ct_heatmaps'][0].get_shape().as_list()[1:3] gt_label = tf.map_fn( # pylint: disable=g-long-lambda fn=lambda x: target_assigner.assign_centernet_targets( labels=x, input_size=input_size, output_size=output_size, num_classes=self.task_config.model.num_classes, max_num_instances=self.task_config.model.max_num_instances, gaussian_iou=self.task_config.losses.gaussian_iou, class_offset=self.task_config.losses.class_offset), elems=labels, fn_output_signature={ 'ct_heatmaps': tf.TensorSpec( shape=[output_size[0], output_size[1], self.task_config.model.num_classes], dtype=tf.float32), 'ct_offset': tf.TensorSpec( shape=[self.task_config.model.max_num_instances, 2], dtype=tf.float32), 'size': tf.TensorSpec( shape=[self.task_config.model.max_num_instances, 2], dtype=tf.float32), 'box_mask': tf.TensorSpec( shape=[self.task_config.model.max_num_instances], dtype=tf.int32), 'box_indices': tf.TensorSpec( shape=[self.task_config.model.max_num_instances, 2], dtype=tf.int32), } ) losses = {} # Create loss functions object_center_loss_fn = centernet_losses.PenaltyReducedLogisticFocalLoss() localization_loss_fn = centernet_losses.L1LocalizationLoss() # Set up box indices so that they have a batch element as well box_indices = loss_ops.add_batch_to_indices(gt_label['box_indices']) box_mask = tf.cast(gt_label['box_mask'], dtype=tf.float32) num_boxes = tf.cast( loss_ops.get_num_instances_from_weights(gt_label['box_mask']), dtype=tf.float32) # Calculate center heatmap loss output_unpad_image_shapes = tf.math.ceil( tf.cast(labels['unpad_image_shapes'], tf.float32) / self._net_down_scale) valid_anchor_weights = loss_ops.get_valid_anchor_weights_in_flattened_image( output_unpad_image_shapes, output_size[0], output_size[1]) valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2) pred_ct_heatmap_list = outputs['ct_heatmaps'] true_flattened_ct_heatmap = loss_ops.flatten_spatial_dimensions( gt_label['ct_heatmaps']) true_flattened_ct_heatmap = tf.cast(true_flattened_ct_heatmap, tf.float32) total_center_loss = 0.0 for ct_heatmap in pred_ct_heatmap_list: pred_flattened_ct_heatmap = loss_ops.flatten_spatial_dimensions( ct_heatmap) pred_flattened_ct_heatmap = tf.cast(pred_flattened_ct_heatmap, tf.float32) total_center_loss += object_center_loss_fn( target_tensor=true_flattened_ct_heatmap, prediction_tensor=pred_flattened_ct_heatmap, weights=valid_anchor_weights) center_loss = tf.reduce_sum(total_center_loss) / float( len(pred_ct_heatmap_list) * num_boxes) losses['ct_loss'] = center_loss # Calculate scale loss pred_scale_list = outputs['ct_size'] true_scale = tf.cast(gt_label['size'], tf.float32) total_scale_loss = 0.0 for scale_map in pred_scale_list: pred_scale = loss_ops.get_batch_predictions_from_indices(scale_map, 
box_indices) pred_scale = tf.cast(pred_scale, tf.float32) # Only apply loss for boxes that appear in the ground truth total_scale_loss += tf.reduce_sum( localization_loss_fn(target_tensor=true_scale, prediction_tensor=pred_scale), axis=-1) * box_mask scale_loss = tf.reduce_sum(total_scale_loss) / float( len(pred_scale_list) * num_boxes) losses['scale_loss'] = scale_loss # Calculate offset loss pred_offset_list = outputs['ct_offset'] true_offset = tf.cast(gt_label['ct_offset'], tf.float32) total_offset_loss = 0.0 for offset_map in pred_offset_list: pred_offset = loss_ops.get_batch_predictions_from_indices(offset_map, box_indices) pred_offset = tf.cast(pred_offset, tf.float32) # Only apply loss for boxes that appear in the ground truth total_offset_loss += tf.reduce_sum( localization_loss_fn(target_tensor=true_offset, prediction_tensor=pred_offset), axis=-1) * box_mask offset_loss = tf.reduce_sum(total_offset_loss) / float( len(pred_offset_list) * num_boxes) losses['ct_offset_loss'] = offset_loss # Aggregate and finalize loss loss_weights = self.task_config.losses.detection total_loss = (loss_weights.object_center_weight * center_loss + loss_weights.scale_weight * scale_loss + loss_weights.offset_weight * offset_loss) if aux_losses: total_loss += tf.add_n(aux_losses) losses['total_loss'] = total_loss return losses def build_metrics(self, training=True): metrics = [] metric_names = ['total_loss', 'ct_loss', 'scale_loss', 'ct_offset_loss'] for name in metric_names: metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32)) if not training: if (self.task_config.validation_data.tfds_name and self.task_config.annotation_file): raise ValueError( "Can't evaluate using annotation file when TFDS is used.") self.coco_metric = coco_evaluator.COCOEvaluator( annotation_file=self.task_config.annotation_file, include_mask=False, per_category_metrics=self.task_config.per_category_metrics) return metrics def train_step(self, inputs: Tuple[Any, Any], model: tf.keras.Model, optimizer: tf.keras.optimizers.Optimizer, metrics: Optional[List[Any]] = None): """Does forward and backward. Args: inputs: a dictionary of input tensors. model: the model, forward pass definition. optimizer: the optimizer for this training step. metrics: a nested structure of metrics objects. Returns: A dictionary of logs. """ features, labels = inputs num_replicas = tf.distribute.get_strategy().num_replicas_in_sync with tf.GradientTape() as tape: outputs = model(features, training=True) # Casting output layer as float32 is necessary when mixed_precision is # mixed_float16 or mixed_bfloat16 to ensure output is casted as float32. outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs) losses = self.build_losses(outputs['raw_output'], labels) scaled_loss = losses['total_loss'] / num_replicas # For mixed_precision policy, when LossScaleOptimizer is used, loss is # scaled for numerical stability. 
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer): scaled_loss = optimizer.get_scaled_loss(scaled_loss) # compute the gradient tvars = model.trainable_variables gradients = tape.gradient(scaled_loss, tvars) # get unscaled loss if the scaled loss was used if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer): gradients = optimizer.get_unscaled_gradients(gradients) if self.task_config.gradient_clip_norm > 0.0: gradients, _ = tf.clip_by_global_norm(gradients, self.task_config.gradient_clip_norm) optimizer.apply_gradients(list(zip(gradients, tvars))) logs = {self.loss: losses['total_loss']} if metrics: for m in metrics: m.update_state(losses[m.name]) logs.update({m.name: m.result()}) return logs def validation_step(self, inputs: Tuple[Any, Any], model: tf.keras.Model, metrics: Optional[List[Any]] = None): """Validation step. Args: inputs: a dictionary of input tensors. model: the keras.Model. metrics: a nested structure of metrics objects. Returns: A dictionary of logs. """ features, labels = inputs outputs = model(features, training=False) outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs) losses = self.build_losses(outputs['raw_output'], labels) logs = {self.loss: losses['total_loss']} coco_model_outputs = { 'detection_boxes': outputs['boxes'], 'detection_scores': outputs['confidence'], 'detection_classes': outputs['classes'], 'num_detections': outputs['num_detections'], 'source_id': labels['groundtruths']['source_id'], 'image_info': labels['image_info'] } logs.update({self.coco_metric.name: (labels['groundtruths'], coco_model_outputs)}) if metrics: for m in metrics: m.update_state(losses[m.name]) logs.update({m.name: m.result()}) return logs def aggregate_logs(self, state=None, step_outputs=None): if state is None: self.coco_metric.reset_states() state = self.coco_metric self.coco_metric.update_state(step_outputs[self.coco_metric.name][0], step_outputs[self.coco_metric.name][1]) return state def reduce_aggregated_logs(self, aggregated_logs, global_step=None): return self.coco_metric.result()
16,719
38.248826
80
py
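The task above wires the CenterNet dataloader, model, losses, and COCO metrics behind the standard `base_task.Task` interface. Below is a minimal, hedged usage sketch; it assumes this module is importable as `official.projects.centernet.tasks.centernet`, that the default `exp_cfg.CenterNetTask()` config carries a valid backbone and input size, and that `train_data` points at real TFRecords, none of which is guaranteed by the snippet itself.

import tensorflow as tf

from official.projects.centernet.configs import centernet as exp_cfg
# Hypothetical module path for the task definition shown above.
from official.projects.centernet.tasks import centernet as centernet_tasks

task_config = exp_cfg.CenterNetTask()
task = centernet_tasks.CenterNetTask(task_config)

# Build the CenterNet model and (optionally) restore a pretrained backbone.
model = task.build_model()
task.initialize(model)  # No-op unless init_checkpoint is set in the config.

# Training data comes from build_inputs; one train_step consumes one batch.
dataset = task.build_inputs(task_config.train_data)
metrics = task.build_metrics(training=True)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
for batch in dataset.take(1):
  logs = task.train_step(batch, model, optimizer, metrics=metrics)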
models
models-master/official/projects/const_cl/modeling/const_cl_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Builds ConST-CL SSL models."""

from typing import Mapping, Optional

import tensorflow as tf

from official.projects.const_cl.configs import const_cl as const_cl_cfg
from official.projects.const_cl.modeling.heads import instance_reconstructor
from official.projects.const_cl.modeling.heads import simple
from official.vision.modeling import backbones
from official.vision.modeling import factory_3d as model_factory

layers = tf.keras.layers


class ConstCLModel(tf.keras.Model):
  """A ConST-CL SSL model class builder."""

  def __init__(
      self,
      backbone,
      input_specs: Optional[Mapping[str, tf.keras.layers.InputSpec]] = None,
      # global_head
      num_hidden_layers: int = 3,
      num_hidden_channels: int = 1024,
      num_output_channels: int = 128,
      use_sync_bn: bool = False,
      norm_momentum: float = 0.99,
      norm_epsilon: float = 1e-5,
      activation: Optional[str] = None,
      normalize_global_features: bool = False,
      # local_head
      context_level: int = 1,
      num_tx_output_channels: int = 1024,
      crop_size: int = 4,
      sample_offset: float = 0.5,
      num_tx_channels: int = 128,
      num_tx_layers: int = 3,
      num_tx_heads: int = 3,
      use_bias: bool = True,
      tx_activation: str = 'gelu',
      dropout_rate: float = 0.0,
      layer_norm_epsilon: float = 1e-6,
      use_positional_embedding: bool = True,
      normalize_local_features: bool = True,
      **kwargs):
    """ConST-CL SSL model initialization function.

    Args:
      backbone: a 3d backbone network.
      input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
      num_hidden_layers: the number of hidden layers in the MLP.
      num_hidden_channels: the number of hidden nodes in the MLP.
      num_output_channels: the number of final output nodes in the MLP.
      use_sync_bn: whether to use sync batch norm in the MLP.
      norm_momentum: the MLP batch norm momentum.
      norm_epsilon: the MLP batch norm epsilon.
      activation: the MLP activation function.
      normalize_global_features: whether to normalize inputs to the MLP.
      context_level: the number of context frames to use.
      num_tx_output_channels: the number of final output channels for the
        instance reconstructor.
      crop_size: the ROI aligner crop size.
      sample_offset: the ROI aligner sample offset.
      num_tx_channels: the Transformer decoder head channels.
      num_tx_layers: the number of Transformer decoder layers.
      num_tx_heads: the number of Transformer decoder heads per layer.
      use_bias: whether to use bias in the Transformer.
      tx_activation: the activation function to use in the Transformer.
      dropout_rate: the dropout rate for the Transformer.
      layer_norm_epsilon: the layer norm epsilon.
      use_positional_embedding: whether to use positional embedding.
      normalize_local_features: whether to normalize input embeddings.
      **kwargs: keyword arguments to be passed.
""" if not input_specs: input_specs = { 'image': layers.InputSpec(shape=[None, None, None, None, 3]), 'instances_position': layers.InputSpec(shape=[None, None, None, 4]), 'instances_mask': layers.InputSpec(shape=[None, None, None]), } self._self_setattr_tracking = False self._config_dict = { 'backbone': backbone, 'num_hidden_layers': num_hidden_layers, 'num_hidden_channels': num_hidden_channels, 'num_output_channels': num_output_channels, 'use_sync_bn': use_sync_bn, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, 'activation': activation, 'normalize_global_features': normalize_global_features, 'context_level': context_level, 'num_tx_output_channels': num_tx_output_channels, 'crop_size': crop_size, 'sample_offset': sample_offset, 'num_tx_channels': num_tx_channels, 'num_tx_layers': num_tx_layers, 'num_tx_heads': num_tx_heads, 'use_bias': use_bias, 'tx_activation': tx_activation, 'dropout_rate': dropout_rate, 'layer_norm_epsilon': layer_norm_epsilon, 'use_positional_embedding': use_positional_embedding, 'normalize_local_features': normalize_local_features, } self._input_specs = input_specs self._backbone = backbone inputs = { k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items() } endpoints = backbone(inputs['image']) res5 = endpoints['5'] res5 = tf.keras.layers.GlobalAveragePooling3D()(res5) res5_1 = endpoints['5_1'] global_embeddings = simple.MLP( num_hidden_layers=num_hidden_layers, num_hidden_channels=num_hidden_channels, num_output_channels=num_output_channels, use_sync_bn=use_sync_bn, norm_momentum=norm_momentum, norm_epsilon=norm_epsilon, activation=activation, normalize_inputs=normalize_global_features)(res5) instance_inputs = { 'features': res5_1, 'instances_position': inputs['instances_position'], 'instances_mask': inputs['instances_mask'], } instances_outputs = instance_reconstructor.InstanceReconstructor( context_level=context_level, # parameters for projector num_output_channels=num_tx_output_channels, # parameters for RoiAligner crop_size=crop_size, sample_offset=sample_offset, # parameters for TxDecoder num_tx_channels=num_tx_channels, num_tx_layers=num_tx_layers, num_tx_heads=num_tx_heads, use_bias=use_bias, activation=tx_activation, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, use_positional_embedding=use_positional_embedding, normalize_inputs=normalize_local_features)(instance_inputs) outputs = instances_outputs outputs['global_embeddings'] = global_embeddings super().__init__(inputs=inputs, outputs=outputs, **kwargs) @property def checkpoint_items(self): """Returns a dictionary of items to be additionally checkpointed.""" return dict(backbone=self.backbone) @property def backbone(self): return self._backbone def get_config(self): return self._config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @model_factory.register_model_builder('const_cl_model') def build_const_cl_pretrain_model( input_specs_dict: Mapping[str, tf.keras.layers.InputSpec], model_config: const_cl_cfg.ConstCLModel, num_classes: int, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None ) -> ConstCLModel: """Builds the ConST-CL video ssl model.""" del num_classes backbone = backbones.factory.build_backbone( input_specs=input_specs_dict['image'], backbone_config=model_config.backbone, norm_activation_config=model_config.norm_activation, l2_regularizer=l2_regularizer) # Norm layer type in the MLP head should same with backbone if (model_config.norm_activation.use_sync_bn != 
model_config.global_head.use_sync_bn): raise ValueError('Should use the same batch normalization type.') return ConstCLModel( backbone=backbone, input_specs=input_specs_dict, # global_head num_hidden_channels=model_config.global_head.num_hidden_channels, num_hidden_layers=model_config.global_head.num_hidden_layers, num_output_channels=model_config.global_head.num_output_channels, use_sync_bn=model_config.global_head.use_sync_bn, norm_momentum=model_config.global_head.norm_momentum, norm_epsilon=model_config.global_head.norm_epsilon, activation=model_config.global_head.activation, normalize_global_features=model_config.global_head.normalize_inputs, # local_head context_level=model_config.local_head.context_level, num_tx_output_channels=model_config.local_head.num_output_channels, crop_size=model_config.local_head.crop_size, sample_offset=model_config.local_head.sample_offset, num_tx_channels=model_config.local_head.num_tx_channels, num_tx_layers=model_config.local_head.num_tx_layers, num_tx_heads=model_config.local_head.num_tx_heads, use_bias=model_config.local_head.use_bias, tx_activation=model_config.local_head.activation, dropout_rate=model_config.local_head.dropout_rate, layer_norm_epsilon=model_config.local_head.layer_norm_epsilon, use_positional_embedding=model_config.local_head.use_positional_embedding, normalize_local_features=model_config.local_head.normalize_inputs)
9,301
38.922747
80
py
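As a hedged forward-pass sketch (hypothetical shapes; the SSL pipeline stacks the two augmented views along the batch axis, so the leading dimension is 2*B), the model returns the global MLP embeddings alongside the instance-reconstruction outputs. Construction mirrors the unit test that follows.

import tensorflow as tf

from official.projects.const_cl.configs import const_cl as const_cl_cfg
from official.projects.const_cl.modeling import const_cl_model
# Registers the 'resnet_3dy' backbone referenced by the default config.
from official.projects.const_cl.modeling.backbones import resnet_3d  # pylint: disable=unused-import

model = const_cl_model.build_const_cl_pretrain_model(
    input_specs_dict={
        'image': tf.keras.layers.InputSpec(shape=[None, 16, 224, 224, 3]),
        'instances_position': tf.keras.layers.InputSpec(
            shape=[None, 16, 8, 4]),
        'instances_mask': tf.keras.layers.InputSpec(shape=[None, 16, 8]),
    },
    model_config=const_cl_cfg.ConstCLModel(),
    num_classes=0)  # num_classes is unused by the pretraining model.

outputs = model({
    'image': tf.random.normal([2, 16, 224, 224, 3]),  # 2 == 2*B with B=1.
    'instances_position': tf.random.uniform([2, 16, 8, 4]),
    'instances_mask': tf.ones([2, 16, 8]),
})
# 'global_embeddings' from the MLP head, plus the instance reconstructor
# outputs: 'inst_a2b', 'inst_b2a', 'inst_a', 'inst_b', 'masks_a', 'masks_b'.
print(sorted(outputs.keys()))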
models
models-master/official/projects/const_cl/modeling/const_cl_model_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for const_cl_model.""" import tensorflow as tf from official.projects.const_cl.configs import const_cl as const_cl_cfg from official.projects.const_cl.modeling import const_cl_model # pylint: disable=unused-import from official.projects.const_cl.modeling.backbones import resnet_3d # pylint: enable=unused-import class ConstClModelTest(tf.test.TestCase): def test_build_const_cl_pretrain_model(self): model_config = const_cl_cfg.ConstCLModel() images_input_specs = tf.keras.layers.InputSpec( shape=[None, 16, 224, 224, 4]) boxes_input_specs = tf.keras.layers.InputSpec(shape=[None, 16, 8, 4]) masks_input_specs = tf.keras.layers.InputSpec(shape=[None, 16, 8]) input_specs_dict = { 'image': images_input_specs, 'instances_position': boxes_input_specs, 'instances_mask': masks_input_specs, } model = const_cl_model.build_const_cl_pretrain_model( input_specs_dict=input_specs_dict, model_config=model_config, num_classes=500) self.assertIsInstance(model, const_cl_model.ConstCLModel) if __name__ == '__main__': tf.test.main()
1,740
35.270833
74
py
models
models-master/official/projects/const_cl/modeling/backbones/nn_blocks_3d.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains common building blocks for 3D networks.""" import tensorflow as tf from official.vision.modeling.layers import nn_blocks_3d from official.vision.modeling.layers import nn_layers SelfGating = nn_blocks_3d.SelfGating class BottleneckBlock3D(nn_blocks_3d.BottleneckBlock3D): """Creates a 3D bottleneck block.""" def build(self, input_shape): self._shortcut_maxpool = tf.keras.layers.MaxPool3D( pool_size=[1, 1, 1], strides=[ self._temporal_strides, self._spatial_strides, self._spatial_strides ]) self._shortcut_conv = tf.keras.layers.Conv3D( filters=4 * self._filters, kernel_size=1, strides=[ self._temporal_strides, self._spatial_strides, self._spatial_strides ], use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, name='shortcut_conv') self._norm0 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, name='shortcut_conv/batch_norm') self._temporal_conv = tf.keras.layers.Conv3D( filters=self._filters, kernel_size=[self._temporal_kernel_size, 1, 1], strides=[self._temporal_strides, 1, 1], padding='same', use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, name='temporal_conv') self._norm1 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, name='temporal_conv/batch_norm') self._spatial_conv = tf.keras.layers.Conv3D( filters=self._filters, kernel_size=[1, 3, 3], strides=[1, self._spatial_strides, self._spatial_strides], padding='same', use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, name='spatial_conv') self._norm2 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, name='spatial_conv/batch_norm') self._expand_conv = tf.keras.layers.Conv3D( filters=4 * self._filters, kernel_size=[1, 1, 1], strides=[1, 1, 1], padding='same', use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, name='expand_conv') self._norm3 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, name='expand_conv/batch_norm/') if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1: self._squeeze_excitation = nn_layers.SqueezeExcitation( in_filters=self._filters * 4, out_filters=self._filters * 4, se_ratio=self._se_ratio, use_3d_input=True, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, name='se_layer') else: self._squeeze_excitation = None if self._stochastic_depth_drop_rate: self._stochastic_depth = nn_layers.StochasticDepth( self._stochastic_depth_drop_rate) else: self._stochastic_depth = None if 
self._use_self_gating: self._self_gating = SelfGating(filters=4 * self._filters, name='self_gating') else: self._self_gating = None
4,382
34.634146
80
py
models
models-master/official/projects/const_cl/modeling/backbones/resnet_3d_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for resnet.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.projects.const_cl.modeling.backbones import resnet_3d class ResNet3DTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (128, 50, 4, 'v0', False, 0.0), (128, 50, 4, 'v0', False, 0.2), (256, 50, 4, 'v0', True, 0.2), ) def test_network_creation(self, input_size, model_id, endpoint_filter_scale, stem_type, se_ratio, init_stochastic_depth_rate): """Test creation of ResNet3D family models.""" tf.keras.backend.set_image_data_format('channels_last') temporal_strides = [1, 1, 1, 1] temporal_kernel_sizes = [(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1), (1, 3, 1)] use_self_gating = [True, False, True, False] network = resnet_3d.ResNet3DY( model_id=model_id, temporal_strides=temporal_strides, temporal_kernel_sizes=temporal_kernel_sizes, use_self_gating=use_self_gating, stem_type=stem_type, se_ratio=se_ratio, init_stochastic_depth_rate=init_stochastic_depth_rate) inputs = tf.keras.Input(shape=(8, input_size, input_size, 3), batch_size=1) endpoints = network(inputs) self.assertAllEqual([ 1, 2, input_size / 2**2, input_size / 2**2, 64 * endpoint_filter_scale ], endpoints['2'].shape.as_list()) self.assertAllEqual([ 1, 2, input_size / 2**3, input_size / 2**3, 128 * endpoint_filter_scale ], endpoints['3'].shape.as_list()) self.assertAllEqual([ 1, 2, input_size / 2**4, input_size / 2**4, 256 * endpoint_filter_scale ], endpoints['4'].shape.as_list()) self.assertAllEqual([ 1, 2, input_size / 2**5, input_size / 2**5, 512 * endpoint_filter_scale ], endpoints['5'].shape.as_list()) self.assertAllEqual([ 1, 2, input_size / 2**5, input_size / 2**5, 512 * endpoint_filter_scale ], endpoints['5_1'].shape.as_list()) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( model_id=50, temporal_strides=[1, 1, 1, 1], temporal_kernel_sizes=[(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1), (1, 3, 1)], stem_type='v0', stem_conv_temporal_kernel_size=5, stem_conv_temporal_stride=2, stem_pool_temporal_stride=2, se_ratio=0.0, use_self_gating=None, init_stochastic_depth_rate=0.0, use_sync_bn=False, activation='relu', norm_momentum=0.99, norm_epsilon=0.001, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, ) network = resnet_3d.ResNet3DY(**kwargs) expected_config = dict(kwargs) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = resnet_3d.ResNet3DY.from_config(network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == '__main__': tf.test.main()
3,950
36.273585
79
py
models
models-master/official/projects/const_cl/modeling/backbones/nn_blocks_3d_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for resnet.""" from absl.testing import parameterized import tensorflow as tf from official.projects.const_cl.modeling.backbones import nn_blocks_3d class NNBlocksTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (nn_blocks_3d.BottleneckBlock3D, 1, 1, 2, True, 0.2, 0.1), (nn_blocks_3d.BottleneckBlock3D, 3, 2, 1, False, 0.0, 0.0), ) def test_bottleneck_block_creation(self, block_fn, temporal_kernel_size, temporal_strides, spatial_strides, use_self_gating, se_ratio, stochastic_depth): temporal_size = 16 spatial_size = 128 filters = 256 inputs = tf.keras.Input( shape=(temporal_size, spatial_size, spatial_size, filters * 4), batch_size=1) block = block_fn( filters=filters, temporal_kernel_size=temporal_kernel_size, temporal_strides=temporal_strides, spatial_strides=spatial_strides, use_self_gating=use_self_gating, se_ratio=se_ratio, stochastic_depth_drop_rate=stochastic_depth) features = block(inputs) self.assertAllEqual([ 1, temporal_size // temporal_strides, spatial_size // spatial_strides, spatial_size // spatial_strides, filters * 4 ], features.shape.as_list()) vnames = [v.name for v in block.trainable_variables] expected_names = [ 'bottleneck_block3d/temporal_conv/kernel:0', 'bottleneck_block3d/temporal_conv/batch_norm/gamma:0', 'bottleneck_block3d/temporal_conv/batch_norm/beta:0', 'bottleneck_block3d/spatial_conv/kernel:0', 'bottleneck_block3d/spatial_conv/batch_norm/gamma:0', 'bottleneck_block3d/spatial_conv/batch_norm/beta:0', 'bottleneck_block3d/expand_conv/kernel:0', 'bottleneck_block3d/expand_conv/batch_norm/gamma:0', 'bottleneck_block3d/expand_conv/batch_norm/beta:0' ] self.assertContainsSubset(expected_names, vnames) if __name__ == '__main__': tf.test.main()
2,687
36.859155
78
py
models
models-master/official/projects/const_cl/modeling/backbones/resnet_3d.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of 3D Residual Networks.""" from typing import Any, Callable, List, Optional, Tuple import tensorflow as tf from official.modeling import hyperparams from official.modeling import tf_utils from official.projects.const_cl.modeling.backbones import nn_blocks_3d from official.vision.modeling.backbones import factory from official.vision.modeling.backbones import resnet_3d from official.vision.modeling.layers import nn_layers layers = tf.keras.layers RESNET_SPECS = resnet_3d.RESNET_SPECS @tf.keras.utils.register_keras_serializable(package='Vision') class ResNet3DY(tf.keras.Model): """Creates a 3D ResNet family model with branched res5 block.""" def __init__( self, model_id: int, temporal_strides: List[int], temporal_kernel_sizes: List[Tuple[int]], use_self_gating: Optional[List[int]] = None, input_specs: tf.keras.layers.InputSpec = layers.InputSpec( shape=[None, None, None, None, 3]), stem_type: str = 'v0', stem_conv_temporal_kernel_size: int = 5, stem_conv_temporal_stride: int = 2, stem_pool_temporal_stride: int = 2, init_stochastic_depth_rate: float = 0.0, activation: str = 'relu', se_ratio: Optional[float] = None, use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a 3D ResNet model. Args: model_id: An `int` of depth of ResNet backbone model. temporal_strides: A list of integers that specifies the temporal strides for all 3d blocks. temporal_kernel_sizes: A list of tuples that specifies the temporal kernel sizes for all 3d blocks in different block groups. use_self_gating: A list of booleans to specify applying self-gating module or not in each block group. If None, self-gating is not applied. input_specs: A `tf.keras.layers.InputSpec` of the input tensor. stem_type: A `str` of stem type of ResNet. Default to `v0`. If set to `v1`, use ResNet-D type stem (https://arxiv.org/abs/1812.01187). stem_conv_temporal_kernel_size: An `int` of temporal kernel size for the first conv layer. stem_conv_temporal_stride: An `int` of temporal stride for the first conv layer. stem_pool_temporal_stride: An `int` of temporal stride for the first pool layer. init_stochastic_depth_rate: A `float` of initial stochastic depth rate. activation: A `str` of name of the activation function. se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer. use_sync_bn: If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_initializer: A str for kernel initializer of convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. 
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. **kwargs: Additional keyword arguments to be passed. """ super().__init__(**kwargs) self._model_id = model_id self._temporal_strides = temporal_strides self._temporal_kernel_sizes = temporal_kernel_sizes self._input_specs = input_specs self._stem_type = stem_type self._stem_conv_temporal_kernel_size = stem_conv_temporal_kernel_size self._stem_conv_temporal_stride = stem_conv_temporal_stride self._stem_pool_temporal_stride = stem_pool_temporal_stride self._use_self_gating = use_self_gating self._se_ratio = se_ratio self._init_stochastic_depth_rate = init_stochastic_depth_rate self._use_sync_bn = use_sync_bn self._activation = activation self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon if use_sync_bn: self._norm = layers.experimental.SyncBatchNormalization else: self._norm = layers.BatchNormalization self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 # Build ResNet3D backbone. inputs = tf.keras.Input(shape=input_specs.shape[1:]) self._build_model(inputs) def _build_model(self, inputs): """Builds model architecture. Args: inputs: the Keras input spec. Returns: endpoints: A dictionary of backbone endpoint features. """ # Build stem. self._build_stem(inputs, stem_type=self._stem_type) temporal_kernel_size = 1 if self._stem_pool_temporal_stride == 1 else 3 self._max_pool = layers.MaxPool3D( pool_size=[temporal_kernel_size, 3, 3], strides=[self._stem_pool_temporal_stride, 2, 2], padding='same') # Build intermediate blocks and endpoints. resnet_specs = RESNET_SPECS[self._model_id] if len(self._temporal_strides) != len(resnet_specs) or len( self._temporal_kernel_sizes) != len(resnet_specs): raise ValueError( 'Number of blocks in temporal specs should equal to resnet_specs.') self._blocks = {} for i, resnet_spec in enumerate(resnet_specs): if resnet_spec[0] == 'bottleneck3d': block_fn = nn_blocks_3d.BottleneckBlock3D else: raise ValueError('Block fn `{}` is not supported.'.format( resnet_spec[0])) use_self_gating = ( self._use_self_gating[i] if self._use_self_gating else False) self._blocks[f'res_{i+2}'] = self._build_block_group( inputs=inputs, filters=resnet_spec[1], temporal_kernel_sizes=self._temporal_kernel_sizes[i], temporal_strides=self._temporal_strides[i], spatial_strides=(1 if i == 0 else 2), block_fn=block_fn, block_repeats=resnet_spec[2], stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate( self._init_stochastic_depth_rate, i + 2, 5), use_self_gating=use_self_gating, name='res_{}'.format(i + 2)) # Duplicate res5 block. 
resnet_specs = RESNET_SPECS[self._model_id] resnet_spec = resnet_specs[-1] i = len(resnet_specs) - 1 if resnet_spec[0] == 'bottleneck3d': block_fn = nn_blocks_3d.BottleneckBlock3D else: raise ValueError('Block fn `{}` is not supported.'.format( resnet_spec[0])) use_self_gating = ( self._use_self_gating[i] if self._use_self_gating else False) block_layers = self._build_block_group( inputs=inputs, filters=resnet_spec[1], temporal_kernel_sizes=self._temporal_kernel_sizes[i], temporal_strides=self._temporal_strides[i], spatial_strides=(1 if i == 0 else 2), block_fn=block_fn, block_repeats=resnet_spec[2], stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate( self._init_stochastic_depth_rate, i + 2, 5), use_self_gating=use_self_gating, name='res_{}_1'.format(i + 2)) self._res_5_1_layers = block_layers def _build_stem(self, inputs, stem_type): """Builds stem layer.""" del inputs # Build stem. if stem_type == 'v0': self._stem_conv = layers.Conv3D( filters=64, kernel_size=[self._stem_conv_temporal_kernel_size, 7, 7], strides=[self._stem_conv_temporal_stride, 2, 2], use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, name='stem') self._stem_bn = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, name='stem/batch_norm') self._stem_activation = tf_utils.get_activation(self._activation) else: raise ValueError(f'Stem type {stem_type} not supported.') def _build_block_group( self, inputs: tf.Tensor, filters: int, temporal_kernel_sizes: Tuple[int], temporal_strides: int, spatial_strides: int, block_fn: Callable[ ..., tf.keras.layers.Layer] = nn_blocks_3d.BottleneckBlock3D, block_repeats: int = 1, stochastic_depth_drop_rate: float = 0.0, use_self_gating: bool = False, name: str = 'block_group'): """Creates one group of blocks for the ResNet3D model. Args: inputs: A `tf.Tensor` of size `[batch, channels, height, width]`. filters: An `int` of number of filters for the first convolution of the layer. temporal_kernel_sizes: A tuple that specifies the temporal kernel sizes for each block in the current group. temporal_strides: An `int` of temporal strides for the first convolution in this group. spatial_strides: An `int` stride to use for the first convolution of the layer. If greater than 1, this layer will downsample the input. block_fn: Either `nn_blocks.ResidualBlock` or `nn_blocks.BottleneckBlock`. block_repeats: An `int` of number of blocks contained in the layer. stochastic_depth_drop_rate: A `float` of drop rate of the current block group. use_self_gating: A `bool` that specifies whether to apply self-gating module or not. name: A `str` name for the block. Returns: The output `tf.Tensor` of the block layer. """ del inputs if len(temporal_kernel_sizes) != block_repeats: raise ValueError( 'Number of elements in temporal_kernel_sizes must equal to ' 'block_repeats.') # Only apply self-gating module in the last block. 
use_self_gating_list = [False] * (block_repeats - 1) + [use_self_gating] name = 'cell' block_layers = {} block_layers[f'{name}_0'] = block_fn( filters=filters, temporal_kernel_size=temporal_kernel_sizes[0], temporal_strides=temporal_strides, spatial_strides=spatial_strides, stochastic_depth_drop_rate=stochastic_depth_drop_rate, use_self_gating=use_self_gating_list[0], se_ratio=self._se_ratio, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, name=f'{name}_0') for i in range(1, block_repeats): block_layers[f'{name}_{i}'] = block_fn( filters=filters, temporal_kernel_size=temporal_kernel_sizes[i], temporal_strides=1, spatial_strides=1, stochastic_depth_drop_rate=stochastic_depth_drop_rate, use_self_gating=use_self_gating_list[i], se_ratio=self._se_ratio, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, name=f'{name}_{i}') return block_layers def call(self, inputs: tf.Tensor, training: bool = False, mask: Any = None): """Calls ResNet3DY model.""" del mask x = self._stem_conv(inputs, training=training) x = self._stem_bn(x, training=training) x = self._stem_activation(x) x = self._max_pool(x) res4 = None endpoints = {} for i, block_layers in enumerate(self._blocks.values()): for block_fn in block_layers.values(): x = block_fn(x, training=training) endpoints[f'{i + 2}'] = x if i + 2 == 4: res4 = x for block_fn in self._res_5_1_layers.values(): res4 = block_fn(res4, training=training) endpoints['5_1'] = res4 return endpoints def get_config(self): config_dict = { 'model_id': self._model_id, 'temporal_strides': self._temporal_strides, 'temporal_kernel_sizes': self._temporal_kernel_sizes, 'stem_type': self._stem_type, 'stem_conv_temporal_kernel_size': self._stem_conv_temporal_kernel_size, 'stem_conv_temporal_stride': self._stem_conv_temporal_stride, 'stem_pool_temporal_stride': self._stem_pool_temporal_stride, 'use_self_gating': self._use_self_gating, 'se_ratio': self._se_ratio, 'init_stochastic_depth_rate': self._init_stochastic_depth_rate, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, } return config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @factory.register_backbone_builder('resnet_3dy') def build_resnet3dy( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None ) -> tf.keras.Model: """Builds ResNet 3d-Y backbone from a config.""" backbone_cfg = backbone_config.get() # Flatten configs before passing to the backbone. 
temporal_strides = [] temporal_kernel_sizes = [] use_self_gating = [] for block_spec in backbone_cfg.block_specs: temporal_strides.append(block_spec.temporal_strides) temporal_kernel_sizes.append(block_spec.temporal_kernel_sizes) use_self_gating.append(block_spec.use_self_gating) return ResNet3DY( model_id=backbone_cfg.model_id, temporal_strides=temporal_strides, temporal_kernel_sizes=temporal_kernel_sizes, use_self_gating=use_self_gating, input_specs=input_specs, stem_type=backbone_cfg.stem_type, stem_conv_temporal_kernel_size=backbone_cfg .stem_conv_temporal_kernel_size, stem_conv_temporal_stride=backbone_cfg.stem_conv_temporal_stride, stem_pool_temporal_stride=backbone_cfg.stem_pool_temporal_stride, init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate, se_ratio=backbone_cfg.se_ratio, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer)
15,599
38.795918
80
py
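The per-group `stochastic_depth_drop_rate` passed to `_build_block_group` above comes from `nn_layers.get_stochastic_depth_rate(init_rate, i + 2, 5)`. A small worked example of the resulting schedule, assuming the helper scales the initial rate linearly with the block-group index over the 5 total groups:

from official.vision.modeling.layers import nn_layers

init_rate = 0.2
for i in range(4):  # Block groups res_2 .. res_5 (shared by the res_5_1 copy).
  rate = nn_layers.get_stochastic_depth_rate(init_rate, i + 2, 5)
  print('res_%d drop rate: %.2f' % (i + 2, rate))
# Expected: res_2 0.08, res_3 0.12, res_4 0.16, res_5 0.20; deeper groups
# are dropped more aggressively.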
models
models-master/official/projects/const_cl/modeling/heads/instance_reconstructor.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The instance feature reconstructor head.""" from typing import Mapping import tensorflow as tf from official.projects.const_cl.modeling.heads import transformer_decoder from official.vision.modeling.layers import roi_aligner def _get_shape(x): """Helper function to return shape of a given tensor.""" static = x.shape.as_list() dynamic = tf.shape(x) return [dynamic[i] if s is None else s for i, s in enumerate(static)] class InstanceReconstructor(tf.keras.layers.Layer): """The SSL head for reconstructing contextualized instance representations.""" def __init__(self, context_level: int = 1, # parameters for projector num_output_channels: int = 1024, # parameters for RoiAligner crop_size: int = 4, sample_offset: float = 0.5, # parameters for TxDecoder num_tx_channels: int = 128, num_tx_layers: int = 3, num_tx_heads: int = 3, use_bias: bool = True, activation: str = 'gelu', dropout_rate: float = 0.0, layer_norm_epsilon: float = 1e-6, use_positional_embedding: bool = True, normalize_inputs: bool = True, **kwargs): """InstanceReconstructor SSL head initializer. Args: context_level: the number of context frame to use. num_output_channels: the number of final output channels. crop_size: the ROI aligner crop size. sample_offset: the ROI aligner sample offset. num_tx_channels: the Transformer decoder head channels. num_tx_layers: the number of Transformer decoder layers. num_tx_heads: the number of Transformer decoder heads per layer. use_bias: whether to use bias. activation: the activation function to use. dropout_rate: the dropout rate. layer_norm_epsilon: the layer norm epsilon. use_positional_embedding: whether to use positional embedding. normalize_inputs: whether to normalize input embeddings. **kwargs: the kwargs. 
""" super().__init__(**kwargs) self._normalize_inputs = normalize_inputs self._context_level = context_level self._num_output_channels = num_output_channels self._crop_size = crop_size self._sample_offset = sample_offset self._num_tx_channels = num_tx_channels self._num_tx_layers = num_tx_layers self._num_tx_heads = num_tx_heads self._use_bias = use_bias self._activation = activation self._dropout_rate = dropout_rate self._layer_norm_epsilon = layer_norm_epsilon self._use_positional_embedding = use_positional_embedding self._roi_aligner = roi_aligner.MultilevelROIAligner( crop_size=crop_size, sample_offset=sample_offset) if self._use_positional_embedding: self._spatial_mlp = [ tf.keras.layers.Dense( 4, use_bias=True, activation='relu', name='spatial_mlp_l1'), tf.keras.layers.Dense( 8, use_bias=True, name='spatial_mlp_l2')] self._temporal_mlp = [ tf.keras.layers.Dense( 4, use_bias=True, activation='relu', name='temporal_mlp_l1'), tf.keras.layers.Dense( 8, use_bias=True, name='temporal_mlp_l2')] self._attention_decoder = transformer_decoder.TransformerDecoder( num_channels=num_tx_channels, num_layers=num_tx_layers, num_heads=num_tx_heads, use_bias=use_bias, activation=activation, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon) self._projection_layer = tf.keras.layers.Dense(num_output_channels) def _get_memory_embeddings(self, inputs: tf.Tensor) -> tf.Tensor: """Uniformly samples frames to construct memory embeddings.""" if self._context_level % 2 == 0: raise ValueError('context_level should be specified as odd number.') num_frames = tf.shape(inputs)[1] keyframe_index = num_frames // 2 stride = num_frames // self._context_level start = self._context_level // 2 * -1 stop = self._context_level // 2 + 1 # exclusive memories = [] for idx in range(start, stop): idx = idx * stride + keyframe_index memories.append(inputs[:, idx, ...]) memories = tf.stack(memories, axis=1) return memories def _add_positional_embedding(self, inputs: tf.Tensor) -> tf.Tensor: """Adds positional embeddings to the inputs tensor.""" # Compute the locations using meshgrid. b, t, h, w = _get_shape(inputs)[:4] mesh = tf.meshgrid(tf.range(t), tf.range(h), tf.range(w), indexing='ij') position = tf.cast( tf.tile( tf.expand_dims(tf.stack(mesh, axis=-1), axis=0), [b, 1, 1, 1, 1]), tf.float32) # Make the positions relative to center point. # The mean of all position coordinates would be the center point anyway center_position = tf.reduce_mean(position, axis=[1, 2, 3], keepdims=True) position -= center_position # Apply learneable layers. temporal_position = position[..., :1] for mlp in self._temporal_mlp: temporal_position = mlp(temporal_position) spatial_position = position[..., 1:] for mlp in self._spatial_mlp: spatial_position = mlp(spatial_position) return tf.concat([inputs, temporal_position, spatial_position], axis=-1) def _keyframe_roi_pooling(self, features: tf.Tensor, boxes: tf.Tensor, training: bool = True) -> tf.Tensor: """Pools ROI features on the keyframe. Args: features: a 5D tensor in shape [B, T, H, W, C]. boxes: normalized box coordinates, a 4D tensor in shape [B, T', N, 4]. training: whether in training mode. Returns: roi_feature: pooled ROI-features in shape [B, N, C]. """ if features.shape.ndims != 5: raise ValueError('Expected features is a rank-5 tensor. Got shape %s' % features.shape) keyframe_index = tf.shape(boxes)[1] // 2 t, h, w = _get_shape(features)[1:4] roi_features = {'0': features[:, t // 2, ...]} keyframe_boxes = boxes[:, keyframe_index, ...] 
unnormalized_boxes = keyframe_boxes * tf.convert_to_tensor( [h, w, h, w], keyframe_boxes.dtype) # roi_features in shape [B, N, h, w, C] roi_features = self._roi_aligner( roi_features, unnormalized_boxes, training=training) roi_shape = _get_shape(roi_features) # Perform average_pooling on ROI-pooled features. roi_features = tf.reshape(roi_features, [-1] + roi_shape[2:]) roi_features = tf.reduce_mean(roi_features, axis=[1, 2]) roi_features = tf.reshape(roi_features, roi_shape[:2] + roi_shape[-1:]) return roi_features def call(self, inputs: Mapping[str, tf.Tensor], training: bool = False) -> Mapping[str, tf.Tensor]: """Forward calls. Args: inputs: the inputs dictionary contains 'features': the instance embeddings in shape [2*B, T', H, W, C]. 'instances_positions': the instance boxes in shape [2*B, T, N, 4]. 'instances_mask': the validity mask for each instance position, in [2*B, T, N]. training: whether in training mode. Returns: the context-guided reconstructed instance representations. """ dense_embeddings_raw = inputs['features'] instances_position = inputs['instances_position'] instances_mask = inputs['instances_mask'] if self._normalize_inputs: dense_embeddings_raw = tf.math.l2_normalize(dense_embeddings_raw, axis=-1) def _keyframe_temporal_pooling(inputs): t = tf.shape(inputs)[1] // 2 return inputs[:, t:t+1, ...] dense_embeddings = _keyframe_temporal_pooling(dense_embeddings_raw) instances_position = _keyframe_temporal_pooling(instances_position) instances_mask = _keyframe_temporal_pooling(instances_mask) instances_mask_a, instances_mask_b = tf.split( tf.squeeze(instances_mask, axis=1), num_or_size_splits=2, axis=0) inst_embeddings = self._keyframe_roi_pooling( features=dense_embeddings, boxes=instances_position, training=training) inst_embeddings_a, inst_embeddings_b = tf.split(inst_embeddings, 2, axis=0) memory = self._get_memory_embeddings(dense_embeddings_raw) # Add the positional embeddings before roi_pooling and tx_decoder. if self._use_positional_embedding: memory = self._add_positional_embedding(memory) memory_a, memory_b = tf.split(memory, 2, axis=0) # Reconstruct inst_a2b by querying in memory_b. inst_embeddings_a2b = self._attention_decoder( inputs=inst_embeddings_a, memory=memory_b, training=training) inst_embeddings_a2b = inst_embeddings_a2b['hidden_states'][-1] inst_embeddings_a2b = self._projection_layer( inst_embeddings_a2b, training=training) # Reconstruct inst_b2a by querying in memory_a. inst_embeddings_b2a = self._attention_decoder( inputs=inst_embeddings_b, memory=memory_a, training=training) inst_embeddings_b2a = inst_embeddings_b2a['hidden_states'][-1] inst_embeddings_b2a = self._projection_layer( inst_embeddings_b2a, training=training) outputs = { 'inst_a2b': inst_embeddings_a2b, 'inst_b2a': inst_embeddings_b2a, 'inst_a': inst_embeddings_a, 'inst_b': inst_embeddings_b, 'masks_a': instances_mask_a, 'masks_b': instances_mask_b, } return outputs
10,168
37.812977
80
py
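A smoke-test sketch for the head above, with hypothetical shapes: it expects the two augmented views stacked along the batch axis (2*B), an odd `context_level`, and boxes normalized to [0, 1].

import tensorflow as tf

from official.projects.const_cl.modeling.heads import instance_reconstructor

head = instance_reconstructor.InstanceReconstructor(context_level=3)
outputs = head(
    {
        'features': tf.random.normal([4, 8, 7, 7, 64]),  # [2*B, T', H, W, C]
        'instances_position': tf.random.uniform([4, 8, 5, 4]),  # [2*B, T, N, 4]
        'instances_mask': tf.ones([4, 8, 5]),  # [2*B, T, N]
    },
    training=False)
# The cross-view reconstructions are keyframe ROI embeddings projected to
# num_output_channels (1024 by default).
print(outputs['inst_a2b'].shape)  # (2, 5, 1024)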
models
models-master/official/projects/const_cl/modeling/heads/transformer_decoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Definition for Transformer heads.""" from typing import Any, Mapping, Optional, Union, List, Sequence from absl import logging import tensorflow as tf def _get_shape(x: tf.Tensor): """Helper function to return shape of a given tensor.""" static = x.shape.as_list() dynamic = tf.shape(x) return [dynamic[i] if s is None else s for i, s in enumerate(static)] class DecoderUnit(tf.keras.layers.Layer): """Constructs the decoder MHA module used in Transformer layers.""" def __init__(self, num_channels: int, use_bias: bool, dropout_rate: float, activation: str, layer_norm_epsilon: float, **kwargs): super().__init__(**kwargs) self._num_channels = num_channels self._use_bias = use_bias self._dropout_rate = dropout_rate self._activation = activation self._layer_norm_epsilon = layer_norm_epsilon def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]): """Builds the layer. Args: input_shape: the input shape for the keras tensor. """ # Query, key, and value mapping. self.layer_q = tf.keras.layers.Dense( self._num_channels, use_bias=self._use_bias, activation=None, name='query') self.layer_k = tf.keras.layers.Dense( self._num_channels, use_bias=self._use_bias, activation=None, name='key') self.layer_v = tf.keras.layers.Dense( self._num_channels, use_bias=self._use_bias, activation=None, name='value') self.dropout = tf.keras.layers.Dropout(self._dropout_rate) # Note here is a different behavior for contrib_layers.layer_norm and # tf.keras.layers.LayerNormalization, where by default, the former # calculates mean/variance across all axes except the first one # (batch axis), while the latter one computes statistics only on the last # axis. self.layer_norm = tf.keras.layers.LayerNormalization( epsilon=self._layer_norm_epsilon, name='layer_norm') self.ffn1 = tf.keras.layers.Dense( self._num_channels, use_bias=self._use_bias, activation=self._activation, name='ffn1') self.ffn2 = tf.keras.layers.Dense( self._num_channels, use_bias=self._use_bias, activation=None, name='ffn2') super().build(input_shape) def call(self, query: tf.Tensor, memory: Optional[tf.Tensor], training: bool = False) -> Mapping[str, tf.Tensor]: """Forward pass of the Transformer decoder unit. Args: query: the input query tensor. memory: the input memory tensor for key/value pairs. If None, self-attention will be performed. training: whether in training mode. Returns: outputs: the output dictionary contains 'hidden_states' and 'attention weights' matrix. """ if memory is None: memory = query tensor_q = self.layer_q(query) # (bs, qlen, inner_dim) tensor_k = self.layer_k(memory) # (bs, klen, inner_dim) tensor_v = self.layer_v(memory) # (bs, klen, inner_dim) scores = tf.matmul(tensor_q, tensor_k, transpose_b=True) # Scales attention_scores. 
dk = tf.cast(_get_shape(tensor_k)[-1], dtype=scores.dtype) scores = scores / tf.math.sqrt(dk) # Shape: (bs, seq_len, seq_len) attention_weights = tf.nn.softmax(scores, axis=-1) # Shape: (bs, seq_len, dim_per_head) attention_features = tf.matmul(attention_weights, tensor_v) # Shape: (bs, seq_len, seq_len) attention_features = self.dropout(attention_features, training=training) hidden_states = attention_features + tensor_q hidden_states = self.layer_norm(hidden_states) # Shape: (bs, seq_len, out_dim) hidden_states = self.ffn1(hidden_states) hidden_states = self.ffn2(hidden_states) outputs = { 'hidden_states': hidden_states, 'attention_weights': attention_weights, } return outputs def get_config(self) -> Mapping[str, Any]: """Gets class config parameters.""" config_dict = { 'num_channels': self._num_channels, 'use_bias': self._use_bias, 'dropout_rate': self._dropout_rate, 'activation': self._activation, 'layer_norm_epsilon': self._layer_norm_epsilon, } return config_dict @classmethod def from_config(cls, config: Mapping[str, Any]): """Factory constructor from config.""" return cls(**config) class TransformerDecoderLayer(tf.keras.layers.Layer): """Constructs the main Transformer decoder module which includes MHA + FFN.""" def __init__(self, num_channels: int, num_heads: int, use_bias: bool, activation: str, dropout_rate: float, layer_norm_epsilon: float, name: str = 'decoder_layer', **kwargs): super().__init__(name=name) self._num_channels = num_channels self._num_heads = num_heads self._use_bias = use_bias self._activation = activation self._dropout_rate = dropout_rate self._layer_norm_epsilon = layer_norm_epsilon self._name = name self._mha_units = [] for i in range(num_heads): self._mha_units.append( DecoderUnit( num_channels=num_channels, use_bias=use_bias, dropout_rate=dropout_rate, activation=activation, layer_norm_epsilon=layer_norm_epsilon, name='mha_{}'.format(i))) def call( self, inputs: tf.Tensor, memory: Optional[tf.Tensor] = None, training: bool = False ) -> Mapping[str, Union[tf.Tensor, Sequence[tf.Tensor]]]: """Forward pass of the Transformer decoder layer. Args: inputs: the input query tensor. memory: the input memory tensor for key/value pairs. If None, self-attention will be performed. training: whether in training mode. Returns: outputs: the output dictionary contains 'hidden_states' and 'attention weights' matrix. """ if memory is None: logging.info('No memory tokens are provided. 
Performing self-attention '
                   'on input tokens in TransformerDecoder.')

    all_head_feats = []
    all_head_attentions = []
    for i in range(self._num_heads):
      outputs = self._mha_units[i](
          query=inputs, memory=memory, training=training)
      all_head_feats.append(outputs['hidden_states'])
      all_head_attentions.append(outputs['attention_weights'])

    outputs = {
        'hidden_states': tf.concat(all_head_feats, axis=-1),
        'attention_weights': all_head_attentions,
    }
    return outputs

  def get_config(self) -> Mapping[str, Any]:
    """Gets class config parameters."""
    config_dict = {
        'num_channels': self._num_channels,
        'num_heads': self._num_heads,
        'use_bias': self._use_bias,
        'activation': self._activation,
        'dropout_rate': self._dropout_rate,
        'layer_norm_epsilon': self._layer_norm_epsilon,
        'name': self._name,
    }
    return config_dict

  @classmethod
  def from_config(cls, config: Mapping[str, Any]):
    """Factory constructor from config."""
    return cls(**config)


class TransformerDecoder(tf.keras.layers.Layer):
  """Constructs the final Transformer decoder stack."""

  def __init__(self,
               num_channels: int,
               num_layers: int,
               num_heads: int,
               use_bias: bool,
               activation: str,
               dropout_rate: float,
               layer_norm_epsilon: float,
               name: str = 'transformer_decoder',
               **kwargs):
    super().__init__(name=name)

    self._num_channels = num_channels
    self._num_layers = num_layers
    self._num_heads = num_heads
    self._use_bias = use_bias
    self._activation = activation
    self._dropout_rate = dropout_rate
    self._layer_norm_epsilon = layer_norm_epsilon

    self._layers = []
    for n in range(self._num_layers):
      self._layers.append(
          TransformerDecoderLayer(
              num_channels=num_channels,
              num_heads=num_heads,
              use_bias=use_bias,
              activation=activation,
              dropout_rate=dropout_rate,
              layer_norm_epsilon=layer_norm_epsilon,
              name='layer_{}'.format(n)))

  def call(self,
           inputs: tf.Tensor,
           memory: Optional[tf.Tensor] = None,
           training: bool = False) -> Mapping[str, Sequence[tf.Tensor]]:
    """Forward pass of the Transformer decoder.

    Args:
      inputs: the input query tensor.
      memory: the input memory tensor for key/value pairs. If None,
        self-attention will be performed.
      training: whether in training mode.

    Returns:
      outputs: the output dictionary containing 'hidden_states' and
        'attention_weights'.
    """
    all_hidden_states = ()
    all_attentions = ()

    # Memory may be None, in which case the decoder layers fall back to
    # self-attention on the input tokens.
    if memory is not None:
      memory_shape = _get_shape(memory)
      memory = tf.reshape(memory, [memory_shape[0], -1, memory_shape[-1]])

    hidden_states = inputs
    for layer in self._layers:
      layer_outputs = layer(inputs=hidden_states, memory=memory,
                            training=training)
      # layer_outputs is a dictionary with the following keys:
      # hidden_states, attention_weights.
      hidden_states = layer_outputs['hidden_states']
      all_attentions += (layer_outputs['attention_weights'],)

    # Only the last layer's hidden states are kept.
    all_hidden_states += (hidden_states,)

    outputs = {
        'hidden_states': all_hidden_states,
        'attention_weights': all_attentions,
    }
    return outputs

  def get_config(self) -> Mapping[str, Any]:
    """Gets class config parameters."""
    config_dict = {
        'num_channels': self._num_channels,
        'num_layers': self._num_layers,
        'num_heads': self._num_heads,
        'use_bias': self._use_bias,
        'activation': self._activation,
        'dropout_rate': self._dropout_rate,
        'layer_norm_epsilon': self._layer_norm_epsilon,
    }
    return config_dict

  @classmethod
  def from_config(cls, config: Mapping[str, Any]):
    """Factory constructor from config."""
    return cls(**config)
10,974
30.90407
80
py
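
A minimal usage sketch of the ConST-CL TransformerDecoder defined above. Every layer size and tensor shape below is an illustrative assumption, not a value taken from the source; the decoder consumes a query tensor plus a (possibly spatial) memory tensor that it flattens to (batch, tokens, channels) internally.

import tensorflow as tf

# Hypothetical sizes: 2 layers, 4 decoder units (heads) of 64 channels each.
decoder = TransformerDecoder(
    num_channels=64, num_layers=2, num_heads=4, use_bias=True,
    activation='relu', dropout_rate=0.0, layer_norm_epsilon=1e-6)
queries = tf.random.normal([2, 8, 64])    # (batch, num_queries, channels)
memory = tf.random.normal([2, 7, 7, 64])  # flattened internally to (2, 49, 64)
outputs = decoder(inputs=queries, memory=memory, training=False)
# outputs['hidden_states'] is a tuple holding the last layer's hidden states;
# outputs['attention_weights'] holds one list of per-head maps per layer.
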
models
models-master/official/projects/const_cl/modeling/heads/simple.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Constructs simple heads."""

from typing import Any, Mapping, Optional

import tensorflow as tf

from official.modeling import tf_utils


class MLP(tf.keras.layers.Layer):
  """Constructs the Multi-Layer Perceptron head."""

  def __init__(self,
               num_hidden_layers: int,
               num_hidden_channels: int,
               num_output_channels: int,
               use_sync_bn: bool,
               norm_momentum: float = 0.99,
               norm_epsilon: float = 1e-5,
               activation: Optional[str] = None,
               normalize_inputs: bool = False,
               **kwargs):
    """Multi-Layer Perceptron initialization.

    Args:
      num_hidden_layers: the number of hidden layers in the MLP.
      num_hidden_channels: the number of hidden nodes.
      num_output_channels: the number of final output nodes.
      use_sync_bn: whether to use sync batch norm.
      norm_momentum: the batch norm momentum.
      norm_epsilon: the batch norm epsilon.
      activation: the activation function.
      normalize_inputs: whether to normalize inputs.
      **kwargs: keyword arguments to be passed.
    """
    super().__init__(**kwargs)
    self._num_hidden_layers = num_hidden_layers
    self._num_hidden_channels = num_hidden_channels
    self._num_output_channels = num_output_channels
    self._use_sync_bn = use_sync_bn
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
    self._activation = activation
    self._normalize_inputs = normalize_inputs

    self._layers = []
    # MLP hidden layers
    for _ in range(num_hidden_layers):
      self._layers.append(
          tf.keras.layers.Dense(num_hidden_channels, use_bias=False))
      if use_sync_bn:
        self._layers.append(
            tf.keras.layers.experimental.SyncBatchNormalization(
                momentum=norm_momentum, epsilon=norm_epsilon))
      else:
        self._layers.append(
            tf.keras.layers.BatchNormalization(
                momentum=norm_momentum, epsilon=norm_epsilon))
      if activation is not None:
        self._layers.append(tf_utils.get_activation(activation))

    # Projection head
    self._layers.append(tf.keras.layers.Dense(num_output_channels))

  def call(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
    """Forward calls with N-D inputs tensor."""
    if self._normalize_inputs:
      inputs = tf.nn.l2_normalize(inputs, axis=-1)

    for layer in self._layers:
      if isinstance(layer, tf.keras.layers.Layer):
        inputs = layer(inputs, training=training)
      else:  # activation
        inputs = layer(inputs)
    return inputs

  def get_config(self) -> Mapping[str, Any]:
    """Gets class config parameters."""
    config_dict = {
        'num_hidden_layers': self._num_hidden_layers,
        'num_hidden_channels': self._num_hidden_channels,
        'num_output_channels': self._num_output_channels,
        'use_sync_bn': self._use_sync_bn,
        'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon,
        'activation': self._activation,
        'normalize_inputs': self._normalize_inputs}
    return config_dict

  @classmethod
  def from_config(cls, config: Mapping[str, Any]):
    """Factory constructor from config."""
    return cls(**config)
3,926
34.7
74
py
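
A minimal usage sketch for the MLP head above; the batch size, feature width, and layer sizes are made-up values for illustration.

import tensorflow as tf

mlp = MLP(
    num_hidden_layers=2,
    num_hidden_channels=512,
    num_output_channels=128,
    use_sync_bn=False,
    activation='relu',
    normalize_inputs=True)
features = tf.random.normal([4, 2048])      # e.g. pooled backbone features
embeddings = mlp(features, training=False)  # -> shape (4, 128)
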
models
models-master/official/projects/const_cl/tasks/const_cl.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Video SSL pretrain task definition."""

from typing import Any, Optional

from absl import logging
import tensorflow as tf

from official.core import input_reader
from official.core import task_factory
from official.projects.const_cl.configs import const_cl as exp_cfg
from official.projects.const_cl.datasets import video_ssl_inputs
from official.projects.const_cl.losses import losses
from official.projects.video_ssl.tasks import pretrain as video_ssl_pretrain
from official.vision.modeling import factory_3d


@task_factory.register_task_cls(exp_cfg.ConstCLPretrainTask)
class ConstCLPretrainTask(video_ssl_pretrain.VideoSSLPretrainTask):
  """A task for video contextualized ssl pretraining."""

  def build_model(self):
    """Builds video ssl pretraining model."""
    common_input_shape = [
        d1 if d1 == d2 else None
        for d1, d2 in zip(self.task_config.train_data.feature_shape,
                          self.task_config.validation_data.feature_shape)
    ]

    num_frames = common_input_shape[0]
    num_instances = self.task_config.train_data.num_instances
    input_specs_dict = {
        'image':
            tf.keras.layers.InputSpec(shape=[None] + common_input_shape),
        'instances_position':
            tf.keras.layers.InputSpec(
                shape=[None, num_frames, num_instances, 4]),
        'instances_mask':
            tf.keras.layers.InputSpec(shape=[None, num_frames, num_instances]),
    }
    logging.info('Build model input %r', common_input_shape)

    model = factory_3d.build_model(
        self.task_config.model.model_type,
        input_specs=input_specs_dict,
        model_config=self.task_config.model,
        num_classes=self.task_config.train_data.num_classes)
    return model

  def build_inputs(self,
                   params: exp_cfg.DataConfig,
                   input_context: Optional[Any] = None) -> tf.data.Dataset:
    """Builds ConST-CL SSL input."""
    parser = video_ssl_inputs.Parser(input_params=params)
    postprocess_fn = video_ssl_inputs.PostBatchProcessor(params)

    reader = input_reader.InputReader(
        params,
        dataset_fn=self._get_dataset_fn(params),
        decoder_fn=self._get_decoder_fn(params),
        parser_fn=parser.parse_fn(params.is_training),
        postprocess_fn=postprocess_fn)

    dataset = reader.read(input_context=input_context)
    return dataset

  def build_losses(self, model_outputs, num_replicas, model):
    """Builds the ConST-CL contrastive losses.

    Args:
      model_outputs: Output logits of the model.
      num_replicas: distributed replica number.
      model: keras model for calculating weight decay.

    Returns:
      A dictionary of loss tensors and a dictionary of logging metrics.
    """
    all_losses = {}
    logging_metrics = {}
    losses_config = self.task_config.losses
    total_loss = None
    global_loss = losses.ContrastiveLoss(
        normalize_inputs=losses_config.normalize_inputs,
        temperature=losses_config.global_temperature)
    local_loss = losses.InstanceContrastiveLoss(
        normalize_inputs=losses_config.normalize_inputs,
        temperature=losses_config.local_temperature)

    # Compute global loss.
    global_inputs = model_outputs['global_embeddings']
    global_loss_dict = global_loss(inputs=global_inputs,
                                   num_replicas=num_replicas)

    # Compute local loss.
    local_inputs = {
        'instances_a2b': model_outputs['inst_a2b'],
        'instances_b2a': model_outputs['inst_b2a'],
        'instances_a': model_outputs['inst_a'],
        'instances_b': model_outputs['inst_b'],
        'masks_a': model_outputs['masks_a'],
        'masks_b': model_outputs['masks_b'],
    }
    local_loss_dict = local_loss(predictions=local_inputs,
                                 num_replicas=num_replicas)

    # Compute regularization loss.
    reg_loss = losses_config.l2_weight_decay * tf.add_n([
        tf.nn.l2_loss(v)
        for v in model.trainable_variables
        if 'kernel' in v.name])

    total_loss = (global_loss_dict['loss'] * losses_config.global_weight +
                  local_loss_dict['loss'] * losses_config.local_weight +
                  reg_loss)

    all_losses.update({
        'total_loss': total_loss
    })
    all_losses[self.loss] = total_loss
    logging_metrics['regularization_loss'] = reg_loss
    for k, v in global_loss_dict.items():
      logging_metrics['global_loss/' + k] = v
    for k, v in local_loss_dict.items():
      logging_metrics['local_loss/' + k] = v
    return all_losses, logging_metrics

  def build_metrics(self, training=True):
    """Gets streaming metrics for training/validation."""
    metrics = [
        tf.keras.metrics.Mean(name='regularization_loss'),
        tf.keras.metrics.Mean(name='global_loss/loss'),
        tf.keras.metrics.Mean(name='global_loss/contrastive_accuracy'),
        tf.keras.metrics.Mean(name='global_loss/contrastive_entropy'),
        tf.keras.metrics.Mean(name='local_loss/loss'),
        tf.keras.metrics.Mean(name='local_loss/positive_similarity_mean'),
        tf.keras.metrics.Mean(name='local_loss/positive_similarity_max'),
        tf.keras.metrics.Mean(name='local_loss/positive_similarity_min'),
        tf.keras.metrics.Mean(name='local_loss/negative_similarity_mean'),
        tf.keras.metrics.Mean(name='local_loss/negative_similarity_max'),
        tf.keras.metrics.Mean(name='local_loss/negative_similarity_min'),
    ]
    return metrics

  def process_metrics(self, metrics, contrastive_metrics):
    """Processes and updates metrics."""
    for metric in metrics:
      v = contrastive_metrics[metric.name]
      metric.update_state(v)

  def train_step(self, inputs, model, optimizer, metrics=None):
    """Forward and backward pass.

    Args:
      inputs: a dictionary of input tensors.
      model: the model, forward pass definition.
      optimizer: the optimizer for this training step.
      metrics: a nested structure of metrics objects.

    Returns:
      A dictionary of logs.
    """
    features, _ = inputs
    num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
    with tf.GradientTape() as tape:
      outputs = model(features, training=True)
      # Casting the output as float32 is necessary when mixed_precision is
      # mixed_float16 or mixed_bfloat16 to ensure the output is cast as
      # float32.
      outputs = tf.nest.map_structure(
          lambda x: tf.cast(x, tf.float32), outputs)

      all_losses, contrastive_metrics = self.build_losses(
          model_outputs=outputs, num_replicas=num_replicas, model=model)
      scaled_loss = all_losses[self.loss]
      # For mixed_precision policy, when LossScaleOptimizer is used, loss is
      # scaled for numerical stability.
      if isinstance(
          optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
        scaled_loss = optimizer.get_scaled_loss(scaled_loss)

    tvars = model.trainable_variables
    grads = tape.gradient(scaled_loss, tvars)
    # Scales back gradient before apply_gradients when LossScaleOptimizer is
    # used.
    if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
      grads = optimizer.get_unscaled_gradients(grads)
    optimizer.apply_gradients(list(zip(grads, tvars)))

    logs = all_losses
    if metrics:
      self.process_metrics(metrics, contrastive_metrics)
      logs.update({m.name: m.result() for m in metrics})
    return logs

  def validation_step(self, inputs, model, metrics=None):
    """Validation step.

    Args:
      inputs: a dictionary of input tensors.
      model: the keras.Model.
      metrics: a nested structure of metrics objects.

    Returns:
      A dictionary of logs.
    """
    raise NotImplementedError

  def inference_step(self, features, model):
    """Performs the forward step."""
    raise NotImplementedError
8,419
36.422222
79
py
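
The total objective assembled by build_losses above is global_weight * global_loss + local_weight * local_loss plus an L2 term taken over kernel variables only. A hedged sketch of that regularization term, using a toy Keras model in place of the real video backbone:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(8, input_shape=(4,))])
l2_weight_decay = 1e-4  # assumed value; the real one comes from the config
reg_loss = l2_weight_decay * tf.add_n([
    tf.nn.l2_loss(v)
    for v in model.trainable_variables
    if 'kernel' in v.name])  # biases and normalization params are excluded
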
models
models-master/official/projects/nhnet/decoder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for projects.nhnet.decoder.""" import numpy as np import tensorflow as tf from official.nlp.modeling import layers from official.projects.nhnet import configs from official.projects.nhnet import decoder from official.projects.nhnet import utils class DecoderTest(tf.test.TestCase): def setUp(self): super(DecoderTest, self).setUp() self._config = utils.get_test_params() def test_transformer_decoder(self): decoder_block = decoder.TransformerDecoder( num_hidden_layers=self._config.num_hidden_layers, hidden_size=self._config.hidden_size, num_attention_heads=self._config.num_attention_heads, intermediate_size=self._config.intermediate_size, intermediate_activation=self._config.hidden_act, hidden_dropout_prob=self._config.hidden_dropout_prob, attention_probs_dropout_prob=self._config.attention_probs_dropout_prob, initializer_range=self._config.initializer_range) decoder_block.build(None) self.assertEqual(len(decoder_block.layers), self._config.num_hidden_layers) def test_bert_decoder(self): seq_length = 10 encoder_input_ids = tf.keras.layers.Input( shape=(seq_length,), name="encoder_input_ids", dtype=tf.int32) target_ids = tf.keras.layers.Input( shape=(seq_length,), name="target_ids", dtype=tf.int32) encoder_outputs = tf.keras.layers.Input( shape=(seq_length, self._config.hidden_size), name="all_encoder_outputs", dtype=tf.float32) embedding_lookup = layers.OnDeviceEmbedding( vocab_size=self._config.vocab_size, embedding_width=self._config.hidden_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=self._config.initializer_range), name="word_embeddings") cross_attention_bias = decoder.AttentionBias(bias_type="single_cross")( encoder_input_ids) self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")( target_ids) inputs = dict( attention_bias=cross_attention_bias, self_attention_bias=self_attention_bias, target_ids=target_ids, all_encoder_outputs=encoder_outputs) decoder_layer = decoder.Decoder(self._config, embedding_lookup) outputs = decoder_layer(inputs) model_inputs = dict( encoder_input_ids=encoder_input_ids, target_ids=target_ids, all_encoder_outputs=encoder_outputs) model = tf.keras.Model(inputs=model_inputs, outputs=outputs, name="test") self.assertLen(decoder_layer.trainable_weights, 30) # Forward path. 
fake_inputs = { "encoder_input_ids": np.zeros((2, 10), dtype=np.int32), "target_ids": np.zeros((2, 10), dtype=np.int32), "all_encoder_outputs": np.zeros((2, 10, 16), dtype=np.float32), } output_tensor = model(fake_inputs) self.assertEqual(output_tensor.shape, (2, 10, 16)) def test_multi_doc_decoder(self): self._config = utils.get_test_params(cls=configs.NHNetConfig) seq_length = 10 num_docs = 5 encoder_input_ids = tf.keras.layers.Input( shape=(num_docs, seq_length), name="encoder_input_ids", dtype=tf.int32) target_ids = tf.keras.layers.Input( shape=(seq_length,), name="target_ids", dtype=tf.int32) encoder_outputs = tf.keras.layers.Input( shape=(num_docs, seq_length, self._config.hidden_size), name="all_encoder_outputs", dtype=tf.float32) embedding_lookup = layers.OnDeviceEmbedding( vocab_size=self._config.vocab_size, embedding_width=self._config.hidden_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=self._config.initializer_range), name="word_embeddings") doc_attention_probs = tf.keras.layers.Input( shape=(self._config.num_decoder_attn_heads, seq_length, num_docs), name="doc_attention_probs", dtype=tf.float32) cross_attention_bias = decoder.AttentionBias(bias_type="multi_cross")( encoder_input_ids) self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")( target_ids) inputs = dict( attention_bias=cross_attention_bias, self_attention_bias=self_attention_bias, target_ids=target_ids, all_encoder_outputs=encoder_outputs, doc_attention_probs=doc_attention_probs) decoder_layer = decoder.Decoder(self._config, embedding_lookup) outputs = decoder_layer(inputs) model_inputs = dict( encoder_input_ids=encoder_input_ids, target_ids=target_ids, all_encoder_outputs=encoder_outputs, doc_attention_probs=doc_attention_probs) model = tf.keras.Model(inputs=model_inputs, outputs=outputs, name="test") self.assertLen(decoder_layer.trainable_weights, 30) # Forward path. fake_inputs = { "encoder_input_ids": np.zeros((2, num_docs, seq_length), dtype=np.int32), "target_ids": np.zeros((2, seq_length), dtype=np.int32), "all_encoder_outputs": np.zeros((2, num_docs, seq_length, 16), dtype=np.float32), "doc_attention_probs": np.zeros( (2, self._config.num_decoder_attn_heads, seq_length, num_docs), dtype=np.float32) } output_tensor = model(fake_inputs) self.assertEqual(output_tensor.shape, (2, seq_length, 16)) if __name__ == "__main__": tf.test.main()
6,024
39.709459
79
py
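
These tests use the standard tf.test entry point, so the file can be run directly (module path assumed from its location): python -m official.projects.nhnet.decoder_test. A standalone sketch of the attention-bias layers the tests wire up, with dummy all-zero ids:

import tensorflow as tf
from official.projects.nhnet import decoder

ids = tf.zeros((2, 10), dtype=tf.int32)
self_bias = decoder.AttentionBias(bias_type="decoder_self")(ids)
cross_bias = decoder.AttentionBias(bias_type="single_cross")(ids)
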
models
models-master/official/projects/nhnet/utils.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utility helpers for Bert2Bert."""

from typing import Optional, Text

from absl import logging
import tensorflow as tf

from official.legacy.bert import configs
from official.modeling.hyperparams import params_dict
from official.projects.nhnet import configs as nhnet_configs


def get_bert_config_from_params(
    params: params_dict.ParamsDict) -> configs.BertConfig:
  """Converts a ParamsDict to a BertConfig."""
  return configs.BertConfig.from_dict(params.as_dict())


def get_test_params(cls=nhnet_configs.BERT2BERTConfig):
  return cls.from_args(**nhnet_configs.UNITTEST_CONFIG)


# pylint: disable=protected-access
def encoder_common_layers(transformer_block):
  return [
      transformer_block._attention_layer,
      transformer_block._attention_layer_norm,
      transformer_block._intermediate_dense,
      transformer_block._output_dense,
      transformer_block._output_layer_norm
  ]
# pylint: enable=protected-access


def initialize_bert2bert_from_pretrained_bert(
    bert_encoder: tf.keras.layers.Layer,
    bert_decoder: tf.keras.layers.Layer,
    init_checkpoint: Optional[Text] = None) -> None:
  """Helper function to initialize Bert2Bert from a pretrained BERT checkpoint."""
  ckpt = tf.train.Checkpoint(model=bert_encoder)
  logging.info(
      "Checkpoint file %s found and restoring from "
      "initial checkpoint for core model.", init_checkpoint)
  status = ckpt.restore(init_checkpoint)

  # Expects the bert model is a subset of checkpoint as pooling layer is
  # not used.
  status.assert_existing_objects_matched()
  logging.info("Loading from checkpoint file completed.")

  # Saves a checkpoint with transformer layers.
  encoder_layers = []
  for transformer_block in bert_encoder.transformer_layers:
    encoder_layers.extend(encoder_common_layers(transformer_block))

  # Restores from the checkpoint with encoder layers.
  decoder_layers_to_initialize = []
  for decoder_block in bert_decoder.decoder.layers:
    decoder_layers_to_initialize.extend(
        decoder_block.common_layers_with_encoder())

  if len(decoder_layers_to_initialize) != len(encoder_layers):
    raise ValueError(
        "Source encoder layers with %d objects does not match destination "
        "decoder layers with %d objects." %
        (len(decoder_layers_to_initialize), len(encoder_layers)))

  for dest_layer, source_layer in zip(decoder_layers_to_initialize,
                                      encoder_layers):
    try:
      dest_layer.set_weights(source_layer.get_weights())
    except ValueError as e:
      logging.error(
          "dest_layer: %s failed to set weights from "
          "source_layer: %s as %s", dest_layer.name, source_layer.name,
          str(e))
3,294
35.611111
79
py
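
A hedged usage sketch for the checkpoint initializer above. The encoder/decoder pair comes from models.get_bert2bert_layers (defined later in this dump), and the checkpoint path is a placeholder, not a real artifact:

from official.projects.nhnet import models
from official.projects.nhnet import utils

bert_layer, decoder_layer = models.get_bert2bert_layers(
    params=utils.get_test_params())
utils.initialize_bert2bert_from_pretrained_bert(
    bert_layer, decoder_layer, init_checkpoint="/path/to/pretrained/bert-ckpt")
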
models
models-master/official/projects/nhnet/models_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for projects.nhnet.models.""" import os from absl import logging from absl.testing import parameterized import numpy as np import tensorflow as tf # pylint: disable=g-direct-tensorflow-import from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations # pylint: enable=g-direct-tensorflow-import from official.projects.nhnet import configs from official.projects.nhnet import models from official.projects.nhnet import utils def all_strategy_combinations(): return combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy_gpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus, ],) def distribution_forward_path(strategy, model, inputs, batch_size, mode="train"): dataset = tf.data.Dataset.from_tensor_slices((inputs)) dataset = dataset.batch(batch_size) dataset = strategy.experimental_distribute_dataset(dataset) @tf.function def test_step(inputs): """Calculates evaluation metrics on distributed devices.""" def _test_step_fn(inputs): """Replicated accuracy calculation.""" return model(inputs, mode=mode, training=False) outputs = strategy.run(_test_step_fn, args=(inputs,)) return tf.nest.map_structure(strategy.experimental_local_results, outputs) return [test_step(inputs) for inputs in dataset] def process_decoded_ids(predictions, end_token_id): """Transforms decoded tensors to lists ending with END_TOKEN_ID.""" if isinstance(predictions, tf.Tensor): predictions = predictions.numpy() flatten_ids = predictions.reshape((-1, predictions.shape[-1])) results = [] for ids in flatten_ids: ids = list(ids) if end_token_id in ids: ids = ids[:ids.index(end_token_id)] results.append(ids) return results class Bert2BertTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): super(Bert2BertTest, self).setUp() self._config = utils.get_test_params() def test_model_creation(self): model = models.create_bert2bert_model(params=self._config) fake_ids = np.zeros((2, 10), dtype=np.int32) fake_inputs = { "input_ids": fake_ids, "input_mask": fake_ids, "segment_ids": fake_ids, "target_ids": fake_ids, } model(fake_inputs) @combinations.generate(all_strategy_combinations()) def test_bert2bert_train_forward(self, distribution): seq_length = 10 # Defines the model inside distribution strategy scope. with distribution.scope(): # Forward path. 
batch_size = 2 batches = 4 fake_ids = np.zeros((batch_size * batches, seq_length), dtype=np.int32) fake_inputs = { "input_ids": fake_ids, "input_mask": fake_ids, "segment_ids": fake_ids, "target_ids": fake_ids, } model = models.create_bert2bert_model(params=self._config) results = distribution_forward_path(distribution, model, fake_inputs, batch_size) logging.info("Forward path results: %s", str(results)) self.assertLen(results, batches) def test_bert2bert_decoding(self): seq_length = 10 self._config.override( { "beam_size": 3, "len_title": seq_length, "alpha": 0.6, }, is_strict=False) batch_size = 2 fake_ids = np.zeros((batch_size, seq_length), dtype=np.int32) fake_inputs = { "input_ids": fake_ids, "input_mask": fake_ids, "segment_ids": fake_ids, } self._config.override({ "padded_decode": False, "use_cache": False, }, is_strict=False) model = models.create_bert2bert_model(params=self._config) ckpt = tf.train.Checkpoint(model=model) # Initializes variables from checkpoint to keep outputs deterministic. init_checkpoint = ckpt.save(os.path.join(self.get_temp_dir(), "ckpt")) ckpt.restore(init_checkpoint).assert_existing_objects_matched() top_ids, scores = model(fake_inputs, mode="predict") self._config.override({ "padded_decode": False, "use_cache": True, }, is_strict=False) model = models.create_bert2bert_model(params=self._config) ckpt = tf.train.Checkpoint(model=model) ckpt.restore(init_checkpoint).assert_existing_objects_matched() cached_top_ids, cached_scores = model(fake_inputs, mode="predict") self.assertEqual( process_decoded_ids(top_ids, self._config.end_token_id), process_decoded_ids(cached_top_ids, self._config.end_token_id)) self.assertAllClose(scores, cached_scores) self._config.override({ "padded_decode": True, "use_cache": True, }, is_strict=False) model = models.create_bert2bert_model(params=self._config) ckpt = tf.train.Checkpoint(model=model) ckpt.restore(init_checkpoint).assert_existing_objects_matched() padded_top_ids, padded_scores = model(fake_inputs, mode="predict") self.assertEqual( process_decoded_ids(top_ids, self._config.end_token_id), process_decoded_ids(padded_top_ids, self._config.end_token_id)) self.assertAllClose(scores, padded_scores) @combinations.generate(all_strategy_combinations()) def test_bert2bert_eval(self, distribution): seq_length = 10 padded_decode = isinstance( distribution, (tf.distribute.TPUStrategy, tf.distribute.experimental.TPUStrategy)) self._config.override( { "beam_size": 3, "len_title": seq_length, "alpha": 0.6, "padded_decode": padded_decode, }, is_strict=False) # Defines the model inside distribution strategy scope. with distribution.scope(): # Forward path. 
batch_size = 2 batches = 4 fake_ids = np.zeros((batch_size * batches, seq_length), dtype=np.int32) fake_inputs = { "input_ids": fake_ids, "input_mask": fake_ids, "segment_ids": fake_ids, } model = models.create_bert2bert_model(params=self._config) results = distribution_forward_path( distribution, model, fake_inputs, batch_size, mode="predict") self.assertLen(results, batches) results = distribution_forward_path( distribution, model, fake_inputs, batch_size, mode="eval") self.assertLen(results, batches) class NHNetTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): super(NHNetTest, self).setUp() self._nhnet_config = configs.NHNetConfig() self._nhnet_config.override(utils.get_test_params().as_dict()) self._bert2bert_config = configs.BERT2BERTConfig() self._bert2bert_config.override(utils.get_test_params().as_dict()) def _count_params(self, layer, trainable_only=True): """Returns the count of all model parameters, or just trainable ones.""" if not trainable_only: return layer.count_params() else: return int( np.sum([ tf.keras.backend.count_params(p) for p in layer.trainable_weights ])) def test_create_nhnet_layers(self): single_doc_bert, single_doc_decoder = models.get_bert2bert_layers( self._bert2bert_config) multi_doc_bert, multi_doc_decoder = models.get_nhnet_layers( self._nhnet_config) # Expects multi-doc encoder/decoder have the same number of parameters as # single-doc encoder/decoder. self.assertEqual( self._count_params(multi_doc_bert), self._count_params(single_doc_bert)) self.assertEqual( self._count_params(multi_doc_decoder), self._count_params(single_doc_decoder)) def test_checkpoint_restore(self): bert2bert_model = models.create_bert2bert_model(self._bert2bert_config) ckpt = tf.train.Checkpoint(model=bert2bert_model) init_checkpoint = ckpt.save(os.path.join(self.get_temp_dir(), "ckpt")) nhnet_model = models.create_nhnet_model( params=self._nhnet_config, init_checkpoint=init_checkpoint) source_weights = ( bert2bert_model.bert_layer.trainable_weights + bert2bert_model.decoder_layer.trainable_weights) dest_weights = ( nhnet_model.bert_layer.trainable_weights + nhnet_model.decoder_layer.trainable_weights) for source_weight, dest_weight in zip(source_weights, dest_weights): self.assertAllClose(source_weight.numpy(), dest_weight.numpy()) @combinations.generate(all_strategy_combinations()) def test_nhnet_train_forward(self, distribution): seq_length = 10 # Defines the model inside distribution strategy scope. with distribution.scope(): # Forward path. batch_size = 2 num_docs = 2 batches = 4 fake_ids = np.zeros((batch_size * batches, num_docs, seq_length), dtype=np.int32) fake_inputs = { "input_ids": fake_ids, "input_mask": fake_ids, "segment_ids": fake_ids, "target_ids": np.zeros((batch_size * batches, seq_length * 2), dtype=np.int32), } model = models.create_nhnet_model(params=self._nhnet_config) results = distribution_forward_path(distribution, model, fake_inputs, batch_size) logging.info("Forward path results: %s", str(results)) self.assertLen(results, batches) @combinations.generate(all_strategy_combinations()) def test_nhnet_eval(self, distribution): seq_length = 10 padded_decode = isinstance( distribution, (tf.distribute.TPUStrategy, tf.distribute.experimental.TPUStrategy)) self._nhnet_config.override( { "beam_size": 4, "len_title": seq_length, "alpha": 0.6, "multi_channel_cross_attention": True, "padded_decode": padded_decode, }, is_strict=False) # Defines the model inside distribution strategy scope. with distribution.scope(): # Forward path. 
batch_size = 2 num_docs = 2 batches = 4 fake_ids = np.zeros((batch_size * batches, num_docs, seq_length), dtype=np.int32) fake_inputs = { "input_ids": fake_ids, "input_mask": fake_ids, "segment_ids": fake_ids, "target_ids": np.zeros((batch_size * batches, 5), dtype=np.int32), } model = models.create_nhnet_model(params=self._nhnet_config) results = distribution_forward_path( distribution, model, fake_inputs, batch_size, mode="predict") self.assertLen(results, batches) results = distribution_forward_path( distribution, model, fake_inputs, batch_size, mode="eval") self.assertLen(results, batches) if __name__ == "__main__": tf.test.main()
11,786
35.267692
80
py
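
A small worked example of the process_decoded_ids helper above (with the helper in scope); the token ids are made up and end_token_id is assumed to be 2:

import numpy as np

preds = np.array([[[5, 7, 2, 0],
                   [9, 2, 0, 0]]], dtype=np.int32)  # (batch, beam, length)
print(process_decoded_ids(preds, end_token_id=2))   # -> [[5, 7], [9]]
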
models
models-master/official/projects/nhnet/decoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Transformer decoder that mimics a BERT encoder, to load BERT checkpoints.""" import tensorflow as tf from official.legacy.transformer import model_utils as transformer_utils from official.modeling import tf_utils from official.nlp.modeling import layers class TransformerDecoder(tf.keras.layers.Layer): """Transformer decoder stack.""" def __init__(self, num_hidden_layers=12, hidden_size=768, num_attention_heads=12, intermediate_size=3072, intermediate_activation="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, attend_to_last_layer=True, multi_channel_cross_attention=False, **kwargs): super(TransformerDecoder, self).__init__(**kwargs) self.num_hidden_layers = num_hidden_layers self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.intermediate_activation = tf_utils.get_activation( intermediate_activation) self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.attend_to_last_layer = attend_to_last_layer self.multi_channel_cross_attention = multi_channel_cross_attention def build(self, unused_input_shapes): """Implements build() for the layer.""" self.layers = [] for i in range(self.num_hidden_layers): self.layers.append( layers.TransformerDecoderBlock( num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, intermediate_activation=self.intermediate_activation, dropout_rate=self.hidden_dropout_prob, attention_dropout_rate=self.attention_probs_dropout_prob, kernel_initializer=tf.keras.initializers.TruncatedNormal( stddev=self.initializer_range), multi_channel_cross_attention=self.multi_channel_cross_attention, name=("layer_%d" % i))) super(TransformerDecoder, self).build(unused_input_shapes) def call(self, inputs, cache=None, decode_loop_step=None): """Return the output of the decoder layer stacks. Args: inputs: A dictionary of inputs. `decoder_inputs` is a tf.int32 tensor for input ids. `encoder_outputs` is a list of tensors with shape [batch_size, input_length, hidden_size]. `self_attention_mask` is the bias for decoder self-attention layer. [1, 1, target_length, target_length]. `attention_mask` is the bias for encoder-decoder attention layer, [batch_size, 1, 1, input_length]. cache: A dictionary of cache tensors, including key & value attentions. decode_loop_step: an integer to indicate the step inside a decoding loop. Returns: Output of decoder layer stack. 
float32 tensor with shape [batch_size, target_length, hidden_size] """ decoder_inputs = inputs["decoder_inputs"] encoder_outputs = inputs["encoder_outputs"] self_attention_mask = inputs["self_attention_mask"] attention_mask = inputs["attention_mask"] decoder_shape = tf_utils.get_shape_list(decoder_inputs, expected_rank=3) batch_size = decoder_shape[0] decoder_length = decoder_shape[1] def _to_bert_self_attention_mask(matrix): """[1, 1, target_len, target_len] -> [bs, target_len, target_len].""" matrix = tf.squeeze(matrix, axis=[1]) matrix = tf.tile(matrix, [batch_size, 1, 1]) return matrix def _to_bert_encdec_attention_mask(matrix): """[bs, 1, 1, input_len] -> [bs, target_len, input_len].""" if self.multi_channel_cross_attention: matrix = tf.expand_dims(matrix, axis=2) matrix = tf.tile(matrix, [1, 1, decoder_length, 1]) else: matrix = tf.squeeze(matrix, axis=[1]) matrix = tf.tile(matrix, [1, decoder_length, 1]) return matrix attention_mask = _to_bert_encdec_attention_mask(attention_mask) self_attention_mask = _to_bert_self_attention_mask(self_attention_mask) output_tensor = decoder_inputs for layer_idx in range(self.num_hidden_layers): if self.attend_to_last_layer: memory = encoder_outputs[-1] else: memory = encoder_outputs[layer_idx] if self.multi_channel_cross_attention: transformer_inputs = [ output_tensor, memory, attention_mask, self_attention_mask, inputs["doc_attention_probs"] ] else: transformer_inputs = [ output_tensor, memory, attention_mask, self_attention_mask ] # Gets the cache for decoding. if cache is None: output_tensor, _ = self.layers[layer_idx](transformer_inputs) else: cache_layer_idx = str(layer_idx) output_tensor, cache[cache_layer_idx] = self.layers[layer_idx]( transformer_inputs, cache=cache[cache_layer_idx], decode_loop_step=decode_loop_step) return output_tensor, cache def get_attention_bias(input_tensor, bias_type, padding_value=0, max_length=None): """A helper function to get various attention bias tensors.""" if bias_type not in ("single_cross", "multi_cross", "decoder_self"): raise ValueError("Invalid attention bias type: %s" % bias_type) if bias_type == "single_cross": length = tf_utils.get_shape_list(input_tensor, expected_rank=2)[1] bias = transformer_utils.get_padding_bias( input_tensor, padding_value=padding_value) elif bias_type == "multi_cross": length = tf_utils.get_shape_list(input_tensor, expected_rank=3)[2] padding = transformer_utils.get_padding( input_tensor, padding_value=padding_value) bias = padding * -1e9 else: if max_length is not None: length = max_length else: length = tf_utils.get_shape_list(input_tensor, expected_rank=2)[1] bias = transformer_utils.get_decoder_self_attention_bias(length) return tf.where(bias < 0, tf.zeros_like(bias), tf.ones_like(bias)) class AttentionBias(tf.keras.layers.Layer): def __init__(self, bias_type, **kwargs): super(AttentionBias, self).__init__(**kwargs) self.bias_type = bias_type def call(self, inputs): return get_attention_bias(inputs, self.bias_type) class EmbeddingPostprocessor(tf.keras.layers.Layer): """Performs various post-processing on a word embedding tensor.""" def __init__(self, use_type_embeddings=False, token_type_vocab_size=None, use_position_embeddings=True, max_position_embeddings=512, dropout_prob=0.0, initializer_range=0.02, initializer=None, **kwargs): super(EmbeddingPostprocessor, self).__init__(**kwargs) self.use_type_embeddings = use_type_embeddings self.token_type_vocab_size = token_type_vocab_size self.use_position_embeddings = use_position_embeddings self.max_position_embeddings = 
max_position_embeddings self.dropout_prob = dropout_prob self.initializer_range = initializer_range if not initializer: self.initializer = tf.keras.initializers.TruncatedNormal( stddev=initializer_range) else: self.initializer = initializer if self.use_type_embeddings and not self.token_type_vocab_size: raise ValueError("If `use_type_embeddings` is True, then " "`token_type_vocab_size` must be specified.") def build(self, input_shapes): """Implements build() for the layer.""" (word_embeddings_shape, _) = input_shapes width = word_embeddings_shape.as_list()[-1] self.type_embeddings = None if self.use_type_embeddings: self.type_embeddings = self.add_weight( "type_embeddings", shape=[self.token_type_vocab_size, width], initializer=tf.keras.initializers.TruncatedNormal( stddev=self.initializer_range), dtype=self.dtype) self.position_embeddings = None if self.use_position_embeddings: self.position_embeddings = self.add_weight( "position_embeddings", shape=[self.max_position_embeddings, width], initializer=tf.keras.initializers.TruncatedNormal( stddev=self.initializer_range), dtype=self.dtype) self.output_layer_norm = tf.keras.layers.LayerNormalization( name="layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32) self.output_dropout = tf.keras.layers.Dropout( rate=self.dropout_prob, dtype=tf.float32) super(EmbeddingPostprocessor, self).build(input_shapes) def __call__(self, word_embeddings, token_type_ids=None, **kwargs): inputs = tf_utils.pack_inputs([word_embeddings, token_type_ids]) return super(EmbeddingPostprocessor, self).__call__(inputs, **kwargs) # pytype: disable=attribute-error # typed-keras def call(self, inputs): """Implements call() for the layer.""" unpacked_inputs = tf_utils.unpack_inputs(inputs) word_embeddings = unpacked_inputs[0] token_type_ids = unpacked_inputs[1] input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] output = word_embeddings if self.use_type_embeddings: flat_token_type_ids = tf.reshape(token_type_ids, [-1]) token_type_embeddings = tf.gather(self.type_embeddings, flat_token_type_ids) token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width]) output += token_type_embeddings if self.use_position_embeddings: position_embeddings = tf.expand_dims( tf.slice(self.position_embeddings, [0, 0], [seq_length, width]), axis=0) output += position_embeddings output = self.output_layer_norm(output) output = self.output_dropout(output) return output class Decoder(tf.keras.layers.Layer): """The decoder network which can reuse encoder embeddings for target.""" def __init__(self, config, embedding_lookup=None, **kwargs): super(Decoder, self).__init__(**kwargs) self.config = config # Shares vocabulary embedding. 
self.embedding_lookup = None if embedding_lookup: self.embedding_lookup = embedding_lookup def build(self, unused_input_shapes): """Implements build() for the layer.""" if self.embedding_lookup is None: self.embedding_lookup = layers.OnDeviceEmbedding( vocab_size=self.config.vocab_size, embedding_width=self.config.hidden_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=self.config.initializer_range), name="target_embeddings") self.embedding_postprocessor = EmbeddingPostprocessor( use_type_embeddings=False, use_position_embeddings=True, max_position_embeddings=self.config.max_position_embeddings, dropout_prob=self.config.hidden_dropout_prob, initializer=tf.keras.initializers.VarianceScaling( scale=self.config.initializer_gain, mode="fan_avg", distribution="uniform"), name="embedding_postprocessor") # Decoder can use a different intermediate size. self.multi_channel_cross_attention = self.config.get( "multi_channel_cross_attention", False) self.decoder = TransformerDecoder( num_hidden_layers=self.config.num_decoder_layers, hidden_size=self.config.hidden_size, num_attention_heads=self.config.num_decoder_attn_heads, intermediate_size=self.config.decoder_intermediate_size, intermediate_activation=self.config.hidden_act, hidden_dropout_prob=self.config.hidden_dropout_prob, attention_probs_dropout_prob=self.config.attention_probs_dropout_prob, initializer_range=self.config.initializer_range, multi_channel_cross_attention=self.multi_channel_cross_attention, name="decoder") super(Decoder, self).build(unused_input_shapes) def _decoding_step_time_signal(self, target_embeds, decode_loop_step): """Applies time signal (positional embeddings) for decoded embeddings.""" # TODO(hongkuny): migrate to keras bert and design a module to handle this. output = target_embeds if self.embedding_postprocessor.use_position_embeddings: position_embeddings = tf.gather( self.embedding_postprocessor.position_embeddings, [decode_loop_step]) # Broadcasts to all sequences inside a batch. output += position_embeddings output = self.embedding_postprocessor.output_layer_norm(output) output = self.embedding_postprocessor.output_dropout(output) return output def call(self, inputs, cache=None, decode_loop_step=None, padded_decode=False): """Implements call() for the layer. Args: inputs: a list of input tensors. cache: A dictionary of cache tensors, including key & value attentions. Due to the limit of keras, we uses the side effect to update cache and states of tensors will be mutated. decode_loop_step: an integer to indicate the step inside a decoding loop. padded_decode: a boolean indicates if the pass is for padded decoding. Returns: Decoder output tensors. 
""" attention_bias = inputs["attention_bias"] target_ids = inputs["target_ids"] all_encoder_outputs = inputs["all_encoder_outputs"] self_attention_bias = inputs["self_attention_bias"] if not isinstance(all_encoder_outputs, list): all_encoder_outputs = [all_encoder_outputs] target_embeds = self.embedding_lookup(target_ids) if decode_loop_step is None: target_embeds = self.embedding_postprocessor(target_embeds) else: target_embeds = self._decoding_step_time_signal(target_embeds, decode_loop_step) decoder_inputs = dict( decoder_inputs=target_embeds, encoder_outputs=all_encoder_outputs, self_attention_mask=self_attention_bias, attention_mask=attention_bias) if self.multi_channel_cross_attention: decoder_inputs["doc_attention_probs"] = inputs["doc_attention_probs"] decode_outputs, cache = self.decoder( decoder_inputs, cache, decode_loop_step if padded_decode else None) return decode_outputs
15,312
40.274933
123
py
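
A minimal sketch of get_attention_bias above on a dummy padded sequence. Note that after the final tf.where, the helper returns a 0/1 visibility mask (1 = attendable, 0 = masked) rather than an additive -1e9 bias:

import tensorflow as tf
from official.projects.nhnet import decoder

ids = tf.constant([[4, 9, 0, 0]], dtype=tf.int32)  # 0 is the assumed pad id
cross_mask = decoder.get_attention_bias(ids, bias_type="single_cross")
self_mask = decoder.get_attention_bias(ids, bias_type="decoder_self")
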
models
models-master/official/projects/nhnet/models.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """tf.keras Models for NHNet.""" from typing import Optional, Text from absl import logging import gin import tensorflow as tf from official.modeling import tf_utils from official.modeling.hyperparams import params_dict from official.nlp.modeling import networks from official.nlp.modeling.layers import multi_channel_attention from official.nlp.modeling.ops import beam_search from official.projects.nhnet import configs from official.projects.nhnet import decoder from official.projects.nhnet import utils def embedding_linear(embedding_matrix, x): """Uses embeddings as linear transformation weights.""" with tf.name_scope("presoftmax_linear"): batch_size = tf.shape(x)[0] length = tf.shape(x)[1] hidden_size = tf.shape(x)[2] vocab_size = tf.shape(embedding_matrix)[0] x = tf.reshape(x, [-1, hidden_size]) logits = tf.matmul(x, embedding_matrix, transpose_b=True) return tf.reshape(logits, [batch_size, length, vocab_size]) def _add_sos_to_seq(seq, start_token_id): """Add a start sequence token while keeping seq length.""" batch_size = tf.shape(seq)[0] seq_len = tf.shape(seq)[1] sos_ids = tf.ones([batch_size], tf.int32) * start_token_id targets = tf.concat([tf.expand_dims(sos_ids, axis=1), seq], axis=1) targets = targets[:, :-1] tf.assert_equal(tf.shape(targets), (batch_size, seq_len)) return targets def remove_sos_from_seq(seq, pad_token_id): """Remove the start sequence token while keeping seq length.""" batch_size, seq_len = tf_utils.get_shape_list(seq, expected_rank=2) # remove <s> targets = seq[:, 1:] # pad pad_ids = tf.ones([batch_size], tf.int32) * pad_token_id targets = tf.concat([targets, tf.expand_dims(pad_ids, axis=1)], axis=1) tf.assert_equal(tf.shape(targets), (batch_size, seq_len)) return targets class Bert2Bert(tf.keras.Model): """Bert2Bert encoder decoder model for training.""" def __init__(self, params, bert_layer, decoder_layer, name=None): super(Bert2Bert, self).__init__(name=name) self.params = params if not bert_layer.built: raise ValueError("bert_layer should be built.") if not decoder_layer.built: raise ValueError("decoder_layer should be built.") self.bert_layer = bert_layer self.decoder_layer = decoder_layer def get_config(self): return {"params": self.params.as_dict()} def get_decode_logits(self, decoder_inputs, ids, decoder_self_attention_bias, step, cache=None): if cache: if self.params.get("padded_decode", False): bias_shape = decoder_self_attention_bias.shape.as_list() self_attention_bias = tf.slice( decoder_self_attention_bias, [0, 0, step, 0], [bias_shape[0], bias_shape[1], 1, bias_shape[3]]) else: self_attention_bias = decoder_self_attention_bias[:, :, step:step + 1, :step + 1] # Sets decoder input to the last generated IDs. 
decoder_input = ids[:, -1:] else: self_attention_bias = decoder_self_attention_bias[:, :, :step + 1, :step + 1] decoder_input = ids decoder_inputs["target_ids"] = decoder_input decoder_inputs["self_attention_bias"] = self_attention_bias if cache: decoder_outputs = self.decoder_layer( decoder_inputs, cache, decode_loop_step=step, padded_decode=self.params.get("padded_decode", False)) else: decoder_outputs = self.decoder_layer(decoder_inputs) logits = embedding_linear(self.decoder_layer.embedding_lookup.embeddings, decoder_outputs[:, -1:, :]) logits = tf.squeeze(logits, axis=[1]) return logits def _get_symbols_to_logits_fn(self, max_decode_length): """Returns a decoding function that calculates logits of the next tokens.""" # Max decode length should be smaller than the positional embedding max # sequence length. decoder_self_attention_bias = decoder.get_attention_bias( input_tensor=None, bias_type="decoder_self", max_length=max_decode_length) def _symbols_to_logits_fn(ids, i, cache): """Generate logits for next candidate IDs. Args: ids: Current decoded sequences. int tensor with shape [batch_size * beam_size, i + 1] i: Loop index cache: dictionary of values storing the encoder output, encoder-decoder attention bias, and previous decoder attention values. Returns: Tuple of (logits with shape [batch_size * beam_size, vocab_size], updated cache values) """ decoder_inputs = dict( all_encoder_outputs=cache["all_encoder_outputs"], attention_bias=cache["attention_bias"]) logits = self.get_decode_logits( decoder_inputs, ids, decoder_self_attention_bias, step=i, cache=cache if self.params.use_cache else None) return logits, cache return _symbols_to_logits_fn def train_decode(self, decode_outputs): logits = embedding_linear(self.decoder_layer.embedding_lookup.embeddings, decode_outputs) decode_output_ids = tf.cast(tf.argmax(logits, axis=-1), tf.int32) output_log_probs = tf.nn.log_softmax(logits, axis=-1) return logits, decode_output_ids, output_log_probs def predict_decode(self, start_token_ids, cache): symbols_to_logits_fn = self._get_symbols_to_logits_fn(self.params.len_title) # Use beam search to find the top beam_size sequences and scores. 
decoded_ids, scores = beam_search.sequence_beam_search( symbols_to_logits_fn=symbols_to_logits_fn, initial_ids=start_token_ids, initial_cache=cache, vocab_size=self.params.vocab_size, beam_size=self.params.beam_size, alpha=self.params.alpha, max_decode_length=self.params.len_title, padded_decode=self.params.get("padded_decode", False), eos_id=self.params.end_token_id) return decoded_ids, scores def _get_logits_for_decode_ids(self, decoder_inputs, top_decoded_ids): """Returns the log probabilities for ids.""" target_ids = _add_sos_to_seq(top_decoded_ids, self.params.start_token_id) decoder_inputs["self_attention_bias"] = decoder.get_attention_bias( target_ids, bias_type="decoder_self") decoder_inputs["target_ids"] = target_ids decoder_outputs = self.decoder_layer(decoder_inputs) logits = embedding_linear(self.decoder_layer.embedding_lookup.embeddings, decoder_outputs) return logits def _init_cache(self, batch_size): num_heads = self.params.num_decoder_attn_heads dim_per_head = self.params.hidden_size // num_heads init_decode_length = ( self.params.len_title if self.params.get("padded_decode", False) else 0) cache = {} for layer in range(self.params.num_decoder_layers): cache[str(layer)] = { "key": tf.zeros( [batch_size, init_decode_length, num_heads, dim_per_head], dtype=tf.float32), "value": tf.zeros( [batch_size, init_decode_length, num_heads, dim_per_head], dtype=tf.float32) } return cache def call(self, inputs, mode="train"): """Implements call(). Args: inputs: a dictionary of tensors. mode: string, an enum for mode, train/eval. Returns: logits, decode_output_ids, output_log_probs for training. top_decoded_ids for eval. """ input_ids = inputs["input_ids"] input_mask = inputs["input_mask"] segment_ids = inputs["segment_ids"] all_encoder_outputs, _ = self.bert_layer( [input_ids, input_mask, segment_ids]) if mode not in ("train", "eval", "predict"): raise ValueError("Invalid call mode: %s" % mode) encoder_decoder_attention_bias = decoder.get_attention_bias( input_ids, bias_type="single_cross", padding_value=self.params.pad_token_id) if mode == "train": self_attention_bias = decoder.get_attention_bias( inputs["target_ids"], bias_type="decoder_self") decoder_inputs = dict( attention_bias=encoder_decoder_attention_bias, all_encoder_outputs=all_encoder_outputs, target_ids=inputs["target_ids"], self_attention_bias=self_attention_bias) decoder_outputs = self.decoder_layer(decoder_inputs) return self.train_decode(decoder_outputs) batch_size = tf.shape(input_ids)[0] start_token_ids = tf.ones([batch_size], tf.int32) * self.params.start_token_id # Add encoder output and attention bias to the cache. 
if self.params.use_cache: cache = self._init_cache(batch_size) else: cache = {} cache["all_encoder_outputs"] = all_encoder_outputs cache["attention_bias"] = encoder_decoder_attention_bias decoded_ids, scores = self.predict_decode(start_token_ids, cache) if mode == "predict": return decoded_ids[:, :self.params.beam_size, 1:], scores[:, :self.params.beam_size] decoder_inputs = dict( attention_bias=encoder_decoder_attention_bias, all_encoder_outputs=all_encoder_outputs) top_decoded_ids = decoded_ids[:, 0, 1:] return self._get_logits_for_decode_ids(decoder_inputs, top_decoded_ids) class NHNet(Bert2Bert): """NHNet model which performs multi-doc decoding.""" def __init__(self, params, bert_layer, decoder_layer, name=None): super(NHNet, self).__init__(params, bert_layer, decoder_layer, name=name) self.doc_attention = multi_channel_attention.VotingAttention( num_heads=params.num_decoder_attn_heads, head_size=params.hidden_size // params.num_decoder_attn_heads) def _expand_doc_attention_probs(self, doc_attention_probs, target_length): """Expands doc attention probs to fit the decoding sequence length.""" doc_attention_probs = tf.expand_dims( doc_attention_probs, axis=[1]) # [B, 1, A] doc_attention_probs = tf.expand_dims( doc_attention_probs, axis=[2]) # [B, 1, 1, A] return tf.tile(doc_attention_probs, [1, self.params.num_decoder_attn_heads, target_length, 1]) def _get_symbols_to_logits_fn(self, max_decode_length): """Returns a decoding function that calculates logits of the next tokens.""" # Max decode length should be smaller than the positional embedding max # sequence length. decoder_self_attention_bias = decoder.get_attention_bias( input_tensor=None, bias_type="decoder_self", max_length=max_decode_length) def _symbols_to_logits_fn(ids, i, cache): """Generate logits for next candidate IDs.""" if self.params.use_cache: target_length = 1 else: target_length = i + 1 decoder_inputs = dict( doc_attention_probs=self._expand_doc_attention_probs( cache["doc_attention_probs"], target_length), all_encoder_outputs=cache["all_encoder_outputs"], attention_bias=cache["attention_bias"]) logits = self.get_decode_logits( decoder_inputs, ids, decoder_self_attention_bias, step=i, cache=cache if self.params.use_cache else None) return logits, cache return _symbols_to_logits_fn def call(self, inputs, mode="training"): # pytype: disable=signature-mismatch # overriding-default-value-checks input_shape = tf_utils.get_shape_list(inputs["input_ids"], expected_rank=3) batch_size, num_docs, len_passage = (input_shape[0], input_shape[1], input_shape[2]) input_ids = tf.reshape(inputs["input_ids"], [-1, len_passage]) input_mask = tf.reshape(inputs["input_mask"], [-1, len_passage]) segment_ids = tf.reshape(inputs["segment_ids"], [-1, len_passage]) all_encoder_outputs, _ = self.bert_layer( [input_ids, input_mask, segment_ids]) encoder_outputs = tf.reshape( all_encoder_outputs[-1], [batch_size, num_docs, len_passage, self.params.hidden_size]) doc_attention_mask = tf.reshape( tf.cast( tf.math.count_nonzero(input_mask, axis=1, dtype=tf.int32) > 2, tf.int32), [batch_size, num_docs]) doc_attention_probs = self.doc_attention(encoder_outputs, doc_attention_mask) encoder_decoder_attention_bias = decoder.get_attention_bias( inputs["input_ids"], bias_type="multi_cross", padding_value=self.params.pad_token_id) if mode == "train": target_length = tf_utils.get_shape_list( inputs["target_ids"], expected_rank=2)[1] doc_attention_probs = self._expand_doc_attention_probs( doc_attention_probs, target_length) self_attention_bias = 
decoder.get_attention_bias(
          inputs["target_ids"], bias_type="decoder_self")
      decoder_inputs = dict(
          attention_bias=encoder_decoder_attention_bias,
          self_attention_bias=self_attention_bias,
          target_ids=inputs["target_ids"],
          all_encoder_outputs=encoder_outputs,
          doc_attention_probs=doc_attention_probs)
      decoder_outputs = self.decoder_layer(decoder_inputs)
      return self.train_decode(decoder_outputs)

    # Adds encoder output and attention bias to the cache.
    if self.params.use_cache:
      cache = self._init_cache(batch_size)
    else:
      cache = {}
    cache["all_encoder_outputs"] = [encoder_outputs]
    cache["attention_bias"] = encoder_decoder_attention_bias
    cache["doc_attention_probs"] = doc_attention_probs

    start_token_ids = tf.ones([batch_size],
                              tf.int32) * self.params.start_token_id
    decoded_ids, scores = self.predict_decode(start_token_ids, cache)
    if mode == "predict":
      return decoded_ids[:, :self.params.beam_size,
                         1:], scores[:, :self.params.beam_size]

    top_decoded_ids = decoded_ids[:, 0, 1:]
    target_length = tf_utils.get_shape_list(top_decoded_ids)[-1]
    decoder_inputs = dict(
        attention_bias=encoder_decoder_attention_bias,
        all_encoder_outputs=[encoder_outputs],
        doc_attention_probs=self._expand_doc_attention_probs(
            doc_attention_probs, target_length))
    return self._get_logits_for_decode_ids(decoder_inputs, top_decoded_ids)


def get_bert2bert_layers(params: configs.BERT2BERTConfig):
  """Creates a Bert2Bert stem model and returns Bert encoder/decoder.

  We use a functional style to create the stem model because we need all
  layers to be built in order to restore variables in a customized way. The
  layers are called with placeholder inputs to make them fully built.

  Args:
    params: ParamsDict.

  Returns:
    two keras Layers, bert_model_layer and decoder_layer
  """
  input_ids = tf.keras.layers.Input(
      shape=(None,), name="input_ids", dtype=tf.int32)
  input_mask = tf.keras.layers.Input(
      shape=(None,), name="input_mask", dtype=tf.int32)
  segment_ids = tf.keras.layers.Input(
      shape=(None,), name="segment_ids", dtype=tf.int32)
  target_ids = tf.keras.layers.Input(
      shape=(None,), name="target_ids", dtype=tf.int32)
  bert_config = utils.get_bert_config_from_params(params)
  bert_model_layer = networks.BertEncoder(
      vocab_size=bert_config.vocab_size,
      hidden_size=bert_config.hidden_size,
      num_layers=bert_config.num_hidden_layers,
      num_attention_heads=bert_config.num_attention_heads,
      intermediate_size=bert_config.intermediate_size,
      activation=tf_utils.get_activation(bert_config.hidden_act),
      dropout_rate=bert_config.hidden_dropout_prob,
      attention_dropout_rate=bert_config.attention_probs_dropout_prob,
      max_sequence_length=bert_config.max_position_embeddings,
      type_vocab_size=bert_config.type_vocab_size,
      initializer=tf.keras.initializers.TruncatedNormal(
          stddev=bert_config.initializer_range),
      return_all_encoder_outputs=True,
      name="bert_encoder")
  all_encoder_outputs, _ = bert_model_layer(
      [input_ids, input_mask, segment_ids])
  # pylint: disable=protected-access
  decoder_layer = decoder.Decoder(params, bert_model_layer._embedding_layer)
  # pylint: enable=protected-access
  cross_attention_bias = decoder.AttentionBias(bias_type="single_cross")(
      input_ids)
  self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")(
      target_ids)
  decoder_inputs = dict(
      attention_bias=cross_attention_bias,
      self_attention_bias=self_attention_bias,
      target_ids=target_ids,
      all_encoder_outputs=all_encoder_outputs)
  _ = decoder_layer(decoder_inputs)

  return bert_model_layer, decoder_layer


def get_nhnet_layers(params: configs.NHNetConfig):
  """Creates a Multi-doc encoder/decoder.

  Args:
    params: ParamsDict.
Returns: two keras Layers, bert_model_layer and decoder_layer """ input_ids = tf.keras.layers.Input( shape=(None,), name="input_ids", dtype=tf.int32) input_mask = tf.keras.layers.Input( shape=(None,), name="input_mask", dtype=tf.int32) segment_ids = tf.keras.layers.Input( shape=(None,), name="segment_ids", dtype=tf.int32) bert_config = utils.get_bert_config_from_params(params) bert_model_layer = networks.BertEncoder( vocab_size=bert_config.vocab_size, hidden_size=bert_config.hidden_size, num_layers=bert_config.num_hidden_layers, num_attention_heads=bert_config.num_attention_heads, intermediate_size=bert_config.intermediate_size, activation=tf_utils.get_activation(bert_config.hidden_act), dropout_rate=bert_config.hidden_dropout_prob, attention_dropout_rate=bert_config.attention_probs_dropout_prob, max_sequence_length=bert_config.max_position_embeddings, type_vocab_size=bert_config.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=bert_config.initializer_range), return_all_encoder_outputs=True, name="bert_encoder") bert_model_layer([input_ids, input_mask, segment_ids]) input_ids = tf.keras.layers.Input( shape=(None, None), name="input_ids", dtype=tf.int32) all_encoder_outputs = tf.keras.layers.Input((None, None, params.hidden_size), dtype=tf.float32) target_ids = tf.keras.layers.Input( shape=(None,), name="target_ids", dtype=tf.int32) doc_attention_probs = tf.keras.layers.Input( (params.num_decoder_attn_heads, None, None), dtype=tf.float32) # pylint: disable=protected-access decoder_layer = decoder.Decoder(params, bert_model_layer._embedding_layer) # pylint: enable=protected-access cross_attention_bias = decoder.AttentionBias(bias_type="multi_cross")( input_ids) self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")( target_ids) decoder_inputs = dict( attention_bias=cross_attention_bias, self_attention_bias=self_attention_bias, target_ids=target_ids, all_encoder_outputs=all_encoder_outputs, doc_attention_probs=doc_attention_probs) _ = decoder_layer(decoder_inputs) return bert_model_layer, decoder_layer def create_transformer_model(params, init_checkpoint: Optional[Text] = None ) -> tf.keras.Model: """A helper to create Transformer model.""" bert_layer, decoder_layer = get_bert2bert_layers(params=params) model = Bert2Bert( params=params, bert_layer=bert_layer, decoder_layer=decoder_layer, name="transformer") if init_checkpoint: logging.info( "Checkpoint file %s found and restoring from " "initial checkpoint.", init_checkpoint) ckpt = tf.train.Checkpoint(model=model) ckpt.restore(init_checkpoint).expect_partial() return model def create_bert2bert_model( params: configs.BERT2BERTConfig, cls=Bert2Bert, init_checkpoint: Optional[Text] = None) -> tf.keras.Model: """A helper to create Bert2Bert model.""" bert_layer, decoder_layer = get_bert2bert_layers(params=params) if init_checkpoint: utils.initialize_bert2bert_from_pretrained_bert(bert_layer, decoder_layer, init_checkpoint) return cls( params=params, bert_layer=bert_layer, decoder_layer=decoder_layer, name="bert2bert") def create_nhnet_model( params: configs.NHNetConfig, cls=NHNet, init_checkpoint: Optional[Text] = None) -> tf.keras.Model: """A helper to create NHNet model.""" bert_layer, decoder_layer = get_nhnet_layers(params=params) model = cls( params=params, bert_layer=bert_layer, decoder_layer=decoder_layer, name="nhnet") if init_checkpoint: logging.info( "Checkpoint file %s found and restoring from " "initial checkpoint.", init_checkpoint) if params.init_from_bert2bert: ckpt = 
tf.train.Checkpoint(model=model) ckpt.restore(init_checkpoint).assert_existing_objects_matched() else: utils.initialize_bert2bert_from_pretrained_bert(bert_layer, decoder_layer, init_checkpoint) return model @gin.configurable def get_model_params(model: Optional[Text] = "bert2bert", config_class=None) -> params_dict.ParamsDict: """Helper function to convert config file to ParamsDict.""" if model == "bert2bert": return configs.BERT2BERTConfig() elif model == "nhnet": return configs.NHNetConfig() elif config_class: return config_class() else: raise KeyError("The model type is not defined: %s" % model) @gin.configurable def create_model(model_type: Text, params, init_checkpoint: Optional[Text] = None): """A factory function to create different types of models.""" if model_type == "bert2bert": return create_bert2bert_model(params, init_checkpoint=init_checkpoint) elif model_type == "nhnet": return create_nhnet_model(params, init_checkpoint=init_checkpoint) elif "transformer" in model_type: return create_transformer_model(params, init_checkpoint=init_checkpoint) else: raise KeyError("The model type is not defined: %s" % model_type)
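

# Editor's sketch, not part of the original file: a minimal, hedged
# illustration of the factory functions above, assuming the default
# NHNetConfig and no checkpoint restore.
def _example_create_nhnet_model():
  """Builds an NHNet model from the default configuration."""
  params = get_model_params("nhnet")
  # create_model dispatches to create_nhnet_model, which calls the encoder and
  # decoder layers on placeholder inputs so all variables exist before any
  # customized checkpoint restore.
  return create_model("nhnet", params, init_checkpoint=None)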
23,129
38.674099
115
py
models
models-master/official/projects/nhnet/evaluation.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Evaluation for Bert2Bert."""

import os

# Import libraries
from absl import logging
import numpy as np
import tensorflow as tf

from official.legacy.transformer import metrics as metrics_v2
from official.legacy.transformer.utils import metrics
from official.projects.nhnet import input_pipeline
from official.projects.nhnet import models


def rouge_l_fscore(logits, labels):
  """ROUGE scores computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    logits: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge_l_fscore: approx rouge-l f1 score.
  """
  predictions = np.argmax(logits, axis=-1)
  rouge_l_f_score = metrics.rouge_l_sentence_level(predictions, labels)
  return rouge_l_f_score


def rouge_2_fscore(logits, labels):
  """ROUGE-2 F1 score computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    logits: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge2_fscore: approx rouge-2 f1 score.
  """
  predictions = np.argmax(logits, axis=-1)
  rouge_2_f_score = metrics.rouge_n(predictions, labels)
  return rouge_2_f_score


def bleu_score(logits, labels):
  """Approximate BLEU score computation between labels and predictions.

  An approximate BLEU scoring method since we do not glue word pieces or
  decode the ids and tokenize the output. By default, we use ngram order of 4
  and use brevity penalty. Also, this does not have beam search.

  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]

  Returns:
    bleu: float, approx bleu score
  """
  predictions = np.argmax(logits, axis=-1)
  bleu = metrics.compute_bleu(labels, predictions)
  return bleu


def continuous_eval(strategy,
                    params,
                    model_type,
                    eval_file_pattern=None,
                    batch_size=4,
                    eval_steps=None,
                    model_dir=None,
                    timeout=3000):
  """Continuously evaluate checkpoints on testing data."""
  test_dataset = input_pipeline.get_input_dataset(
      eval_file_pattern,
      batch_size=batch_size,
      params=params,
      is_training=False,
      strategy=strategy)

  with strategy.scope():
    model = models.create_model(model_type, params)
    metric_layer = metrics_v2.MetricLayer(params.vocab_size)
    eval_summary_writer = tf.summary.create_file_writer(
        os.path.join(model_dir, "summaries/eval"))
    global_step = tf.Variable(
        0,
        trainable=False,
        dtype=tf.int64,
        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
        shape=[])

  @tf.function
  def test_step(inputs):
    """Calculates evaluation metrics on distributed devices."""

    def _test_step_fn(inputs):
      """Replicated accuracy calculation."""
      targets = models.remove_sos_from_seq(inputs["target_ids"],
                                           params.pad_token_id)

      # Using ground truth sequences as targets to calculate logits for
      # accuracy and perplexity metrics.
      logits, _, _ = model(inputs, training=False, mode="train")
      metric_layer([logits, targets])

      # Get logits from top beam search results for bleu and rouge metrics.
      logits = model(inputs, training=False, mode="eval")
      return targets, logits

    outputs = strategy.run(_test_step_fn, args=(inputs,))
    return tf.nest.map_structure(strategy.experimental_local_results, outputs)

  metrics_and_funcs = [
      (tf.keras.metrics.Mean("bleu", dtype=tf.float32), bleu_score),
      (tf.keras.metrics.Mean("rouge_2_fscore", dtype=tf.float32),
       rouge_2_fscore),
      (tf.keras.metrics.Mean("rouge_l_fscore", dtype=tf.float32),
       rouge_l_fscore),
  ]
  eval_results = {}
  for latest_checkpoint in tf.train.checkpoints_iterator(
      model_dir, timeout=timeout):
    checkpoint = tf.train.Checkpoint(model=model, global_step=global_step)
    checkpoint.restore(latest_checkpoint).expect_partial()
    logging.info("Loaded checkpoint %s", latest_checkpoint)

    for i, inputs in enumerate(test_dataset):
      if eval_steps and i >= eval_steps:
        break
      outputs = test_step(inputs)
      for metric, func in metrics_and_funcs:
        for targets, logits in zip(outputs[0], outputs[1]):
          metric.update_state(func(logits.numpy(), targets.numpy()))

    with eval_summary_writer.as_default():
      step = global_step.numpy()
      for metric, _ in metrics_and_funcs:
        eval_results[metric.name] = metric.result().numpy().astype(float)
        tf.summary.scalar(metric.name, eval_results[metric.name], step=step)
      for metric in metric_layer.metrics:
        eval_results[metric.name] = metric.result().numpy().astype(float)
        tf.summary.scalar(metric.name, eval_results[metric.name], step=step)
      logging.info("Step %d Metrics= %s", step, str(eval_results))
      eval_summary_writer.flush()

    # Resets metrics.
    for metric, _ in metrics_and_funcs:
      metric.reset_states()
    for metric in metric_layer.metrics:
      metric.reset_states()

  return eval_results
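

# Editor's sketch, not part of the original file: the approximate metric
# helpers above operate on plain numpy arrays, so under the assumption of
# purely synthetic inputs they can be exercised without building a model.
def _example_approx_metrics():
  """Runs the approximate metrics on dummy logits and matching labels."""
  logits = np.random.rand(2, 5, 10)  # [batch_size, length, vocab_size].
  labels = np.argmax(logits, axis=-1)  # Labels equal to the greedy argmax.
  # With labels identical to the predictions, both scores should be maximal.
  return bleu_score(logits, labels), rouge_2_fscore(logits, labels)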
6,091
32.472527
80
py
models
models-master/official/projects/nhnet/trainer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Run NHNet model training and eval."""

import os

# Import libraries
from absl import app
from absl import flags
from absl import logging
from six.moves import zip
import tensorflow as tf

from official.common import distribute_utils
from official.legacy.transformer import metrics as transformer_metrics
from official.modeling.hyperparams import params_dict
from official.projects.nhnet import evaluation
from official.projects.nhnet import input_pipeline
from official.projects.nhnet import models
from official.projects.nhnet import optimizer
from official.utils.misc import keras_utils

FLAGS = flags.FLAGS


def define_flags():
  """Defines command line flags used by NHNet trainer."""
  ## Required parameters
  flags.DEFINE_enum("mode", "train", ["train", "eval", "train_and_eval"],
                    "Execution mode.")
  flags.DEFINE_string("train_file_pattern", "", "Train file pattern.")
  flags.DEFINE_string("eval_file_pattern", "", "Eval file pattern.")
  flags.DEFINE_string(
      "model_dir", None,
      "The output directory where the model checkpoints will be written.")

  # Model training specific flags.
  flags.DEFINE_enum(
      "distribution_strategy", "mirrored", ["tpu", "mirrored"],
      "Distribution Strategy type to use for training. `tpu` uses TPUStrategy "
      "for running on TPUs, `mirrored` uses GPUs with single host.")
  flags.DEFINE_string("tpu", "", "TPU address to connect to.")
  flags.DEFINE_string(
      "init_checkpoint", None,
      "Initial checkpoint (usually from a pre-trained BERT model).")
  flags.DEFINE_integer("train_steps", 100000, "Max train steps")
  flags.DEFINE_integer("eval_steps", 32, "Number of eval steps per run.")
  flags.DEFINE_integer("eval_timeout", 3000, "Timeout waiting for checkpoints.")
  flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
  flags.DEFINE_integer("eval_batch_size", 4, "Total batch size for evaluation.")
  flags.DEFINE_integer(
      "steps_per_loop", 1000,
      "Number of steps per graph-mode loop. Only training step "
      "happens inside the loop.")
  flags.DEFINE_integer("checkpoint_interval", 2000, "Checkpointing interval.")
  flags.DEFINE_integer("len_title", 15, "Title length.")
  flags.DEFINE_integer("len_passage", 200, "Passage length.")
  flags.DEFINE_integer("num_encoder_layers", 12,
                       "Number of hidden layers of encoder.")
  flags.DEFINE_integer("num_decoder_layers", 12,
                       "Number of hidden layers of decoder.")
  flags.DEFINE_string("model_type", "nhnet",
                      "Model type to choose a model configuration.")
  flags.DEFINE_integer(
      "num_nhnet_articles", 5,
      "Maximum number of articles in NHNet, only used when model_type=nhnet")
  flags.DEFINE_string(
      "params_override",
      default=None,
      help=("a YAML/JSON string or a YAML file which specifies additional "
            "overrides over the default parameters"))
  # Enables MLIR-based TF/XLA bridge. This is part of a soft rollout and will
  # eventually be the Google-wide default.
  flags.DEFINE_bool("enable_mlir_bridge", True,
                    "Use MLIR TF/XLA bridge (experimental).")


# pylint: disable=protected-access


class Trainer(tf.keras.Model):
  """A training only model."""

  def __init__(self, model, params):
    super(Trainer, self).__init__()
    self.model = model
    self.params = params
    self._num_replicas_in_sync = tf.distribute.get_strategy(
    ).num_replicas_in_sync

  def call(self, inputs, mode="train"):
    return self.model(inputs, mode)

  def train_step(self, inputs):
    """The logic for one training step."""
    with tf.GradientTape() as tape:
      logits, _, _ = self(inputs, mode="train", training=True)
      targets = models.remove_sos_from_seq(inputs["target_ids"],
                                           self.params.pad_token_id)
      loss = transformer_metrics.transformer_loss(logits, targets,
                                                  self.params.label_smoothing,
                                                  self.params.vocab_size)
      # Scales the loss, which results in using the average loss across all
      # of the replicas for backprop.
      scaled_loss = loss / self._num_replicas_in_sync

    tvars = self.trainable_variables
    grads = tape.gradient(scaled_loss, tvars)
    self.optimizer.apply_gradients(list(zip(grads, tvars)))
    if isinstance(self.optimizer, tf.keras.optimizers.experimental.Optimizer):
      learning_rate = self.optimizer.learning_rate
    else:
      learning_rate = self.optimizer._decayed_lr(var_dtype=tf.float32)
    return {
        "training_loss": loss,
        "learning_rate": learning_rate,
    }


def train(params, strategy, dataset=None):
  """Runs training."""

  if not dataset:
    dataset = input_pipeline.get_input_dataset(
        FLAGS.train_file_pattern,
        FLAGS.train_batch_size,
        params,
        is_training=True,
        strategy=strategy)

  with strategy.scope():
    model = models.create_model(
        FLAGS.model_type, params, init_checkpoint=FLAGS.init_checkpoint)
    opt = optimizer.create_optimizer(params)
    trainer = Trainer(model, params)

    trainer.compile(
        optimizer=opt,
        steps_per_execution=FLAGS.steps_per_loop)
    summary_dir = os.path.join(FLAGS.model_dir, "summaries")
    summary_callback = tf.keras.callbacks.TensorBoard(
        summary_dir, update_freq=max(100, FLAGS.steps_per_loop))
    checkpoint = tf.train.Checkpoint(
        model=model, optimizer=opt, global_step=opt.iterations)
    checkpoint_manager = tf.train.CheckpointManager(
        checkpoint,
        directory=FLAGS.model_dir,
        max_to_keep=10,
        step_counter=opt.iterations,
        checkpoint_interval=FLAGS.checkpoint_interval)
    if checkpoint_manager.restore_or_initialize():
      logging.info("Training restored from the checkpoints in: %s",
                   FLAGS.model_dir)
    checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager)

  # Trains the model.
  steps_per_epoch = min(FLAGS.train_steps, FLAGS.checkpoint_interval)
  epochs = FLAGS.train_steps // steps_per_epoch
  history = trainer.fit(
      x=dataset,
      steps_per_epoch=steps_per_epoch,
      epochs=epochs,
      callbacks=[summary_callback, checkpoint_callback],
      verbose=2)
  train_hist = history.history
  # Gets final loss from training.
  stats = dict(training_loss=float(train_hist["training_loss"][-1]))
  return stats


def run():
  """Runs NHNet using Keras APIs."""
  if FLAGS.enable_mlir_bridge:
    tf.config.experimental.enable_mlir_bridge()

  strategy = distribute_utils.get_distribution_strategy(
      distribution_strategy=FLAGS.distribution_strategy, tpu_address=FLAGS.tpu)
  if strategy:
    logging.info("***** Number of cores used : %d",
                 strategy.num_replicas_in_sync)

  params = models.get_model_params(FLAGS.model_type)
  params = params_dict.override_params_dict(
      params, FLAGS.params_override, is_strict=True)
  params.override(
      {
          "len_title": FLAGS.len_title,
          "len_passage": FLAGS.len_passage,
          "num_hidden_layers": FLAGS.num_encoder_layers,
          "num_decoder_layers": FLAGS.num_decoder_layers,
          "passage_list": [
              chr(ord("b") + i) for i in range(FLAGS.num_nhnet_articles)
          ],
      },
      is_strict=False)
  stats = {}
  if "train" in FLAGS.mode:
    stats = train(params, strategy)
  if "eval" in FLAGS.mode:
    timeout = 0 if FLAGS.mode == "train_and_eval" else FLAGS.eval_timeout
    # Uses padded decoding for TPU. Always uses cache.
    padded_decode = isinstance(strategy, tf.distribute.TPUStrategy)
    params.override({
        "padded_decode": padded_decode,
    }, is_strict=False)
    stats = evaluation.continuous_eval(
        strategy,
        params,
        model_type=FLAGS.model_type,
        eval_file_pattern=FLAGS.eval_file_pattern,
        batch_size=FLAGS.eval_batch_size,
        eval_steps=FLAGS.eval_steps,
        model_dir=FLAGS.model_dir,
        timeout=timeout)
  return stats


def main(_):
  stats = run()
  if stats:
    logging.info("Stats:\n%s", stats)


if __name__ == "__main__":
  define_flags()
  app.run(main)
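
# Editor's note, not part of the original file: a typical single-host GPU
# invocation might look like the following; all paths are hypothetical
# placeholders.
#
#   python3 trainer.py \
#     --mode=train_and_eval \
#     --model_type=nhnet \
#     --train_file_pattern=/path/to/train-* \
#     --eval_file_pattern=/path/to/eval-* \
#     --model_dir=/tmp/nhnet \
#     --distribution_strategy=mirrored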
8,900
35.780992
80
py
models
models-master/official/projects/nhnet/optimizer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Optimizer and learning rate scheduler."""

import tensorflow as tf

from official.modeling.hyperparams import params_dict


class LearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Learning rate schedule."""

  def __init__(self, initial_learning_rate, hidden_size, warmup_steps):
    """Initialize configuration of the learning rate schedule.

    Args:
      initial_learning_rate: A float, the initial learning rate.
      hidden_size: An integer, the model dimension in the hidden layers.
      warmup_steps: An integer, the number of steps required for linear warmup.
    """
    super(LearningRateSchedule, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.hidden_size = hidden_size
    self.warmup_steps = tf.cast(warmup_steps, tf.float32)

  def __call__(self, global_step):
    """Calculate learning rate with linear warmup and rsqrt decay.

    Args:
      global_step: An integer, the current global step used for learning rate
        calculation.

    Returns:
      A float, the learning rate needs to be used for current global step.
    """
    with tf.name_scope('learning_rate_schedule'):
      global_step = tf.cast(global_step, tf.float32)
      learning_rate = self.initial_learning_rate
      learning_rate *= (self.hidden_size**-0.5)
      # Apply linear warmup
      learning_rate *= tf.minimum(1.0, global_step / self.warmup_steps)
      # Apply rsqrt decay
      learning_rate /= tf.sqrt(tf.maximum(global_step, self.warmup_steps))
      return learning_rate

  def get_config(self):
    """Get the configuration of the learning rate schedule."""
    return {
        'initial_learning_rate': self.initial_learning_rate,
        'hidden_size': self.hidden_size,
        'warmup_steps': self.warmup_steps,
    }


def create_optimizer(params: params_dict.ParamsDict):
  """Creates optimizer."""
  lr_schedule = LearningRateSchedule(params.learning_rate, params.hidden_size,
                                     params.learning_rate_warmup_steps)
  return tf.keras.optimizers.Adam(
      learning_rate=lr_schedule,
      beta_1=params.adam_beta1,
      beta_2=params.adam_beta2,
      epsilon=params.adam_epsilon)
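

# Editor's sketch, not part of the original file: a hedged illustration of
# the schedule above with illustrative constants (a Transformer-style
# "noam" schedule: linear warmup, then 1/sqrt(step) decay).
def _example_schedule():
  """Evaluates the schedule during and after warmup."""
  schedule = LearningRateSchedule(
      initial_learning_rate=2.0, hidden_size=768, warmup_steps=10000)
  # At step 5000 the rate is still ramping up linearly; at step 40000 it is
  # decaying as 1/sqrt(step).
  return schedule(tf.constant(5000)), schedule(tf.constant(40000))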
2,809
35.973684
79
py
models
models-master/official/projects/fffner/fffner.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The encoder used for FFFNER."""

import tensorflow as tf

from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.fffner import fffner_encoder

FFFNerEncoderConfig = encoders.BertEncoderConfig


@base_config.bind(FFFNerEncoderConfig)
def get_encoder(encoder_cfg: FFFNerEncoderConfig):
  """Gets the FFFNerEncoder from the configurations."""
  encoder = fffner_encoder.FFFNerEncoder(
      vocab_size=encoder_cfg.vocab_size,
      hidden_size=encoder_cfg.hidden_size,
      num_layers=encoder_cfg.num_layers,
      num_attention_heads=encoder_cfg.num_attention_heads,
      inner_dim=encoder_cfg.intermediate_size,
      inner_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
      output_dropout=encoder_cfg.dropout_rate,
      attention_dropout=encoder_cfg.attention_dropout_rate,
      max_sequence_length=encoder_cfg.max_position_embeddings,
      type_vocab_size=encoder_cfg.type_vocab_size,
      initializer=tf.keras.initializers.TruncatedNormal(
          stddev=encoder_cfg.initializer_range),
      output_range=encoder_cfg.output_range,
      embedding_width=encoder_cfg.embedding_size,
      norm_first=encoder_cfg.norm_first)
  return encoder
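

# Editor's sketch, not part of the original file: a hedged example of driving
# the builder above from a config, assuming the BertEncoderConfig defaults for
# every field not set here.
def _example_get_encoder():
  """Builds a small FFFNerEncoder from a two-layer config."""
  cfg = FFFNerEncoderConfig(vocab_size=30522, num_layers=2)
  return get_encoder(cfg)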
1,885
40
78
py
models
models-master/official/projects/fffner/fffner_encoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Transformer-based encoder network for FFFNER."""
# pylint: disable=g-classes-have-attributes

from typing import Any, Callable, Optional, Union
from absl import logging
import tensorflow as tf

from official.modeling import tf_utils
from official.nlp.modeling import layers

_Initializer = Union[str, tf.keras.initializers.Initializer]
_Activation = Union[str, Callable[..., Any]]

_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)


class FFFNerEncoder(tf.keras.layers.Layer):
  """Transformer-based encoder network for FFFNER.

  The main difference from a standard BERT-style encoder is that it takes in
  additional positional arguments and returns last layer representations at
  those positions.

  Args:
    vocab_size: The size of the token vocabulary.
    hidden_size: The size of the transformer hidden layers.
    num_layers: The number of transformer layers.
    num_attention_heads: The number of attention heads for each transformer.
      The hidden size must be divisible by the number of attention heads.
    max_sequence_length: The maximum sequence length that this encoder can
      consume. This determines the variable shape for positional embeddings.
    type_vocab_size: The number of types that the 'type_ids' input can take.
    inner_dim: The output dimension of the first Dense layer in a two-layer
      feedforward network for each transformer.
    inner_activation: The activation for the first Dense layer in a two-layer
      feedforward network for each transformer.
    output_dropout: Dropout probability for the post-attention and output
      dropout.
    attention_dropout: The dropout rate to use for the attention layers within
      the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
    output_range: The sequence output range, [0, output_range), by slicing the
      target sequence of the last transformer layer. `None` means the entire
      target sequence will attend to the source sequence, which yields the
      full output.
    embedding_width: The width of the word embeddings. If the embedding width
      is not equal to hidden size, embedding parameters will be factorized
      into two matrices in the shape of ['vocab_size', 'embedding_width'] and
      ['embedding_width', 'hidden_size'] ('embedding_width' is usually much
      smaller than 'hidden_size').
    embedding_layer: An optional Layer instance which will be called to
      generate embeddings for the input word IDs.
    norm_first: Whether to normalize inputs to attention and intermediate
      dense layers. If set False, output of attention and intermediate dense
      layers is normalized.
    with_dense_inputs: Whether to accept dense embeddings as the input.
    return_attention_scores: Whether to add an additional output containing
      the attention scores of all transformer layers. This will be a list of
      length `num_layers`, and each element will be in the shape [batch_size,
      num_attention_heads, seq_dim, seq_dim].
  """

  def __init__(
      self,
      vocab_size: int,
      hidden_size: int = 768,
      num_layers: int = 12,
      num_attention_heads: int = 12,
      max_sequence_length: int = 512,
      type_vocab_size: int = 16,
      inner_dim: int = 3072,
      inner_activation: _Activation = _approx_gelu,
      output_dropout: float = 0.1,
      attention_dropout: float = 0.1,
      initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
          stddev=0.02),
      output_range: Optional[int] = None,
      embedding_width: Optional[int] = None,
      embedding_layer: Optional[tf.keras.layers.Layer] = None,
      norm_first: bool = False,
      with_dense_inputs: bool = False,
      return_attention_scores: bool = False,
      **kwargs):
    if 'dict_outputs' in kwargs:
      kwargs.pop('dict_outputs')
    if 'return_all_encoder_outputs' in kwargs:
      kwargs.pop('return_all_encoder_outputs')
    if 'intermediate_size' in kwargs:
      inner_dim = kwargs.pop('intermediate_size')
    if 'activation' in kwargs:
      inner_activation = kwargs.pop('activation')
    if 'dropout_rate' in kwargs:
      output_dropout = kwargs.pop('dropout_rate')
    if 'attention_dropout_rate' in kwargs:
      attention_dropout = kwargs.pop('attention_dropout_rate')
    super().__init__(**kwargs)

    self._output_range = output_range

    activation = tf.keras.activations.get(inner_activation)
    initializer = tf.keras.initializers.get(initializer)

    if embedding_width is None:
      embedding_width = hidden_size

    if embedding_layer is None:
      self._embedding_layer = layers.OnDeviceEmbedding(
          vocab_size=vocab_size,
          embedding_width=embedding_width,
          initializer=tf_utils.clone_initializer(initializer),
          name='word_embeddings')
    else:
      self._embedding_layer = embedding_layer

    self._position_embedding_layer = layers.PositionEmbedding(
        initializer=tf_utils.clone_initializer(initializer),
        max_length=max_sequence_length,
        name='position_embedding')

    self._type_embedding_layer = layers.OnDeviceEmbedding(
        vocab_size=type_vocab_size,
        embedding_width=embedding_width,
        initializer=tf_utils.clone_initializer(initializer),
        use_one_hot=True,
        name='type_embeddings')

    self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
        name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)

    self._embedding_dropout = tf.keras.layers.Dropout(
        rate=output_dropout, name='embedding_dropout')

    # We project the 'embedding' output to 'hidden_size' if it is not already
    # 'hidden_size'.
    self._embedding_projection = None
    if embedding_width != hidden_size:
      self._embedding_projection = tf.keras.layers.EinsumDense(
          '...x,xy->...y',
          output_shape=hidden_size,
          bias_axes='y',
          kernel_initializer=tf_utils.clone_initializer(initializer),
          name='embedding_projection')

    self._transformer_layers = []
    self._attention_mask_layer = layers.SelfAttentionMask(
        name='self_attention_mask')
    self._num_layers = num_layers
    for i in range(num_layers):
      layer = layers.TransformerEncoderBlock(
          num_attention_heads=num_attention_heads,
          inner_dim=inner_dim,
          inner_activation=inner_activation,
          output_dropout=output_dropout,
          attention_dropout=attention_dropout,
          norm_first=norm_first,
          return_attention_scores=return_attention_scores,
          kernel_initializer=tf_utils.clone_initializer(initializer),
          name='transformer/layer_%d' % i)
      self._transformer_layers.append(layer)

    self._pooler_layer_is_entity = tf.keras.layers.Dense(
        units=hidden_size,
        activation='tanh',
        kernel_initializer=tf_utils.clone_initializer(initializer),
        name='pooler_transform_is_entity')
    self._pooler_layer_entity_type = tf.keras.layers.Dense(
        units=hidden_size,
        activation='tanh',
        kernel_initializer=tf_utils.clone_initializer(initializer),
        name='pooler_transform_entity_type')

    self._config = {
        'vocab_size': vocab_size,
        'hidden_size': hidden_size,
        'num_layers': num_layers,
        'num_attention_heads': num_attention_heads,
        'max_sequence_length': max_sequence_length,
        'type_vocab_size': type_vocab_size,
        'inner_dim': inner_dim,
        'inner_activation': tf.keras.activations.serialize(activation),
        'output_dropout': output_dropout,
        'attention_dropout': attention_dropout,
        'initializer': tf.keras.initializers.serialize(initializer),
        'output_range': output_range,
        'embedding_width': embedding_width,
        'embedding_layer': embedding_layer,
        'norm_first': norm_first,
        'with_dense_inputs': with_dense_inputs,
        'return_attention_scores': return_attention_scores,
    }
    if with_dense_inputs:
      self.inputs = dict(
          input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
          input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
          input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
          dense_inputs=tf.keras.Input(
              shape=(None, embedding_width), dtype=tf.float32),
          dense_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
          dense_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
          is_entity_token_pos=tf.keras.Input(shape=(None,), dtype=tf.int32),
          entity_type_token_pos=tf.keras.Input(shape=(None,), dtype=tf.int32))
    else:
      self.inputs = dict(
          input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
          input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
          input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
          is_entity_token_pos=tf.keras.Input(shape=(None,), dtype=tf.int32),
          entity_type_token_pos=tf.keras.Input(shape=(None,), dtype=tf.int32))

  def call(self, inputs):
    word_embeddings = None
    if isinstance(inputs, dict):
      word_ids = inputs.get('input_word_ids')
      mask = inputs.get('input_mask')
      type_ids = inputs.get('input_type_ids')
      word_embeddings = inputs.get('input_word_embeddings', None)
      dense_inputs = inputs.get('dense_inputs', None)
      dense_mask = inputs.get('dense_mask', None)
      dense_type_ids = inputs.get('dense_type_ids', None)
      is_entity_token_pos = inputs.get('is_entity_token_pos', None)
      entity_type_token_pos = inputs.get('entity_type_token_pos', None)
    else:
      raise ValueError('Unexpected inputs type to %s.' % self.__class__)

    if word_embeddings is None:
      word_embeddings = self._embedding_layer(word_ids)

    if dense_inputs is not None:
      mask = tf.concat([mask, dense_mask], axis=1)

    embeddings = self._get_embeddings(word_ids, type_ids, word_embeddings,
                                      dense_inputs, dense_type_ids)

    embeddings = self._embedding_norm_layer(embeddings)
    embeddings = self._embedding_dropout(embeddings)

    if self._embedding_projection is not None:
      embeddings = self._embedding_projection(embeddings)

    attention_mask = self._attention_mask_layer(embeddings, mask)

    encoder_outputs = []
    attention_outputs = []
    x = embeddings
    for i, layer in enumerate(self._transformer_layers):
      transformer_output_range = None
      if i == self._num_layers - 1:
        transformer_output_range = self._output_range
      x = layer([x, attention_mask], output_range=transformer_output_range)
      if self._config['return_attention_scores']:
        x, attention_scores = x
        attention_outputs.append(attention_scores)
      encoder_outputs.append(x)

    last_encoder_output = encoder_outputs[-1]
    encoder_output_is_entity = tf.gather(
        last_encoder_output, indices=is_entity_token_pos, axis=1, batch_dims=1)
    encoder_output_entity_type = tf.gather(
        last_encoder_output,
        indices=entity_type_token_pos,
        axis=1,
        batch_dims=1)
    cls_output_is_entity = self._pooler_layer_is_entity(
        encoder_output_is_entity)
    cls_output_entity_type = self._pooler_layer_entity_type(
        encoder_output_entity_type)
    pooled_output = tf.concat([cls_output_is_entity, cls_output_entity_type],
                              1)
    output = dict(
        sequence_output=encoder_outputs[-1],
        pooled_output=pooled_output,
        encoder_outputs=encoder_outputs)

    if self._config['return_attention_scores']:
      output['attention_scores'] = attention_outputs
    return output

  def get_embedding_table(self):
    return self._embedding_layer.embeddings

  def get_embedding_layer(self):
    return self._embedding_layer

  def get_config(self):
    return dict(self._config)

  @property
  def transformer_layers(self):
    """List of Transformer layers in the encoder."""
    return self._transformer_layers

  @property
  def pooler_layer_is_entity(self):
    """The pooler dense layer for is-entity classification after the
    transformer layers.
    """
    return self._pooler_layer_is_entity

  @property
  def pooler_layer_entity_type(self):
    """The pooler dense layer for entity-type classification after the
    transformer layers.
    """
    return self._pooler_layer_entity_type

  @classmethod
  def from_config(cls, config, custom_objects=None):
    if 'embedding_layer' in config and config['embedding_layer'] is not None:
      warn_string = (
          'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
          'train this model, the embedding layer will no longer be shared. '
          'To work around this, load the model outside of the Keras API.')
      print('WARNING: ' + warn_string)
      logging.warning(warn_string)
    return cls(**config)

  def _get_embeddings(self, word_ids: tf.Tensor, type_ids: tf.Tensor,
                      word_embeddings: Optional[tf.Tensor],
                      dense_inputs: Optional[tf.Tensor],
                      dense_type_ids: Optional[tf.Tensor]) -> tf.Tensor:
    if word_embeddings is None:
      word_embeddings = self._embedding_layer(word_ids)

    if dense_inputs is not None:
      # Concat the dense embeddings at sequence end.
      word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1)
      type_ids = tf.concat([type_ids, dense_type_ids], axis=1)

    type_embeddings = self._type_embedding_layer(type_ids)

    # Absolute position embeddings.
    position_embeddings = self._position_embedding_layer(word_embeddings)

    return word_embeddings + position_embeddings + type_embeddings
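

# Editor's sketch, not part of the original file: a hedged forward-pass
# example on dummy inputs; all shapes and constants are illustrative.
def _example_forward_pass():
  """Runs a tiny encoder on dummy ids and pools two token positions."""
  encoder = FFFNerEncoder(
      vocab_size=100, hidden_size=32, num_layers=2, num_attention_heads=2,
      inner_dim=64)
  batch, seq_len = 2, 8
  inputs = dict(
      input_word_ids=tf.ones((batch, seq_len), tf.int32),
      input_mask=tf.ones((batch, seq_len), tf.int32),
      input_type_ids=tf.zeros((batch, seq_len), tf.int32),
      # Positions of the two special tokens whose representations are pooled.
      is_entity_token_pos=tf.zeros((batch, 1), tf.int32),
      entity_type_token_pos=tf.ones((batch, 1), tf.int32))
  outputs = encoder(inputs)
  # The two pooled vectors are concatenated along the sequence axis, so the
  # result has shape [batch, 2, hidden_size].
  return outputs['pooled_output']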
14,353
40.365994
90
py
models
models-master/official/projects/fffner/fffner_prediction.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""FFFNER prediction task."""
import collections
import dataclasses
from absl import logging
import numpy as np
import tensorflow as tf

from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.tasks import utils
from official.projects.fffner import fffner_classifier

METRIC_TYPES = frozenset(
    ['accuracy', 'matthews_corrcoef', 'pearson_spearman_corr'])


@dataclasses.dataclass
class FFFNerModelConfig(base_config.Config):
  """A classifier/regressor configuration."""
  num_classes_is_entity: int = 0
  num_classes_entity_type: int = 0
  use_encoder_pooler: bool = True
  encoder: encoders.EncoderConfig = dataclasses.field(
      default_factory=encoders.EncoderConfig
  )


@dataclasses.dataclass
class FFFNerPredictionConfig(cfg.TaskConfig):
  """The model config."""
  # At most one of `init_checkpoint` and `hub_module_url` can
  # be specified.
  init_checkpoint: str = ''
  init_cls_pooler: bool = False
  hub_module_url: str = ''
  metric_type: str = 'accuracy'
  # Defines the concrete model config at instantiation time.
  model: FFFNerModelConfig = dataclasses.field(
      default_factory=FFFNerModelConfig
  )
  train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
  validation_data: cfg.DataConfig = dataclasses.field(
      default_factory=cfg.DataConfig
  )


@task_factory.register_task_cls(FFFNerPredictionConfig)
class FFFNerTask(base_task.Task):
  """Task object for FFFNer."""

  def __init__(self, params: cfg.TaskConfig, logging_dir=None, name=None):
    super().__init__(params, logging_dir, name=name)
    if params.metric_type not in METRIC_TYPES:
      raise ValueError('Invalid metric_type: {}'.format(params.metric_type))
    self.metric_type = params.metric_type
    self.label_field_is_entity = 'is_entity_label'
    self.label_field_entity_type = 'entity_type_label'

  def build_model(self):
    if self.task_config.hub_module_url and self.task_config.init_checkpoint:
      raise ValueError('At most one of `hub_module_url` and '
                       '`init_checkpoint` can be specified.')
    if self.task_config.hub_module_url:
      encoder_network = utils.get_encoder_from_hub(
          self.task_config.hub_module_url)
    else:
      encoder_network = encoders.build_encoder(self.task_config.model.encoder)
    encoder_cfg = self.task_config.model.encoder.get()
    if self.task_config.model.encoder.type == 'xlnet':
      raise NotImplementedError('The xlnet encoder is not supported yet.')
    else:
      return fffner_classifier.FFFNerClassifier(
          # encoder_network.inputs
          network=encoder_network,
          num_classes_is_entity=self.task_config.model.num_classes_is_entity,
          num_classes_entity_type=self.task_config.model
          .num_classes_entity_type,
          initializer=tf.keras.initializers.TruncatedNormal(
              stddev=encoder_cfg.initializer_range),
          use_encoder_pooler=self.task_config.model.use_encoder_pooler)

  def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
    label_ids_is_entity = labels[self.label_field_is_entity]
    label_ids_entity_type = labels[self.label_field_entity_type]
    loss_is_entity = tf.keras.losses.sparse_categorical_crossentropy(
        label_ids_is_entity,
        tf.cast(model_outputs[0], tf.float32),
        from_logits=True)
    loss_entity_type = tf.keras.losses.sparse_categorical_crossentropy(
        label_ids_entity_type,
        tf.cast(model_outputs[1], tf.float32),
        from_logits=True)

    loss = loss_is_entity + loss_entity_type

    if aux_losses:
      loss += tf.add_n(aux_losses)
    return tf_utils.safe_mean(loss)

  def build_inputs(self, params, input_context=None):
    """Returns tf.data.Dataset for sentence_prediction task."""
    if params.input_path == 'dummy':

      def dummy_data(_):
        dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
        x = dict(
            input_word_ids=dummy_ids,
            input_mask=dummy_ids,
            input_type_ids=dummy_ids,
            is_entity_token_pos=tf.zeros((1, 1), dtype=tf.int32),
            entity_type_token_pos=tf.ones((1, 1), dtype=tf.int32))
        x[self.label_field_is_entity] = tf.zeros((1, 1), dtype=tf.int32)
        x[self.label_field_entity_type] = tf.zeros((1, 1), dtype=tf.int32)
        return x

      dataset = tf.data.Dataset.range(1)
      dataset = dataset.repeat()
      dataset = dataset.map(
          dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
      return dataset

    return data_loader_factory.get_data_loader(params).load(input_context)

  def build_metrics(self, training=None):
    del training
    metrics = [
        tf.keras.metrics.SparseCategoricalAccuracy(
            name='cls_accuracy_is_entity'),
        tf.keras.metrics.SparseCategoricalAccuracy(
            name='cls_accuracy_entity_type'),
    ]
    return metrics

  def process_metrics(self, metrics, labels, model_outputs):
    for metric in metrics:
      if metric.name == 'cls_accuracy_is_entity':
        metric.update_state(labels[self.label_field_is_entity],
                            model_outputs[0])
      if metric.name == 'cls_accuracy_entity_type':
        metric.update_state(labels[self.label_field_entity_type],
                            model_outputs[1])

  def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
    compiled_metrics.update_state(labels[self.label_field_is_entity],
                                  model_outputs[0])
    compiled_metrics.update_state(labels[self.label_field_entity_type],
                                  model_outputs[1])

  def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
    features, labels = inputs, inputs
    outputs = self.inference_step(features, model)
    loss = self.build_losses(
        labels=labels, model_outputs=outputs, aux_losses=model.losses)
    logs = {self.loss: loss}
    if metrics:
      self.process_metrics(metrics, labels, outputs)
    if model.compiled_metrics:
      self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
      logs.update({m.name: m.result() for m in metrics or []})
      logs.update({m.name: m.result() for m in model.metrics})
    logs.update({
        'sentence_prediction_is_entity': outputs[0],
        'sentence_prediction_entity_type': outputs[1],
        'labels_is_entity': labels[self.label_field_is_entity],
        'labels_entity_type': labels[self.label_field_entity_type],
        'id': labels['example_id'],
        'sentence_id': labels['sentence_id'],
        'span_start': labels['span_start'],
        'span_end': labels['span_end']
    })
    return logs

  def aggregate_logs(self, state=None, step_outputs=None):
    if state is None:
      state = {
          'sentence_prediction_is_entity': [],
          'sentence_prediction_entity_type': [],
          'labels_is_entity': [],
          'labels_entity_type': [],
          'ids': [],
          'sentence_id': [],
          'span_start': [],
          'span_end': []
      }
    state['sentence_prediction_is_entity'].append(
        np.concatenate(
            [v.numpy() for v in step_outputs['sentence_prediction_is_entity']],
            axis=0))
    state['sentence_prediction_entity_type'].append(
        np.concatenate([
            v.numpy() for v in step_outputs['sentence_prediction_entity_type']
        ],
                       axis=0))
    state['labels_is_entity'].append(
        np.concatenate([v.numpy() for v in step_outputs['labels_is_entity']],
                       axis=0))
    state['labels_entity_type'].append(
        np.concatenate([v.numpy() for v in step_outputs['labels_entity_type']],
                       axis=0))
    state['ids'].append(
        np.concatenate([v.numpy() for v in step_outputs['id']], axis=0))
    state['sentence_id'].append(
        np.concatenate([v.numpy() for v in step_outputs['sentence_id']],
                       axis=0))
    state['span_start'].append(
        np.concatenate([v.numpy() for v in step_outputs['span_start']],
                       axis=0))
    state['span_end'].append(
        np.concatenate([v.numpy() for v in step_outputs['span_end']], axis=0))
    return state

  def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
    sentence_prediction_is_entity = np.concatenate(
        aggregated_logs['sentence_prediction_is_entity'], axis=0)
    sentence_prediction_is_entity = np.reshape(
        sentence_prediction_is_entity,
        (-1, self.task_config.model.num_classes_is_entity))
    sentence_prediction_entity_type = np.concatenate(
        aggregated_logs['sentence_prediction_entity_type'], axis=0)
    sentence_prediction_entity_type = np.reshape(
        sentence_prediction_entity_type,
        (-1, self.task_config.model.num_classes_entity_type))
    labels_is_entity = np.concatenate(
        aggregated_logs['labels_is_entity'], axis=0)
    labels_is_entity = np.reshape(labels_is_entity, -1)
    labels_entity_type = np.concatenate(
        aggregated_logs['labels_entity_type'], axis=0)
    labels_entity_type = np.reshape(labels_entity_type, -1)
    ids = np.concatenate(aggregated_logs['ids'], axis=0)
    ids = np.reshape(ids, -1)
    sentence_id = np.concatenate(aggregated_logs['sentence_id'], axis=0)
    sentence_id = np.reshape(sentence_id, -1)
    span_start = np.concatenate(aggregated_logs['span_start'], axis=0)
    span_start = np.reshape(span_start, -1)
    span_end = np.concatenate(aggregated_logs['span_end'], axis=0)
    span_end = np.reshape(span_end, -1)

    def resolve(length, spans, prediction_confidence):
      used = [False] * length
      spans = sorted(
          spans,
          key=lambda x: prediction_confidence[(x[0], x[1])],
          reverse=True)
      real_spans = []
      for span_start, span_end, ent_type in spans:
        fill = False
        for s in range(span_start, span_end + 1):
          if used[s]:
            fill = True
            break
        if not fill:
          real_spans.append((span_start, span_end, ent_type))
          for s in range(span_start, span_end + 1):
            used[s] = True
      return real_spans

    def get_p_r_f(truth, pred):
      n_pred = len(pred)
      n_truth = len(truth)
      n_correct = len(set(pred) & set(truth))
      precision = 1. * n_correct / n_pred if n_pred != 0 else 0.0
      recall = 1. * n_correct / n_truth if n_truth != 0 else 0.0
      f1 = 2 * precision * recall / (
          precision + recall) if precision + recall != 0.0 else 0.0
      return {
          'n_pred': n_pred,
          'n_truth': n_truth,
          'n_correct': n_correct,
          'precision': precision,
          'recall': recall,
          'f1': f1,
      }

    def softmax(x):
      x = np.array(x)
      e_x = np.exp(x - np.max(x))
      return e_x / e_x.sum(axis=0)

    per_sid_results = collections.defaultdict(list)
    for (_, sent_id, sp_start, sp_end, is_entity_label, is_entity_logit,
         entity_type_label, entity_type_logit) in zip(
             ids, sentence_id, span_start, span_end, labels_is_entity,
             sentence_prediction_is_entity, labels_entity_type,
             sentence_prediction_entity_type):
      if sent_id > 0:
        per_sid_results[sent_id].append(
            (sp_start, sp_end, is_entity_label, is_entity_logit,
             entity_type_label, entity_type_logit))
    ground_truth = []
    prediction_is_entity = []
    prediction_entity_type = []
    for key in sorted(list(per_sid_results.keys())):
      results = per_sid_results[key]
      gt_entities = []
      predicted_entities = []
      prediction_confidence = {}
      prediction_confidence_type = {}
      length = 0
      for (span_start, span_end, ground_truth_span, prediction_span,
           ground_truth_type, prediction_type) in results:
        if ground_truth_span == 1:
          gt_entities.append((span_start, span_end, ground_truth_type))
        if prediction_span[1] > prediction_span[0]:
          predicted_entities.append(
              (span_start, span_end, np.argmax(prediction_type).item()))
          prediction_confidence[(span_start,
                                 span_end)] = max(softmax(prediction_span))
          prediction_confidence_type[(span_start,
                                      span_end)] = max(
                                          softmax(prediction_type))
        length = max(length, span_end)
      length += 1
      ground_truth.extend([(key, *x) for x in gt_entities])
      prediction_is_entity.extend([(key, *x) for x in predicted_entities])
      resolved_predicted = resolve(length, predicted_entities,
                                   prediction_confidence)
      prediction_entity_type.extend([(key, *x) for x in resolved_predicted])

    raw = get_p_r_f(ground_truth, prediction_is_entity)
    resolved = get_p_r_f(ground_truth, prediction_entity_type)
    return {
        'raw_f1': raw['f1'],
        'raw_precision': raw['precision'],
        'raw_recall': raw['recall'],
        'resolved_f1': resolved['f1'],
        'resolved_precision': resolved['precision'],
        'resolved_recall': resolved['recall'],
        'overall_f1': raw['f1'] + resolved['f1'],
    }

  def initialize(self, model):
    """Load a pretrained checkpoint (if exists) and then train from iter 0."""
    ckpt_dir_or_file = self.task_config.init_checkpoint
    logging.info('Trying to load pretrained checkpoint from %s',
                 ckpt_dir_or_file)
    if ckpt_dir_or_file and tf.io.gfile.isdir(ckpt_dir_or_file):
      ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
    if not ckpt_dir_or_file:
      logging.info('No checkpoint file found from %s. Will not load.',
                   ckpt_dir_or_file)
      return

    pretrain2finetune_mapping = {
        'encoder': model.checkpoint_items['encoder'],
    }
    if self.task_config.init_cls_pooler:
      # This option is valid when use_encoder_pooler is false.
      pretrain2finetune_mapping[
          'next_sentence.pooler_dense'] = model.checkpoint_items[
              'sentence_prediction.pooler_dense']
    ckpt = tf.train.Checkpoint(**pretrain2finetune_mapping)
    status = ckpt.read(ckpt_dir_or_file)
    status.expect_partial().assert_existing_objects_matched()
    logging.info('Finished loading pretrained checkpoint from %s',
                 ckpt_dir_or_file)
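

# Editor's sketch, not part of the original file: a hedged, hand-computed
# example of the span-level precision/recall/F1 bookkeeping used in
# reduce_aggregated_logs above; all spans are made up.
def _example_span_f1():
  """Computes F1 over toy (sentence, start, end, type) spans."""
  truth = [(1, 0, 2, 'PER'), (1, 4, 5, 'LOC')]
  pred = [(1, 0, 2, 'PER')]
  n_correct = len(set(pred) & set(truth))  # 1 correct span.
  precision, recall = n_correct / len(pred), n_correct / len(truth)
  return 2 * precision * recall / (precision + recall)  # F1 = 2/3.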
15,128
39.560322
116
py
models
models-master/official/projects/fffner/fffner_classifier.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""FFF-NER special token classifier."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf

from official.nlp.modeling import layers


@tf.keras.utils.register_keras_serializable(package='Text')
class FFFNerClassifier(tf.keras.Model):
  """Classifier model based on a BERT-style transformer-based encoder.

  This is an implementation of the network structure surrounding a transformer
  encoder as described in "BERT: Pre-training of Deep Bidirectional
  Transformers for Language Understanding"
  (https://arxiv.org/abs/1810.04805).

  The FFFNerClassifier allows a user to pass in a transformer stack, and
  instantiates two classification heads based on the passed
  `num_classes_is_entity` and `num_classes_entity_type` arguments.

  *Note* that the model is constructed by
  [Keras Functional API](https://keras.io/guides/functional_api/).

  Args:
    network: A transformer network. This network should output a sequence
      output and a classification output. Furthermore, it should expose its
      embedding table via a "get_embedding_table" method.
    num_classes_is_entity: Number of classes to predict from the is-entity
      classification head.
    num_classes_entity_type: Number of classes to predict from the entity-type
      classification head.
    initializer: The initializer (if any) to use in the classification
      networks. Defaults to a Glorot uniform initializer.
    dropout_rate: The dropout probability of the cls head.
    use_encoder_pooler: Whether to use the pooler layer pre-defined inside the
      encoder.
    head_name_is_entity: Name of the is-entity classification head.
    head_name_entity_type: Name of the entity-type classification head.
  """

  def __init__(self,
               network,
               num_classes_is_entity,
               num_classes_entity_type,
               initializer='glorot_uniform',
               dropout_rate=0.1,
               use_encoder_pooler=True,
               head_name_is_entity='fffner_prediction_is_entity',
               head_name_entity_type='fffner_prediction_entity_type',
               cls_head=None,
               **kwargs):
    self.num_classes_is_entity = num_classes_is_entity
    self.num_classes_entity_type = num_classes_entity_type
    self.head_name_is_entity = head_name_is_entity
    self.head_name_entity_type = head_name_entity_type
    self.initializer = initializer
    self.use_encoder_pooler = use_encoder_pooler
    assert use_encoder_pooler, ('Customized pooling & classification function '
                                'is used')

    # We want to use the inputs of the passed network as the inputs to this
    # Model. To do this, we need to keep a handle to the network inputs for use
    # when we construct the Model object at the end of init.
    inputs = network.inputs
    outputs = network(inputs)
    if isinstance(outputs, list):
      cls_inputs = outputs[1]
    else:
      cls_inputs = outputs['pooled_output']
    cls_inputs = tf.keras.layers.Dropout(rate=dropout_rate)(cls_inputs)

    classifier_is_entity = layers.ClassificationHead(
        inner_dim=0 if use_encoder_pooler else cls_inputs.shape[-1],
        num_classes=num_classes_is_entity,
        initializer=initializer,
        dropout_rate=dropout_rate,
        name=head_name_is_entity)
    classifier_entity_type = layers.ClassificationHead(
        inner_dim=0 if use_encoder_pooler else cls_inputs.shape[-1],
        num_classes=num_classes_entity_type,
        initializer=initializer,
        dropout_rate=dropout_rate,
        name=head_name_entity_type)

    predictions_is_entity = classifier_is_entity(cls_inputs[:, 0, :])
    predictions_entity_type = classifier_entity_type(cls_inputs[:, 1, :])

    super().__init__(
        inputs=inputs,
        outputs=[predictions_is_entity, predictions_entity_type],
        **kwargs)

    self._network = network
    self._cls_head = cls_head

    config_dict = self._make_config_dict()
    # We are storing the config dict as a namedtuple here to ensure checkpoint
    # compatibility with an earlier version of this model which did not track
    # the config dict attribute. TF does not track immutable attrs which
    # do not contain Trackables, so by creating a config namedtuple instead of
    # a dict we avoid tracking it.
    config_cls = collections.namedtuple('Config', config_dict.keys())
    self._config = config_cls(**config_dict)

    self.classifier_is_entity = classifier_is_entity
    self.classifier_entity_type = classifier_entity_type

  @property
  def checkpoint_items(self):
    items = dict(encoder=self._network)
    if hasattr(self.classifier_is_entity, 'checkpoint_items'):
      for key, item in self.classifier_is_entity.checkpoint_items.items():
        items['.'.join([self.classifier_is_entity.name, key])] = item
    if hasattr(self.classifier_entity_type, 'checkpoint_items'):
      for key, item in self.classifier_entity_type.checkpoint_items.items():
        items['.'.join([self.classifier_entity_type.name, key])] = item
    return items

  def get_config(self):
    return dict(self._config._asdict())

  @classmethod
  def from_config(cls, config, custom_objects=None):
    return cls(**config)

  def _make_config_dict(self):
    return {
        'network': self._network,
        'num_classes_is_entity': self.num_classes_is_entity,
        'num_classes_entity_type': self.num_classes_entity_type,
        'head_name_is_entity': self.head_name_is_entity,
        'head_name_entity_type': self.head_name_entity_type,
        'initializer': self.initializer,
        'use_encoder_pooler': self.use_encoder_pooler,
    }
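

# Editor's sketch, not part of the original file: a hedged example that pairs
# the classifier with an FFFNerEncoder-style network; all sizes are
# illustrative, and `fffner_encoder` is assumed to be importable from this
# package.
def _example_build_classifier():
  """Builds a tiny two-head classifier over an FFFNerEncoder."""
  from official.projects.fffner import fffner_encoder  # Editor's assumption.
  network = fffner_encoder.FFFNerEncoder(
      vocab_size=100, hidden_size=32, num_layers=2, num_attention_heads=2,
      inner_dim=64)
  # The encoder's pooled output stacks the two special-token vectors, which
  # the classifier splits between its is-entity and entity-type heads.
  return FFFNerClassifier(
      network=network, num_classes_is_entity=2, num_classes_entity_type=9)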
6,139
41.054795
80
py