column           dtype          min    max
repo             stringlengths  1      99
file             stringlengths  13     215
code             stringlengths  12     59.2M
file_length      int64          12     59.2M
avg_line_length  float64        3.82   1.48M
max_line_length  int64          12     2.51M
extension_type   stringclasses  1 value
repo: models
file: models-master/official/vision/configs/backbones.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Backbones configurations.""" import dataclasses from typing import List, Optional, Tuple from official.modeling import hyperparams @dataclasses.dataclass class Transformer(hyperparams.Config): """Transformer config.""" mlp_dim: int = 1 num_heads: int = 1 num_layers: int = 1 attention_dropout_rate: float = 0.0 dropout_rate: float = 0.0 @dataclasses.dataclass class VisionTransformer(hyperparams.Config): """VisionTransformer config.""" model_name: str = 'vit-b16' # pylint: disable=line-too-long pooler: str = 'token' # 'token', 'gap' or 'none'. If set to 'token', an extra classification token is added to sequence. # pylint: enable=line-too-long representation_size: int = 0 hidden_size: int = 1 patch_size: int = 16 transformer: Transformer = dataclasses.field(default_factory=Transformer) init_stochastic_depth_rate: float = 0.0 original_init: bool = True pos_embed_shape: Optional[Tuple[int, int]] = None # If output encoded tokens sequence when pooler is `none`. output_encoded_tokens: bool = True # If output encoded tokens 2D feature map. output_2d_feature_maps: bool = False # Adding Layerscale to each Encoder block https://arxiv.org/abs/2204.07118 layer_scale_init_value: float = 0.0 # Transformer encoder spatial partition dimensions. 
transformer_partition_dims: Optional[Tuple[int, int, int, int]] = None @dataclasses.dataclass class ResNet(hyperparams.Config): """ResNet config.""" model_id: int = 50 depth_multiplier: float = 1.0 stem_type: str = 'v0' se_ratio: float = 0.0 stochastic_depth_drop_rate: float = 0.0 scale_stem: bool = True resnetd_shortcut: bool = False replace_stem_max_pool: bool = False bn_trainable: bool = True @dataclasses.dataclass class DilatedResNet(hyperparams.Config): """DilatedResNet config.""" model_id: int = 50 output_stride: int = 16 multigrid: Optional[List[int]] = None stem_type: str = 'v0' last_stage_repeats: int = 1 se_ratio: float = 0.0 stochastic_depth_drop_rate: float = 0.0 resnetd_shortcut: bool = False replace_stem_max_pool: bool = False @dataclasses.dataclass class EfficientNet(hyperparams.Config): """EfficientNet config.""" model_id: str = 'b0' se_ratio: float = 0.0 stochastic_depth_drop_rate: float = 0.0 @dataclasses.dataclass class MobileNet(hyperparams.Config): """Mobilenet config.""" model_id: str = 'MobileNetV2' filter_size_scale: float = 1.0 stochastic_depth_drop_rate: float = 0.0 output_stride: Optional[int] = None output_intermediate_endpoints: bool = False @dataclasses.dataclass class SpineNet(hyperparams.Config): """SpineNet config.""" model_id: str = '49' stochastic_depth_drop_rate: float = 0.0 min_level: int = 3 max_level: int = 7 @dataclasses.dataclass class SpineNetMobile(hyperparams.Config): """SpineNet config.""" model_id: str = '49' stochastic_depth_drop_rate: float = 0.0 se_ratio: float = 0.2 expand_ratio: int = 6 min_level: int = 3 max_level: int = 7 # If use_keras_upsampling_2d is True, model uses UpSampling2D keras layer # instead of optimized custom TF op. It makes model be more keras style. We # set this flag to True when we apply QAT from model optimization toolkit # that requires the model should use keras layers. use_keras_upsampling_2d: bool = False @dataclasses.dataclass class RevNet(hyperparams.Config): """RevNet config.""" # Specifies the depth of RevNet. model_id: int = 56 @dataclasses.dataclass class MobileDet(hyperparams.Config): """Mobiledet config.""" model_id: str = 'MobileDetCPU' filter_size_scale: float = 1.0 @dataclasses.dataclass class Backbone(hyperparams.OneOfConfig): """Configuration for backbones. Attributes: type: 'str', type of backbone be used, one of the fields below. resnet: resnet backbone config. dilated_resnet: dilated resnet backbone for semantic segmentation config. revnet: revnet backbone config. efficientnet: efficientnet backbone config. spinenet: spinenet backbone config. spinenet_mobile: mobile spinenet backbone config. mobilenet: mobilenet backbone config. mobiledet: mobiledet backbone config. vit: vision transformer backbone config. """ type: Optional[str] = None resnet: ResNet = dataclasses.field(default_factory=ResNet) dilated_resnet: DilatedResNet = dataclasses.field( default_factory=DilatedResNet ) revnet: RevNet = dataclasses.field(default_factory=RevNet) efficientnet: EfficientNet = dataclasses.field(default_factory=EfficientNet) spinenet: SpineNet = dataclasses.field(default_factory=SpineNet) spinenet_mobile: SpineNetMobile = dataclasses.field( default_factory=SpineNetMobile ) mobilenet: MobileNet = dataclasses.field(default_factory=MobileNet) mobiledet: MobileDet = dataclasses.field(default_factory=MobileDet) vit: VisionTransformer = dataclasses.field(default_factory=VisionTransformer)
file_length: 5,547
avg_line_length: 31.255814
max_line_length: 123
extension_type: py
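A minimal usage sketch for the Backbone OneOfConfig defined in backbones.py: a branch is selected by name via type and its fields overridden. The field names come from the dataclasses above; the concrete values (model_id=101, drop rate 0.1) are purely illustrative.

from official.vision.configs import backbones

# Choose the ResNet branch of the OneOf config and override two of its fields.
backbone_cfg = backbones.Backbone(
    type='resnet',
    resnet=backbones.ResNet(model_id=101, stochastic_depth_drop_rate=0.1))

# The active branch is reachable through the attribute named by `type`.
print(backbone_cfg.type)             # 'resnet'
print(backbone_cfg.resnet.model_id)  # 101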
repo: models
file: models-master/official/vision/configs/decoders.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Decoders configurations.""" import dataclasses from typing import List, Optional # Import libraries from official.modeling import hyperparams @dataclasses.dataclass class Identity(hyperparams.Config): """Identity config.""" pass @dataclasses.dataclass class FPN(hyperparams.Config): """FPN config.""" num_filters: int = 256 fusion_type: str = 'sum' use_separable_conv: bool = False use_keras_layer: bool = False @dataclasses.dataclass class NASFPN(hyperparams.Config): """NASFPN config.""" num_filters: int = 256 num_repeats: int = 5 use_separable_conv: bool = False @dataclasses.dataclass class ASPP(hyperparams.Config): """ASPP config.""" level: int = 4 dilation_rates: List[int] = dataclasses.field(default_factory=list) dropout_rate: float = 0.0 num_filters: int = 256 use_depthwise_convolution: bool = False pool_kernel_size: Optional[List[int]] = None # Use global average pooling. spp_layer_version: str = 'v1' output_tensor: bool = False @dataclasses.dataclass class Decoder(hyperparams.OneOfConfig): """Configuration for decoders. Attributes: type: 'str', type of decoder be used, one of the fields below. fpn: fpn config. """ type: Optional[str] = None fpn: FPN = dataclasses.field(default_factory=FPN) nasfpn: NASFPN = dataclasses.field(default_factory=NASFPN) identity: Identity = dataclasses.field(default_factory=Identity) aspp: ASPP = dataclasses.field(default_factory=ASPP)
file_length: 2,080
avg_line_length: 27.506849
max_line_length: 77
extension_type: py
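The Decoder OneOfConfig in decoders.py follows the same pattern; a short sketch of picking the FPN branch, with override values chosen only for illustration.

from official.vision.configs import decoders

# Select the FPN decoder and override a couple of its fields.
decoder_cfg = decoders.Decoder(
    type='fpn',
    fpn=decoders.FPN(num_filters=128, use_separable_conv=True))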
repo: models
file: models-master/official/vision/data/tfrecord_lib_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tfrecord_lib.""" import os from absl import flags from absl.testing import parameterized import tensorflow as tf from official.vision.data import create_coco_tf_record as create_coco_tf_record_lib from official.vision.data import tfrecord_lib FLAGS = flags.FLAGS def process_sample(x): d = {'x': x} return tf.train.Example(features=tf.train.Features(feature=d)), 0 def parse_function(example_proto): feature_description = { 'x': tf.io.FixedLenFeature([], tf.int64, default_value=-1) } return tf.io.parse_single_example(example_proto, feature_description) class TfrecordLibTest(parameterized.TestCase): def test_write_tf_record_dataset(self): data = [(tfrecord_lib.convert_to_feature(i),) for i in range(17)] path = os.path.join(FLAGS.test_tmpdir, 'train') tfrecord_lib.write_tf_record_dataset( path, data, process_sample, 3, multiple_processes=0) tfrecord_files = tf.io.gfile.glob(path + '*') self.assertLen(tfrecord_files, 3) dataset = tf.data.TFRecordDataset(tfrecord_files) dataset = dataset.map(parse_function) read_values = set(d['x'] for d in dataset.as_numpy_iterator()) self.assertSetEqual(read_values, set(range(17))) def test_convert_to_feature_float(self): proto = tfrecord_lib.convert_to_feature(0.0) self.assertEqual(proto.float_list.value[0], 0.0) def test_convert_to_feature_int(self): proto = tfrecord_lib.convert_to_feature(0) self.assertEqual(proto.int64_list.value[0], 0) def test_convert_to_feature_bytes(self): proto = tfrecord_lib.convert_to_feature(b'123') self.assertEqual(proto.bytes_list.value[0], b'123') def test_convert_to_feature_float_list(self): proto = tfrecord_lib.convert_to_feature([0.0, 1.0]) self.assertSequenceAlmostEqual(proto.float_list.value, [0.0, 1.0]) def test_convert_to_feature_int_list(self): proto = tfrecord_lib.convert_to_feature([0, 1]) self.assertSequenceAlmostEqual(proto.int64_list.value, [0, 1]) def test_convert_to_feature_bytes_list(self): proto = tfrecord_lib.convert_to_feature([b'123', b'456']) self.assertSequenceAlmostEqual(proto.bytes_list.value, [b'123', b'456']) def test_obj_annotation_tf_example(self): images = [ { 'id': 0, 'file_name': 'example1.jpg', 'height': 512, 'width': 512, }, { 'id': 1, 'file_name': 'example2.jpg', 'height': 512, 'width': 512, }, ] img_to_obj_annotation = { 0: [{ 'id': 0, 'image_id': 0, 'category_id': 1, 'bbox': [3, 1, 511, 510], 'area': 260610.00, 'segmentation': [], 'iscrowd': 0, }], 1: [{ 'id': 1, 'image_id': 1, 'category_id': 1, 'bbox': [1, 1, 100, 150], 'area': 15000.00, 'segmentation': [], 'iscrowd': 0, }], } id_to_name_map = { 0: 'Super-Class', 1: 'Class-1', } temp_dir = FLAGS.test_tmpdir image_dir = os.path.join(temp_dir, 'data') if not os.path.exists(image_dir): os.mkdir(image_dir) for image in images: image_path = os.path.join(image_dir, image['file_name']) tf.keras.utils.save_img( image_path, tf.ones(shape=(image['height'], image['width'], 3)).numpy(), ) output_path = os.path.join(image_dir, 'train') 
coco_annotations_iter = create_coco_tf_record_lib.generate_annotations( images=images, image_dirs=[image_dir], panoptic_masks_dir=None, img_to_obj_annotation=img_to_obj_annotation, img_to_caption_annotation=None, img_to_panoptic_annotation=None, is_category_thing=None, id_to_name_map=id_to_name_map, include_panoptic_masks=False, include_masks=False, ) tfrecord_lib.write_tf_record_dataset( output_path, coco_annotations_iter, create_coco_tf_record_lib.create_tf_example, 1, multiple_processes=0, ) tfrecord_files = tf.io.gfile.glob(output_path + '*') self.assertLen(tfrecord_files, 1) ds = tf.data.TFRecordDataset(tfrecord_files) assertion_count = 0 for _ in ds: assertion_count += 1 self.assertEqual(assertion_count, 2) if __name__ == '__main__': tf.test.main()
file_length: 5,071
avg_line_length: 27.494382
max_line_length: 83
extension_type: py
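The test above exercises the tfrecord_lib write path directly; a stripped-down sketch of the same calls outside the test harness follows. The output path /tmp/toy_train and the integer payload are placeholders.

import tensorflow as tf

from official.vision.data import tfrecord_lib


def process_sample(x):
  # Wrap one pre-converted feature into a tf.train.Example; the second return
  # value is the number of skipped records for this sample.
  return tf.train.Example(
      features=tf.train.Features(feature={'x': x})), 0


data = [(tfrecord_lib.convert_to_feature(i),) for i in range(10)]
tfrecord_lib.write_tf_record_dataset(
    '/tmp/toy_train', data, process_sample, 2, multiple_processes=0)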
repo: models
file: models-master/official/vision/modeling/video_classification_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Build video classification models.""" from typing import Any, Mapping, Optional, Union, List, Text import tensorflow as tf layers = tf.keras.layers @tf.keras.utils.register_keras_serializable(package='Vision') class VideoClassificationModel(tf.keras.Model): """A video classification class builder.""" def __init__( self, backbone: tf.keras.Model, num_classes: int, input_specs: Optional[Mapping[str, tf.keras.layers.InputSpec]] = None, dropout_rate: float = 0.0, aggregate_endpoints: bool = False, kernel_initializer: str = 'random_uniform', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, require_endpoints: Optional[List[Text]] = None, **kwargs): """Video Classification initialization function. Args: backbone: a 3d backbone network. num_classes: `int` number of classes in classification task. input_specs: `tf.keras.layers.InputSpec` specs of the input tensor. dropout_rate: `float` rate for dropout regularization. aggregate_endpoints: `bool` aggregate all end ponits or only use the final end point. kernel_initializer: kernel initializer for the dense layer. kernel_regularizer: tf.keras.regularizers.Regularizer object. Default to None. bias_regularizer: tf.keras.regularizers.Regularizer object. Default to None. require_endpoints: the required endpoints for prediction. If None or empty, then only uses the final endpoint. **kwargs: keyword arguments to be passed. """ if not input_specs: input_specs = { 'image': layers.InputSpec(shape=[None, None, None, None, 3]) } self._self_setattr_tracking = False self._config_dict = { 'backbone': backbone, 'num_classes': num_classes, 'input_specs': input_specs, 'dropout_rate': dropout_rate, 'aggregate_endpoints': aggregate_endpoints, 'kernel_initializer': kernel_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer, 'require_endpoints': require_endpoints, } self._input_specs = input_specs self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._backbone = backbone inputs = { k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items() } endpoints = backbone(inputs['image']) if aggregate_endpoints: pooled_feats = [] for endpoint in endpoints.values(): x_pool = tf.keras.layers.GlobalAveragePooling3D()(endpoint) pooled_feats.append(x_pool) x = tf.concat(pooled_feats, axis=1) else: if not require_endpoints: # Uses the last endpoint for prediction. x = endpoints[max(endpoints.keys())] x = tf.keras.layers.GlobalAveragePooling3D()(x) else: # Concats all the required endpoints for prediction. 
outputs = [] for name in require_endpoints: x = endpoints[name] x = tf.keras.layers.GlobalAveragePooling3D()(x) outputs.append(x) x = tf.concat(outputs, axis=1) x = tf.keras.layers.Dropout(dropout_rate)(x) x = tf.keras.layers.Dense( num_classes, kernel_initializer=kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( x) super(VideoClassificationModel, self).__init__( inputs=inputs, outputs=x, **kwargs) @property def checkpoint_items( self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]: """Returns a dictionary of items to be additionally checkpointed.""" return dict(backbone=self.backbone) @property def backbone(self) -> tf.keras.Model: return self._backbone def get_config(self) -> Mapping[str, Any]: return self._config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
file_length: 4,703
avg_line_length: 35.465116
max_line_length: 78
extension_type: py
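A minimal sketch of instantiating VideoClassificationModel with a toy 3D backbone: any tf.keras.Model that maps a video tensor to a dict of 5-D endpoints satisfies the contract used by the constructor above. The layer sizes, endpoint key '2' and class count are arbitrary.

import tensorflow as tf

from official.vision.modeling import video_classification_model

# Toy 3D backbone: a Keras model returning a dict of named endpoints.
video = tf.keras.Input(shape=(8, 56, 56, 3))
feats = tf.keras.layers.Conv3D(16, 3, strides=2, padding='same')(video)
backbone = tf.keras.Model(video, {'2': feats})

model = video_classification_model.VideoClassificationModel(
    backbone=backbone,
    num_classes=10,
    input_specs={'image': tf.keras.layers.InputSpec(shape=[None, 8, 56, 56, 3])},
    dropout_rate=0.2)
logits = model({'image': tf.zeros([2, 8, 56, 56, 3])})  # shape [2, 10]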
repo: models
file: models-master/official/vision/modeling/classification_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Build classification models.""" from typing import Any, Mapping, Optional # Import libraries import tensorflow as tf layers = tf.keras.layers @tf.keras.utils.register_keras_serializable(package='Vision') class ClassificationModel(tf.keras.Model): """A classification class builder.""" def __init__( self, backbone: tf.keras.Model, num_classes: int, input_specs: tf.keras.layers.InputSpec = layers.InputSpec( shape=[None, None, None, 3]), dropout_rate: float = 0.0, kernel_initializer: str = 'random_uniform', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, add_head_batch_norm: bool = False, use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, skip_logits_layer: bool = False, **kwargs): """Classification initialization function. Args: backbone: a backbone network. num_classes: `int` number of classes in classification task. input_specs: `tf.keras.layers.InputSpec` specs of the input tensor. dropout_rate: `float` rate for dropout regularization. kernel_initializer: kernel initializer for the dense layer. kernel_regularizer: tf.keras.regularizers.Regularizer object. Default to None. bias_regularizer: tf.keras.regularizers.Regularizer object. Default to None. add_head_batch_norm: `bool` whether to add a batch normalization layer before pool. use_sync_bn: `bool` if True, use synchronized batch normalization. norm_momentum: `float` normalization momentum for the moving average. norm_epsilon: `float` small float added to variance to avoid dividing by zero. skip_logits_layer: `bool`, whether to skip the prediction layer. **kwargs: keyword arguments to be passed. """ norm = tf.keras.layers.BatchNormalization axis = -1 if tf.keras.backend.image_data_format() == 'channels_last' else 1 inputs = tf.keras.Input(shape=input_specs.shape[1:], name=input_specs.name) endpoints = backbone(inputs) x = endpoints[max(endpoints.keys())] if add_head_batch_norm: x = norm( axis=axis, momentum=norm_momentum, epsilon=norm_epsilon, synchronized=use_sync_bn, )(x) # Depending on the backbone type, backbone's output can be # [batch_size, height, weight, channel_size] or # [batch_size, token_size, hidden_size]. 
if len(x.shape) == 4: x = tf.keras.layers.GlobalAveragePooling2D()(x) elif len(x.shape) == 3: x = tf.keras.layers.GlobalAveragePooling1D()(x) if not skip_logits_layer: x = tf.keras.layers.Dropout(dropout_rate)(x) x = tf.keras.layers.Dense( num_classes, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)( x) super(ClassificationModel, self).__init__( inputs=inputs, outputs=x, **kwargs) self._config_dict = { 'backbone': backbone, 'num_classes': num_classes, 'input_specs': input_specs, 'dropout_rate': dropout_rate, 'kernel_initializer': kernel_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer, 'add_head_batch_norm': add_head_batch_norm, 'use_sync_bn': use_sync_bn, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, } self._input_specs = input_specs self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._backbone = backbone self._norm = norm @property def checkpoint_items(self) -> Mapping[str, tf.keras.Model]: """Returns a dictionary of items to be additionally checkpointed.""" return dict(backbone=self.backbone) @property def backbone(self) -> tf.keras.Model: return self._backbone def get_config(self) -> Mapping[str, Any]: return self._config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
file_length: 4,857
avg_line_length: 35.526316
max_line_length: 79
extension_type: py
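The same pattern works for the 2-D ClassificationModel; a sketch with a toy backbone follows. The sizes are arbitrary, and the '2' endpoint key stands in for whatever level a real backbone emits.

import tensorflow as tf

from official.vision.modeling import classification_model

# Toy 2D backbone returning a single named endpoint.
image = tf.keras.Input(shape=(64, 64, 3))
feats = tf.keras.layers.Conv2D(8, 3, strides=2, padding='same')(image)
backbone = tf.keras.Model(image, {'2': feats})

model = classification_model.ClassificationModel(
    backbone=backbone,
    num_classes=5,
    input_specs=tf.keras.layers.InputSpec(shape=[None, 64, 64, 3]),
    dropout_rate=0.1)
logits = model(tf.zeros([2, 64, 64, 3]))  # shape [2, 5]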
repo: models
file: models-master/official/vision/modeling/factory_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for factory.py.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.vision.configs import backbones from official.vision.configs import backbones_3d from official.vision.configs import image_classification as classification_cfg from official.vision.configs import maskrcnn as maskrcnn_cfg from official.vision.configs import retinanet as retinanet_cfg from official.vision.configs import video_classification as video_classification_cfg from official.vision.modeling import factory from official.vision.modeling import factory_3d class ClassificationModelBuilderTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( ('resnet', (224, 224), 5e-5), ('resnet', (224, 224), None), ('resnet', (None, None), 5e-5), ('resnet', (None, None), None), ) def test_builder(self, backbone_type, input_size, weight_decay): num_classes = 2 input_specs = tf.keras.layers.InputSpec( shape=[None, input_size[0], input_size[1], 3]) model_config = classification_cfg.ImageClassificationModel( num_classes=num_classes, backbone=backbones.Backbone(type=backbone_type)) l2_regularizer = ( tf.keras.regularizers.l2(weight_decay) if weight_decay else None) _ = factory.build_classification_model( input_specs=input_specs, model_config=model_config, l2_regularizer=l2_regularizer) class MaskRCNNBuilderTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( ('resnet', (640, 640)), ('resnet', (None, None)), ) def test_builder(self, backbone_type, input_size): num_classes = 2 input_specs = tf.keras.layers.InputSpec( shape=[None, input_size[0], input_size[1], 3]) model_config = maskrcnn_cfg.MaskRCNN( num_classes=num_classes, backbone=backbones.Backbone(type=backbone_type)) l2_regularizer = tf.keras.regularizers.l2(5e-5) _ = factory.build_maskrcnn( input_specs=input_specs, model_config=model_config, l2_regularizer=l2_regularizer) class RetinaNetBuilderTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( ('resnet', (640, 640), False), ('resnet', (None, None), True), ) def test_builder(self, backbone_type, input_size, has_att_heads): num_classes = 2 input_specs = tf.keras.layers.InputSpec( shape=[None, input_size[0], input_size[1], 3]) if has_att_heads: attribute_heads_config = [ retinanet_cfg.AttributeHead(name='att1'), retinanet_cfg.AttributeHead( name='att2', type='classification', size=2), ] else: attribute_heads_config = None model_config = retinanet_cfg.RetinaNet( num_classes=num_classes, backbone=backbones.Backbone(type=backbone_type), head=retinanet_cfg.RetinaNetHead( attribute_heads=attribute_heads_config)) l2_regularizer = tf.keras.regularizers.l2(5e-5) _ = factory.build_retinanet( input_specs=input_specs, model_config=model_config, l2_regularizer=l2_regularizer) if has_att_heads: self.assertEqual( model_config.head.attribute_heads[0].as_dict(), dict( name='att1', type='regression', size=1, prediction_tower_name='', 
num_convs=None, num_filters=None, ), ) self.assertEqual( model_config.head.attribute_heads[1].as_dict(), dict( name='att2', type='classification', size=2, prediction_tower_name='', num_convs=None, num_filters=None, ), ) class VideoClassificationModelBuilderTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( ('resnet_3d', (8, 224, 224), 5e-5), ('resnet_3d', (None, None, None), 5e-5), ) def test_builder(self, backbone_type, input_size, weight_decay): input_specs = tf.keras.layers.InputSpec( shape=[None, input_size[0], input_size[1], input_size[2], 3]) model_config = video_classification_cfg.VideoClassificationModel( backbone=backbones_3d.Backbone3D(type=backbone_type)) l2_regularizer = ( tf.keras.regularizers.l2(weight_decay) if weight_decay else None) _ = factory_3d.build_video_classification_model( input_specs=input_specs, model_config=model_config, num_classes=2, l2_regularizer=l2_regularizer) if __name__ == '__main__': tf.test.main()
file_length: 5,286
avg_line_length: 34.246667
max_line_length: 84
extension_type: py
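Outside the parameterized harness, the classification factory call exercised by test_builder looks like this; the class count and input size are chosen arbitrarily, and l2_regularizer=None mirrors one of the test cases.

import tensorflow as tf

from official.vision.configs import backbones
from official.vision.configs import image_classification as classification_cfg
from official.vision.modeling import factory

model_config = classification_cfg.ImageClassificationModel(
    num_classes=10, backbone=backbones.Backbone(type='resnet'))
model = factory.build_classification_model(
    input_specs=tf.keras.layers.InputSpec(shape=[None, 224, 224, 3]),
    model_config=model_config,
    l2_regularizer=None)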
repo: models
file: models-master/official/vision/modeling/maskrcnn_model_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for maskrcnn_model.py.""" import os # Import libraries from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.vision.modeling import maskrcnn_model from official.vision.modeling.backbones import resnet from official.vision.modeling.decoders import fpn from official.vision.modeling.heads import dense_prediction_heads from official.vision.modeling.heads import instance_heads from official.vision.modeling.layers import detection_generator from official.vision.modeling.layers import mask_sampler from official.vision.modeling.layers import roi_aligner from official.vision.modeling.layers import roi_generator from official.vision.modeling.layers import roi_sampler from official.vision.ops import anchor class MaskRCNNModelTest(parameterized.TestCase, tf.test.TestCase): @combinations.generate( combinations.combine( include_mask=[True, False], use_separable_conv=[True, False], build_anchor_boxes=[True, False], use_outer_boxes=[True, False], is_training=[True, False])) def test_build_model(self, include_mask, use_separable_conv, build_anchor_boxes, use_outer_boxes, is_training): num_classes = 3 min_level = 3 max_level = 7 num_scales = 3 aspect_ratios = [1.0] anchor_size = 3 resnet_model_id = 50 num_anchors_per_location = num_scales * len(aspect_ratios) image_size = 384 images = np.random.rand(2, image_size, image_size, 3) image_shape = np.array([[image_size, image_size], [image_size, image_size]]) if build_anchor_boxes: anchor_boxes = anchor.Anchor( min_level=min_level, max_level=max_level, num_scales=num_scales, aspect_ratios=aspect_ratios, anchor_size=3, image_size=(image_size, image_size)).multilevel_boxes for l in anchor_boxes: anchor_boxes[l] = tf.tile( tf.expand_dims(anchor_boxes[l], axis=0), [2, 1, 1, 1]) else: anchor_boxes = None backbone = resnet.ResNet(model_id=resnet_model_id) decoder = fpn.FPN( input_specs=backbone.output_specs, min_level=min_level, max_level=max_level, use_separable_conv=use_separable_conv) rpn_head = dense_prediction_heads.RPNHead( min_level=min_level, max_level=max_level, num_anchors_per_location=num_anchors_per_location, num_convs=1) detection_head = instance_heads.DetectionHead(num_classes=num_classes) roi_generator_obj = roi_generator.MultilevelROIGenerator() roi_sampler_obj = roi_sampler.ROISampler() roi_aligner_obj = roi_aligner.MultilevelROIAligner() detection_generator_obj = detection_generator.DetectionGenerator() if include_mask: mask_head = instance_heads.MaskHead( num_classes=num_classes, upsample_factor=2) mask_sampler_obj = mask_sampler.MaskSampler( mask_target_size=28, num_sampled_masks=1) mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(crop_size=14) else: mask_head = None mask_sampler_obj = None mask_roi_aligner_obj = None model = maskrcnn_model.MaskRCNNModel( backbone, decoder, rpn_head, 
detection_head, roi_generator_obj, roi_sampler_obj, roi_aligner_obj, detection_generator_obj, mask_head, mask_sampler_obj, mask_roi_aligner_obj, min_level=min_level, max_level=max_level, num_scales=num_scales, aspect_ratios=aspect_ratios, anchor_size=anchor_size) gt_boxes = np.array( [[[10, 10, 15, 15], [2.5, 2.5, 7.5, 7.5], [-1, -1, -1, -1]], [[100, 100, 150, 150], [-1, -1, -1, -1], [-1, -1, -1, -1]]], dtype=np.float32) gt_outer_boxes = None if use_outer_boxes: gt_outer_boxes = np.array( [[[11, 11, 16.5, 16.5], [2.75, 2.75, 8.25, 8.25], [-1, -1, -1, -1]], [[110, 110, 165, 165], [-1, -1, -1, -1], [-1, -1, -1, -1]]], dtype=np.float32) gt_classes = np.array([[2, 1, -1], [1, -1, -1]], dtype=np.int32) if include_mask: gt_masks = np.ones((2, 3, 100, 100)) else: gt_masks = None # Results will be checked in test_forward. _ = model( images, image_shape, anchor_boxes, gt_boxes, gt_classes, gt_masks, gt_outer_boxes, training=is_training) @combinations.generate( combinations.combine( strategy=[ strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy_gpu, ], include_mask=[True, False], build_anchor_boxes=[True, False], use_cascade_heads=[True, False], training=[True, False], )) def test_forward(self, strategy, include_mask, build_anchor_boxes, training, use_cascade_heads): num_classes = 3 min_level = 3 max_level = 4 num_scales = 3 aspect_ratios = [1.0] anchor_size = 3 if use_cascade_heads: cascade_iou_thresholds = [0.6] class_agnostic_bbox_pred = True cascade_class_ensemble = True else: cascade_iou_thresholds = None class_agnostic_bbox_pred = False cascade_class_ensemble = False image_size = (256, 256) images = np.random.rand(2, image_size[0], image_size[1], 3) image_shape = np.array([[224, 100], [100, 224]]) with strategy.scope(): if build_anchor_boxes: anchor_boxes = anchor.Anchor( min_level=min_level, max_level=max_level, num_scales=num_scales, aspect_ratios=aspect_ratios, anchor_size=anchor_size, image_size=image_size).multilevel_boxes else: anchor_boxes = None num_anchors_per_location = len(aspect_ratios) * num_scales input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, 3]) backbone = resnet.ResNet(model_id=50, input_specs=input_specs) decoder = fpn.FPN( min_level=min_level, max_level=max_level, input_specs=backbone.output_specs) rpn_head = dense_prediction_heads.RPNHead( min_level=min_level, max_level=max_level, num_anchors_per_location=num_anchors_per_location) detection_head = instance_heads.DetectionHead( num_classes=num_classes, class_agnostic_bbox_pred=class_agnostic_bbox_pred) roi_generator_obj = roi_generator.MultilevelROIGenerator() roi_sampler_cascade = [] roi_sampler_obj = roi_sampler.ROISampler() roi_sampler_cascade.append(roi_sampler_obj) if cascade_iou_thresholds: for iou in cascade_iou_thresholds: roi_sampler_obj = roi_sampler.ROISampler( mix_gt_boxes=False, foreground_iou_threshold=iou, background_iou_high_threshold=iou, background_iou_low_threshold=0.0, skip_subsampling=True) roi_sampler_cascade.append(roi_sampler_obj) roi_aligner_obj = roi_aligner.MultilevelROIAligner() detection_generator_obj = detection_generator.DetectionGenerator() if include_mask: mask_head = instance_heads.MaskHead( num_classes=num_classes, upsample_factor=2) mask_sampler_obj = mask_sampler.MaskSampler( mask_target_size=28, num_sampled_masks=1) mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(crop_size=14) else: mask_head = None mask_sampler_obj = None mask_roi_aligner_obj = None model = maskrcnn_model.MaskRCNNModel( backbone, decoder, rpn_head, 
detection_head, roi_generator_obj, roi_sampler_obj, roi_aligner_obj, detection_generator_obj, mask_head, mask_sampler_obj, mask_roi_aligner_obj, class_agnostic_bbox_pred=class_agnostic_bbox_pred, cascade_class_ensemble=cascade_class_ensemble, min_level=min_level, max_level=max_level, num_scales=num_scales, aspect_ratios=aspect_ratios, anchor_size=anchor_size) gt_boxes = np.array( [[[10, 10, 15, 15], [2.5, 2.5, 7.5, 7.5], [-1, -1, -1, -1]], [[100, 100, 150, 150], [-1, -1, -1, -1], [-1, -1, -1, -1]]], dtype=np.float32) gt_outer_boxes = np.array( [[[11, 11, 16.5, 16.5], [2.75, 2.75, 8.25, 8.25], [-1, -1, -1, -1]], [[110, 110, 165, 165], [-1, -1, -1, -1], [-1, -1, -1, -1]]], dtype=np.float32) gt_classes = np.array([[2, 1, -1], [1, -1, -1]], dtype=np.int32) if include_mask: gt_masks = np.ones((2, 3, 100, 100)) else: gt_masks = None results = model( images, image_shape, anchor_boxes, gt_boxes, gt_classes, gt_masks, gt_outer_boxes, training=training) self.assertIn('rpn_boxes', results) self.assertIn('rpn_scores', results) if training: self.assertIn('class_targets', results) self.assertIn('box_targets', results) self.assertIn('class_outputs', results) self.assertIn('box_outputs', results) if include_mask: self.assertIn('mask_outputs', results) else: self.assertIn('detection_boxes', results) self.assertIn('detection_scores', results) self.assertIn('detection_classes', results) self.assertIn('num_detections', results) if include_mask: self.assertIn('detection_masks', results) @parameterized.parameters( (False,), (True,), ) def test_serialize_deserialize(self, include_mask): input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, 3]) backbone = resnet.ResNet(model_id=50, input_specs=input_specs) decoder = fpn.FPN( min_level=3, max_level=7, input_specs=backbone.output_specs) rpn_head = dense_prediction_heads.RPNHead( min_level=3, max_level=7, num_anchors_per_location=3) detection_head = instance_heads.DetectionHead(num_classes=2) roi_generator_obj = roi_generator.MultilevelROIGenerator() roi_sampler_obj = roi_sampler.ROISampler() roi_aligner_obj = roi_aligner.MultilevelROIAligner() detection_generator_obj = detection_generator.DetectionGenerator() if include_mask: mask_head = instance_heads.MaskHead(num_classes=2, upsample_factor=2) mask_sampler_obj = mask_sampler.MaskSampler( mask_target_size=28, num_sampled_masks=1) mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(crop_size=14) else: mask_head = None mask_sampler_obj = None mask_roi_aligner_obj = None model = maskrcnn_model.MaskRCNNModel( backbone, decoder, rpn_head, detection_head, roi_generator_obj, roi_sampler_obj, roi_aligner_obj, detection_generator_obj, mask_head, mask_sampler_obj, mask_roi_aligner_obj, min_level=3, max_level=7, num_scales=3, aspect_ratios=[1.0], anchor_size=3) config = model.get_config() new_model = maskrcnn_model.MaskRCNNModel.from_config(config) # Validate that the config can be forced to JSON. _ = new_model.to_json() # If the serialization was successful, the new config should match the old. 
self.assertAllEqual(model.get_config(), new_model.get_config()) @parameterized.parameters( (False,), (True,), ) def test_checkpoint(self, include_mask): input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, 3]) backbone = resnet.ResNet(model_id=50, input_specs=input_specs) decoder = fpn.FPN( min_level=3, max_level=7, input_specs=backbone.output_specs) rpn_head = dense_prediction_heads.RPNHead( min_level=3, max_level=7, num_anchors_per_location=3) detection_head = instance_heads.DetectionHead(num_classes=2) roi_generator_obj = roi_generator.MultilevelROIGenerator() roi_sampler_obj = roi_sampler.ROISampler() roi_aligner_obj = roi_aligner.MultilevelROIAligner() detection_generator_obj = detection_generator.DetectionGenerator() if include_mask: mask_head = instance_heads.MaskHead(num_classes=2, upsample_factor=2) mask_sampler_obj = mask_sampler.MaskSampler( mask_target_size=28, num_sampled_masks=1) mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(crop_size=14) else: mask_head = None mask_sampler_obj = None mask_roi_aligner_obj = None model = maskrcnn_model.MaskRCNNModel( backbone, decoder, rpn_head, detection_head, roi_generator_obj, roi_sampler_obj, roi_aligner_obj, detection_generator_obj, mask_head, mask_sampler_obj, mask_roi_aligner_obj, min_level=3, max_level=7, num_scales=3, aspect_ratios=[1.0], anchor_size=3) expect_checkpoint_items = dict( backbone=backbone, decoder=decoder, rpn_head=rpn_head, detection_head=[detection_head]) if include_mask: expect_checkpoint_items['mask_head'] = mask_head self.assertAllEqual(expect_checkpoint_items, model.checkpoint_items) # Test save and load checkpoints. ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items) save_dir = self.create_tempdir().full_path ckpt.save(os.path.join(save_dir, 'ckpt')) partial_ckpt = tf.train.Checkpoint(backbone=backbone) partial_ckpt.read(tf.train.latest_checkpoint( save_dir)).expect_partial().assert_existing_objects_matched() if include_mask: partial_ckpt_mask = tf.train.Checkpoint( backbone=backbone, mask_head=mask_head) partial_ckpt_mask.restore(tf.train.latest_checkpoint( save_dir)).expect_partial().assert_existing_objects_matched() if __name__ == '__main__': tf.test.main()
file_length: 14,834
avg_line_length: 35.094891
max_line_length: 80
extension_type: py
repo: models
file: models-master/official/vision/modeling/maskrcnn_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """R-CNN(-RS) models.""" from typing import Any, List, Mapping, Optional, Tuple, Union import tensorflow as tf from official.vision.ops import anchor from official.vision.ops import box_ops @tf.keras.utils.register_keras_serializable(package='Vision') class MaskRCNNModel(tf.keras.Model): """The Mask R-CNN(-RS) and Cascade RCNN-RS models.""" def __init__(self, backbone: tf.keras.Model, decoder: tf.keras.Model, rpn_head: tf.keras.layers.Layer, detection_head: Union[tf.keras.layers.Layer, List[tf.keras.layers.Layer]], roi_generator: tf.keras.layers.Layer, roi_sampler: Union[tf.keras.layers.Layer, List[tf.keras.layers.Layer]], roi_aligner: tf.keras.layers.Layer, detection_generator: tf.keras.layers.Layer, mask_head: Optional[tf.keras.layers.Layer] = None, mask_sampler: Optional[tf.keras.layers.Layer] = None, mask_roi_aligner: Optional[tf.keras.layers.Layer] = None, class_agnostic_bbox_pred: bool = False, cascade_class_ensemble: bool = False, min_level: Optional[int] = None, max_level: Optional[int] = None, num_scales: Optional[int] = None, aspect_ratios: Optional[List[float]] = None, anchor_size: Optional[float] = None, outer_boxes_scale: float = 1.0, **kwargs): """Initializes the R-CNN(-RS) model. Args: backbone: `tf.keras.Model`, the backbone network. decoder: `tf.keras.Model`, the decoder network. rpn_head: the RPN head. detection_head: the detection head or a list of heads. roi_generator: the ROI generator. roi_sampler: a single ROI sampler or a list of ROI samplers for cascade detection heads. roi_aligner: the ROI aligner. detection_generator: the detection generator. mask_head: the mask head. mask_sampler: the mask sampler. mask_roi_aligner: the ROI alginer for mask prediction. class_agnostic_bbox_pred: if True, perform class agnostic bounding box prediction. Needs to be `True` for Cascade RCNN models. cascade_class_ensemble: if True, ensemble classification scores over all detection heads. min_level: Minimum level in output feature maps. max_level: Maximum level in output feature maps. num_scales: A number representing intermediate scales added on each level. For instances, num_scales=2 adds one additional intermediate anchor scales [2^0, 2^0.5] on each level. aspect_ratios: A list representing the aspect raito anchors added on each level. The number indicates the ratio of width to height. For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each scale level. anchor_size: A number representing the scale of size of the base anchor to the feature stride 2^level. outer_boxes_scale: a float to scale up the bounding boxes to generate more inclusive masks. The scale is expected to be >=1.0. **kwargs: keyword arguments to be passed. 
""" super().__init__(**kwargs) self._config_dict = { 'backbone': backbone, 'decoder': decoder, 'rpn_head': rpn_head, 'detection_head': detection_head, 'roi_generator': roi_generator, 'roi_sampler': roi_sampler, 'roi_aligner': roi_aligner, 'detection_generator': detection_generator, 'outer_boxes_scale': outer_boxes_scale, 'mask_head': mask_head, 'mask_sampler': mask_sampler, 'mask_roi_aligner': mask_roi_aligner, 'class_agnostic_bbox_pred': class_agnostic_bbox_pred, 'cascade_class_ensemble': cascade_class_ensemble, 'min_level': min_level, 'max_level': max_level, 'num_scales': num_scales, 'aspect_ratios': aspect_ratios, 'anchor_size': anchor_size, } self.backbone = backbone self.decoder = decoder self.rpn_head = rpn_head if not isinstance(detection_head, (list, tuple)): self.detection_head = [detection_head] else: self.detection_head = detection_head self.roi_generator = roi_generator if not isinstance(roi_sampler, (list, tuple)): self.roi_sampler = [roi_sampler] else: self.roi_sampler = roi_sampler if len(self.roi_sampler) > 1 and not class_agnostic_bbox_pred: raise ValueError( '`class_agnostic_bbox_pred` needs to be True if multiple detection heads are specified.' ) self.roi_aligner = roi_aligner self.detection_generator = detection_generator self._include_mask = mask_head is not None if outer_boxes_scale < 1.0: raise ValueError('`outer_boxes_scale` should be a value >= 1.0.') self.outer_boxes_scale = outer_boxes_scale self.mask_head = mask_head if self._include_mask and mask_sampler is None: raise ValueError('`mask_sampler` is not provided in Mask R-CNN.') self.mask_sampler = mask_sampler if self._include_mask and mask_roi_aligner is None: raise ValueError('`mask_roi_aligner` is not provided in Mask R-CNN.') self.mask_roi_aligner = mask_roi_aligner # Weights for the regression losses for each FRCNN layer. # TODO(jiageng): Make the weights configurable. 
self._cascade_layer_to_weights = [ [10.0, 10.0, 5.0, 5.0], [20.0, 20.0, 10.0, 10.0], [30.0, 30.0, 15.0, 15.0], ] def call( # pytype: disable=signature-mismatch # overriding-parameter-count-checks self, images: tf.Tensor, image_shape: tf.Tensor, anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None, gt_boxes: Optional[tf.Tensor] = None, gt_classes: Optional[tf.Tensor] = None, gt_masks: Optional[tf.Tensor] = None, gt_outer_boxes: Optional[tf.Tensor] = None, training: Optional[bool] = None) -> Mapping[str, Optional[tf.Tensor]]: call_box_outputs_kwargs = { 'images': images, 'image_shape': image_shape, 'anchor_boxes': anchor_boxes, 'gt_boxes': gt_boxes, 'gt_classes': gt_classes, 'training': training, } if self.outer_boxes_scale > 1.0: call_box_outputs_kwargs['gt_outer_boxes'] = gt_outer_boxes model_outputs, intermediate_outputs = self._call_box_outputs( **call_box_outputs_kwargs) if not self._include_mask: return model_outputs if self.outer_boxes_scale == 1.0: current_rois = intermediate_outputs['current_rois'] matched_gt_boxes = intermediate_outputs['matched_gt_boxes'] else: current_rois = box_ops.compute_outer_boxes( intermediate_outputs['current_rois'], tf.expand_dims(image_shape, axis=1), self.outer_boxes_scale) matched_gt_boxes = intermediate_outputs['matched_gt_outer_boxes'] model_mask_outputs = self._call_mask_outputs( model_box_outputs=model_outputs, features=model_outputs['decoder_features'], current_rois=current_rois, matched_gt_indices=intermediate_outputs['matched_gt_indices'], matched_gt_boxes=matched_gt_boxes, matched_gt_classes=intermediate_outputs['matched_gt_classes'], gt_masks=gt_masks, training=training) model_outputs.update(model_mask_outputs) # pytype: disable=attribute-error # dynamic-method-lookup return model_outputs def _get_backbone_and_decoder_features(self, images): backbone_features = self.backbone(images) if self.decoder: features = self.decoder(backbone_features) else: features = backbone_features return backbone_features, features def _call_box_outputs( self, images: tf.Tensor, image_shape: tf.Tensor, anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None, gt_boxes: Optional[tf.Tensor] = None, gt_classes: Optional[tf.Tensor] = None, training: Optional[bool] = None, gt_outer_boxes: Optional[tf.Tensor] = None, ) -> Tuple[Mapping[str, Any], Mapping[str, Any]]: """Implementation of the Faster-RCNN logic for boxes.""" model_outputs = {} # Feature extraction. (backbone_features, decoder_features) = self._get_backbone_and_decoder_features(images) # Region proposal network. rpn_scores, rpn_boxes = self.rpn_head(decoder_features) model_outputs.update({ 'backbone_features': backbone_features, 'decoder_features': decoder_features, 'rpn_boxes': rpn_boxes, 'rpn_scores': rpn_scores }) # Generate anchor boxes for this batch if not provided. if anchor_boxes is None: _, image_height, image_width, _ = images.get_shape().as_list() anchor_boxes = anchor.Anchor( min_level=self._config_dict['min_level'], max_level=self._config_dict['max_level'], num_scales=self._config_dict['num_scales'], aspect_ratios=self._config_dict['aspect_ratios'], anchor_size=self._config_dict['anchor_size'], image_size=(image_height, image_width)).multilevel_boxes for l in anchor_boxes: anchor_boxes[l] = tf.tile( tf.expand_dims(anchor_boxes[l], axis=0), [tf.shape(images)[0], 1, 1, 1]) # Generate RoIs. 
current_rois, _ = self.roi_generator(rpn_boxes, rpn_scores, anchor_boxes, image_shape, training) next_rois = current_rois all_class_outputs = [] for cascade_num in range(len(self.roi_sampler)): # In cascade RCNN we want the higher layers to have different regression # weights as the predicted deltas become smaller and smaller. regression_weights = self._cascade_layer_to_weights[cascade_num] current_rois = next_rois if self.outer_boxes_scale == 1.0: (class_outputs, box_outputs, model_outputs, matched_gt_boxes, matched_gt_classes, matched_gt_indices, current_rois) = self._run_frcnn_head( features=decoder_features, rois=current_rois, gt_boxes=gt_boxes, gt_classes=gt_classes, training=training, model_outputs=model_outputs, cascade_num=cascade_num, regression_weights=regression_weights) else: (class_outputs, box_outputs, model_outputs, (matched_gt_boxes, matched_gt_outer_boxes), matched_gt_classes, matched_gt_indices, current_rois) = self._run_frcnn_head( features=decoder_features, rois=current_rois, gt_boxes=gt_boxes, gt_outer_boxes=gt_outer_boxes, gt_classes=gt_classes, training=training, model_outputs=model_outputs, cascade_num=cascade_num, regression_weights=regression_weights) all_class_outputs.append(class_outputs) # Generate ROIs for the next cascade head if there is any. if cascade_num < len(self.roi_sampler) - 1: next_rois = box_ops.decode_boxes( tf.cast(box_outputs, tf.float32), current_rois, weights=regression_weights) next_rois = box_ops.clip_boxes(next_rois, tf.expand_dims(image_shape, axis=1)) if not training: if self._config_dict['cascade_class_ensemble']: class_outputs = tf.add_n(all_class_outputs) / len(all_class_outputs) detections = self.detection_generator( box_outputs, class_outputs, current_rois, image_shape, regression_weights, bbox_per_class=(not self._config_dict['class_agnostic_bbox_pred'])) model_outputs.update({ 'cls_outputs': class_outputs, 'box_outputs': box_outputs, }) if self.detection_generator.get_config()['apply_nms']: model_outputs.update({ 'detection_boxes': detections['detection_boxes'], 'detection_scores': detections['detection_scores'], 'detection_classes': detections['detection_classes'], 'num_detections': detections['num_detections'] }) if self.outer_boxes_scale > 1.0: detection_outer_boxes = box_ops.compute_outer_boxes( detections['detection_boxes'], tf.expand_dims(image_shape, axis=1), self.outer_boxes_scale) model_outputs['detection_outer_boxes'] = detection_outer_boxes else: model_outputs.update({ 'decoded_boxes': detections['decoded_boxes'], 'decoded_box_scores': detections['decoded_box_scores'] }) intermediate_outputs = { 'matched_gt_boxes': matched_gt_boxes, 'matched_gt_indices': matched_gt_indices, 'matched_gt_classes': matched_gt_classes, 'current_rois': current_rois, } if self.outer_boxes_scale > 1.0: intermediate_outputs['matched_gt_outer_boxes'] = matched_gt_outer_boxes return (model_outputs, intermediate_outputs) def _call_mask_outputs( self, model_box_outputs: Mapping[str, tf.Tensor], features: tf.Tensor, current_rois: tf.Tensor, matched_gt_indices: tf.Tensor, matched_gt_boxes: tf.Tensor, matched_gt_classes: tf.Tensor, gt_masks: tf.Tensor, training: Optional[bool] = None) -> Mapping[str, tf.Tensor]: """Implementation of Mask-RCNN mask prediction logic.""" model_outputs = dict(model_box_outputs) if training: current_rois, roi_classes, roi_masks = self.mask_sampler( current_rois, matched_gt_boxes, matched_gt_classes, matched_gt_indices, gt_masks) roi_masks = tf.stop_gradient(roi_masks) model_outputs.update({ 'mask_class_targets': 
roi_classes, 'mask_targets': roi_masks, }) else: if self.outer_boxes_scale == 1.0: current_rois = model_outputs['detection_boxes'] else: current_rois = model_outputs['detection_outer_boxes'] roi_classes = model_outputs['detection_classes'] mask_logits, mask_probs = self._features_to_mask_outputs( features, current_rois, roi_classes) if training: model_outputs.update({ 'mask_outputs': mask_logits, }) else: model_outputs.update({ 'detection_masks': mask_probs, }) return model_outputs def _run_frcnn_head(self, features, rois, gt_boxes, gt_classes, training, model_outputs, cascade_num, regression_weights, gt_outer_boxes=None): """Runs the frcnn head that does both class and box prediction. Args: features: `list` of features from the feature extractor. rois: `list` of current rois that will be used to predict bbox refinement and classes from. gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This tensor might have paddings with a negative value. gt_classes: [batch_size, MAX_INSTANCES] representing the groundtruth box classes. It is padded with -1s to indicate the invalid classes. training: `bool`, if model is training or being evaluated. model_outputs: `dict`, used for storing outputs used for eval and losses. cascade_num: `int`, the current frcnn layer in the cascade. regression_weights: `list`, weights used for l1 loss in bounding box regression. gt_outer_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This tensor might have paddings with a negative value. Default to None. Returns: class_outputs: Class predictions for rois. box_outputs: Box predictions for rois. These are formatted for the regression loss and need to be converted before being used as rois in the next stage. model_outputs: Updated dict with predictions used for losses and eval. matched_gt_boxes: If `is_training` is true, then these give the gt box location of its positive match. matched_gt_classes: If `is_training` is true, then these give the gt class of the predicted box. matched_gt_boxes: If `is_training` is true, then these give the box location of its positive match. matched_gt_outer_boxes: If `is_training` is true, then these give the outer box location of its positive match. Only exist if outer_boxes_scale is greater than 1.0. matched_gt_indices: If `is_training` is true, then gives the index of the positive box match. Used for mask prediction. rois: The sampled rois used for this layer. """ # Only used during training. matched_gt_boxes, matched_gt_classes, matched_gt_indices = None, None, None if self.outer_boxes_scale > 1.0: matched_gt_outer_boxes = None if training and gt_boxes is not None: rois = tf.stop_gradient(rois) current_roi_sampler = self.roi_sampler[cascade_num] if self.outer_boxes_scale == 1.0: rois, matched_gt_boxes, matched_gt_classes, matched_gt_indices = ( current_roi_sampler(rois, gt_boxes, gt_classes)) else: (rois, matched_gt_boxes, matched_gt_outer_boxes, matched_gt_classes, matched_gt_indices) = current_roi_sampler(rois, gt_boxes, gt_classes, gt_outer_boxes) # Create bounding box training targets. box_targets = box_ops.encode_boxes( matched_gt_boxes, rois, weights=regression_weights) # If the target is background, the box target is set to all 0s. 
box_targets = tf.where( tf.tile( tf.expand_dims(tf.equal(matched_gt_classes, 0), axis=-1), [1, 1, 4]), tf.zeros_like(box_targets), box_targets) model_outputs.update({ 'class_targets_{}'.format(cascade_num) if cascade_num else 'class_targets': matched_gt_classes, 'box_targets_{}'.format(cascade_num) if cascade_num else 'box_targets': box_targets, }) # Get roi features. roi_features = self.roi_aligner(features, rois) # Run frcnn head to get class and bbox predictions. current_detection_head = self.detection_head[cascade_num] class_outputs, box_outputs = current_detection_head(roi_features) model_outputs.update({ 'class_outputs_{}'.format(cascade_num) if cascade_num else 'class_outputs': class_outputs, 'box_outputs_{}'.format(cascade_num) if cascade_num else 'box_outputs': box_outputs, }) if self.outer_boxes_scale == 1.0: return (class_outputs, box_outputs, model_outputs, matched_gt_boxes, matched_gt_classes, matched_gt_indices, rois) else: return (class_outputs, box_outputs, model_outputs, (matched_gt_boxes, matched_gt_outer_boxes), matched_gt_classes, matched_gt_indices, rois) def _features_to_mask_outputs(self, features, rois, roi_classes): # Mask RoI align. mask_roi_features = self.mask_roi_aligner(features, rois) # Mask head. raw_masks = self.mask_head([mask_roi_features, roi_classes]) return raw_masks, tf.nn.sigmoid(raw_masks) @property def checkpoint_items( self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]: """Returns a dictionary of items to be additionally checkpointed.""" items = dict( backbone=self.backbone, rpn_head=self.rpn_head, detection_head=self.detection_head) if self.decoder is not None: items.update(decoder=self.decoder) if self._include_mask: items.update(mask_head=self.mask_head) return items def get_config(self) -> Mapping[str, Any]: return self._config_dict @classmethod def from_config(cls, config): return cls(**config)
file_length: 20,733
avg_line_length: 39.734774
max_line_length: 104
extension_type: py
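When anchor_boxes is None, MaskRCNNModel generates and batches multilevel anchors itself inside _call_box_outputs; a standalone sketch of that step follows. The levels, scales, batch size and image size are illustrative.

import tensorflow as tf

from official.vision.ops import anchor

# Per-level anchors for a 256x256 image, tiled to a batch of 2, matching the
# fallback path in MaskRCNNModel._call_box_outputs.
anchor_boxes = anchor.Anchor(
    min_level=3, max_level=7, num_scales=3, aspect_ratios=[1.0],
    anchor_size=3, image_size=(256, 256)).multilevel_boxes
for level in anchor_boxes:
  anchor_boxes[level] = tf.tile(
      tf.expand_dims(anchor_boxes[level], axis=0), [2, 1, 1, 1])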
repo: models
file: models-master/official/vision/modeling/retinanet_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """RetinaNet.""" from typing import Any, Mapping, List, Optional, Union, Sequence # Import libraries import tensorflow as tf from official.vision.ops import anchor @tf.keras.utils.register_keras_serializable(package='Vision') class RetinaNetModel(tf.keras.Model): """The RetinaNet model class.""" def __init__(self, backbone: tf.keras.Model, decoder: tf.keras.Model, head: tf.keras.layers.Layer, detection_generator: tf.keras.layers.Layer, min_level: Optional[int] = None, max_level: Optional[int] = None, num_scales: Optional[int] = None, aspect_ratios: Optional[List[float]] = None, anchor_size: Optional[float] = None, **kwargs): """Detection initialization function. Args: backbone: `tf.keras.Model` a backbone network. decoder: `tf.keras.Model` a decoder network. head: `RetinaNetHead`, the RetinaNet head. detection_generator: the detection generator. min_level: Minimum level in output feature maps. max_level: Maximum level in output feature maps. num_scales: A number representing intermediate scales added on each level. For instances, num_scales=2 adds one additional intermediate anchor scales [2^0, 2^0.5] on each level. aspect_ratios: A list representing the aspect raito anchors added on each level. The number indicates the ratio of width to height. For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each scale level. anchor_size: A number representing the scale of size of the base anchor to the feature stride 2^level. **kwargs: keyword arguments to be passed. """ super(RetinaNetModel, self).__init__(**kwargs) self._config_dict = { 'backbone': backbone, 'decoder': decoder, 'head': head, 'detection_generator': detection_generator, 'min_level': min_level, 'max_level': max_level, 'num_scales': num_scales, 'aspect_ratios': aspect_ratios, 'anchor_size': anchor_size, } self._backbone = backbone self._decoder = decoder self._head = head self._detection_generator = detection_generator def call(self, images: Union[tf.Tensor, Sequence[tf.Tensor]], image_shape: Optional[tf.Tensor] = None, anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None, output_intermediate_features: bool = False, training: bool = None) -> Mapping[str, tf.Tensor]: """Forward pass of the RetinaNet model. Args: images: `Tensor` or a sequence of `Tensor`, the input batched images to the backbone network, whose shape(s) is [batch, height, width, 3]. If it is a sequence of `Tensor`, we will assume the anchors are generated based on the shape of the first image(s). image_shape: `Tensor`, the actual shape of the input images, whose shape is [batch, 2] where the last dimension is [height, width]. Note that this is the actual image shape excluding paddings. For example, images in the batch may be resized into different shapes before padding to the fixed size. anchor_boxes: a dict of tensors which includes multilevel anchors. - key: `str`, the level of the multilevel predictions. 
- values: `Tensor`, the anchor coordinates of a particular feature level, whose shape is [height_l, width_l, num_anchors_per_location]. output_intermediate_features: `bool` indicating whether to return the intermediate feature maps generated by backbone and decoder. training: `bool`, indicating whether it is in training mode. Returns: scores: a dict of tensors which includes scores of the predictions. - key: `str`, the level of the multilevel predictions. - values: `Tensor`, the box scores predicted from a particular feature level, whose shape is [batch, height_l, width_l, num_classes * num_anchors_per_location]. boxes: a dict of tensors which includes coordinates of the predictions. - key: `str`, the level of the multilevel predictions. - values: `Tensor`, the box coordinates predicted from a particular feature level, whose shape is [batch, height_l, width_l, 4 * num_anchors_per_location]. attributes: a dict of (attribute_name, attribute_predictions). Each attribute prediction is a dict that includes: - key: `str`, the level of the multilevel predictions. - values: `Tensor`, the attribute predictions from a particular feature level, whose shape is [batch, height_l, width_l, att_size * num_anchors_per_location]. """ outputs = {} # Feature extraction. features = self.backbone(images) if output_intermediate_features: outputs.update( {'backbone_{}'.format(k): v for k, v in features.items()}) if self.decoder: features = self.decoder(features) if output_intermediate_features: outputs.update( {'decoder_{}'.format(k): v for k, v in features.items()}) # Dense prediction. `raw_attributes` can be empty. raw_scores, raw_boxes, raw_attributes = self.head(features) if training: outputs.update({ 'cls_outputs': raw_scores, 'box_outputs': raw_boxes, }) if raw_attributes: outputs.update({'attribute_outputs': raw_attributes}) return outputs else: # Generate anchor boxes for this batch if not provided. if anchor_boxes is None: if isinstance(images, Sequence): primary_images = images[0] elif isinstance(images, tf.Tensor): primary_images = images else: raise ValueError( 'Input should be a tf.Tensor or a sequence of tf.Tensor, not {}.' .format(type(images))) _, image_height, image_width, _ = primary_images.get_shape().as_list() anchor_boxes = anchor.Anchor( min_level=self._config_dict['min_level'], max_level=self._config_dict['max_level'], num_scales=self._config_dict['num_scales'], aspect_ratios=self._config_dict['aspect_ratios'], anchor_size=self._config_dict['anchor_size'], image_size=(image_height, image_width)).multilevel_boxes for l in anchor_boxes: anchor_boxes[l] = tf.tile( tf.expand_dims(anchor_boxes[l], axis=0), [tf.shape(primary_images)[0], 1, 1, 1]) # Post-processing. 
final_results = self.detection_generator(raw_boxes, raw_scores, anchor_boxes, image_shape, raw_attributes) outputs.update({ 'cls_outputs': raw_scores, 'box_outputs': raw_boxes, }) def _update_decoded_results(): outputs.update({ 'decoded_boxes': final_results['decoded_boxes'], 'decoded_box_scores': final_results['decoded_box_scores'], }) if final_results.get('decoded_box_attributes') is not None: outputs['decoded_box_attributes'] = final_results[ 'decoded_box_attributes' ] if self.detection_generator.get_config()['apply_nms']: outputs.update({ 'detection_boxes': final_results['detection_boxes'], 'detection_scores': final_results['detection_scores'], 'detection_classes': final_results['detection_classes'], 'num_detections': final_results['num_detections'], }) # Users can choose to include the decoded results (boxes before NMS) in # the output tensor dict even if `apply_nms` is set to `True`. if self.detection_generator.get_config()['return_decoded']: _update_decoded_results() else: _update_decoded_results() if raw_attributes: outputs.update({ 'attribute_outputs': raw_attributes, 'detection_attributes': final_results['detection_attributes'], }) return outputs @property def checkpoint_items( self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]: """Returns a dictionary of items to be additionally checkpointed.""" items = dict(backbone=self.backbone, head=self.head) if self.decoder is not None: items.update(decoder=self.decoder) return items @property def backbone(self) -> tf.keras.Model: return self._backbone @property def decoder(self) -> tf.keras.Model: return self._decoder @property def head(self) -> tf.keras.layers.Layer: return self._head @property def detection_generator(self) -> tf.keras.layers.Layer: return self._detection_generator def get_config(self) -> Mapping[str, Any]: return self._config_dict @classmethod def from_config(cls, config): return cls(**config)
9,695
39.4
80
py
models
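A minimal usage sketch (not part of the file above): it assembles a RetinaNetModel from the same building blocks exercised in retinanet_model_test.py below. All sizes and hyperparameters here are illustrative assumptions, not values prescribed by this file.

import numpy as np
import tensorflow as tf

from official.vision.modeling import retinanet_model
from official.vision.modeling.backbones import resnet
from official.vision.modeling.decoders import fpn
from official.vision.modeling.heads import dense_prediction_heads
from official.vision.modeling.layers import detection_generator

min_level, max_level = 3, 7
num_scales, aspect_ratios, anchor_size = 3, [1.0], 3  # illustrative anchor setup

backbone = resnet.ResNet(model_id=50)
decoder = fpn.FPN(
    input_specs=backbone.output_specs, min_level=min_level, max_level=max_level)
head = dense_prediction_heads.RetinaNetHead(
    min_level=min_level,
    max_level=max_level,
    num_classes=3,
    num_anchors_per_location=num_scales * len(aspect_ratios))
generator = detection_generator.MultilevelDetectionGenerator(
    max_num_detections=10)

model = retinanet_model.RetinaNetModel(
    backbone=backbone,
    decoder=decoder,
    head=head,
    detection_generator=generator,
    min_level=min_level,
    max_level=max_level,
    num_scales=num_scales,
    aspect_ratios=aspect_ratios,
    anchor_size=anchor_size)

# Inference: `anchor_boxes` is omitted, so the model builds multilevel anchors
# internally from the min/max level and anchor settings passed above.
images = np.random.rand(2, 384, 384, 3)
image_shape = np.array([[384, 384], [384, 384]])
outputs = model(images, image_shape, training=False)  # detection_boxes, scores, ...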
models-master/official/vision/modeling/retinanet_model_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for RetinaNet models.""" # Import libraries from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.vision.modeling import retinanet_model from official.vision.modeling.backbones import resnet from official.vision.modeling.decoders import fpn from official.vision.modeling.heads import dense_prediction_heads from official.vision.modeling.layers import detection_generator from official.vision.ops import anchor class RetinaNetTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( { 'use_separable_conv': True, 'build_anchor_boxes': True, 'is_training': False, 'has_att_heads': False }, { 'use_separable_conv': False, 'build_anchor_boxes': True, 'is_training': False, 'has_att_heads': False }, { 'use_separable_conv': False, 'build_anchor_boxes': False, 'is_training': False, 'has_att_heads': False }, { 'use_separable_conv': False, 'build_anchor_boxes': False, 'is_training': True, 'has_att_heads': False }, { 'use_separable_conv': False, 'build_anchor_boxes': True, 'is_training': True, 'has_att_heads': True }, { 'use_separable_conv': False, 'build_anchor_boxes': True, 'is_training': False, 'has_att_heads': True }, ) def test_build_model(self, use_separable_conv, build_anchor_boxes, is_training, has_att_heads): num_classes = 3 min_level = 3 max_level = 7 num_scales = 3 aspect_ratios = [1.0] anchor_size = 3 fpn_num_filters = 256 head_num_convs = 4 head_num_filters = 256 num_anchors_per_location = num_scales * len(aspect_ratios) image_size = 384 images = np.random.rand(2, image_size, image_size, 3) image_shape = np.array([[image_size, image_size], [image_size, image_size]]) if build_anchor_boxes: anchor_boxes = anchor.Anchor( min_level=min_level, max_level=max_level, num_scales=num_scales, aspect_ratios=aspect_ratios, anchor_size=anchor_size, image_size=(image_size, image_size)).multilevel_boxes for l in anchor_boxes: anchor_boxes[l] = tf.tile( tf.expand_dims(anchor_boxes[l], axis=0), [2, 1, 1, 1]) else: anchor_boxes = None if has_att_heads: attribute_heads = [ dict( name='depth', type='regression', size=1, prediction_tower_name='') ] else: attribute_heads = None backbone = resnet.ResNet(model_id=50) decoder = fpn.FPN( input_specs=backbone.output_specs, min_level=min_level, max_level=max_level, num_filters=fpn_num_filters, use_separable_conv=use_separable_conv) head = dense_prediction_heads.RetinaNetHead( min_level=min_level, max_level=max_level, num_classes=num_classes, attribute_heads=attribute_heads, num_anchors_per_location=num_anchors_per_location, use_separable_conv=use_separable_conv, num_convs=head_num_convs, num_filters=head_num_filters) generator = detection_generator.MultilevelDetectionGenerator( max_num_detections=10) model = retinanet_model.RetinaNetModel( backbone=backbone, decoder=decoder, head=head, 
detection_generator=generator, min_level=min_level, max_level=max_level, num_scales=num_scales, aspect_ratios=aspect_ratios, anchor_size=anchor_size) _ = model(images, image_shape, anchor_boxes, training=is_training) @combinations.generate( combinations.combine( strategy=[ strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy_gpu, ], image_size=[ (128, 128), ], training=[True, False], has_att_heads=[True, False], output_intermediate_features=[True, False], soft_nms_sigma=[None, 0.0, 0.1], )) def test_forward(self, strategy, image_size, training, has_att_heads, output_intermediate_features, soft_nms_sigma): """Test for creation of a R50-FPN RetinaNet.""" tf.keras.backend.set_image_data_format('channels_last') num_classes = 3 min_level = 3 max_level = 7 num_scales = 3 aspect_ratios = [1.0] num_anchors_per_location = num_scales * len(aspect_ratios) images = np.random.rand(2, image_size[0], image_size[1], 3) image_shape = np.array( [[image_size[0], image_size[1]], [image_size[0], image_size[1]]]) with strategy.scope(): anchor_gen = anchor.build_anchor_generator( min_level=min_level, max_level=max_level, num_scales=num_scales, aspect_ratios=aspect_ratios, anchor_size=3) anchor_boxes = anchor_gen(image_size) for l in anchor_boxes: anchor_boxes[l] = tf.tile( tf.expand_dims(anchor_boxes[l], axis=0), [2, 1, 1, 1]) backbone = resnet.ResNet(model_id=50) decoder = fpn.FPN( input_specs=backbone.output_specs, min_level=min_level, max_level=max_level) if has_att_heads: attribute_heads = [ dict( name='depth', type='regression', size=1, prediction_tower_name='') ] else: attribute_heads = None head = dense_prediction_heads.RetinaNetHead( min_level=min_level, max_level=max_level, num_classes=num_classes, attribute_heads=attribute_heads, num_anchors_per_location=num_anchors_per_location) generator = detection_generator.MultilevelDetectionGenerator( max_num_detections=10, nms_version='v1', use_cpu_nms=soft_nms_sigma is not None, soft_nms_sigma=soft_nms_sigma) model = retinanet_model.RetinaNetModel( backbone=backbone, decoder=decoder, head=head, detection_generator=generator) model_outputs = model( images, image_shape, anchor_boxes, output_intermediate_features=output_intermediate_features, training=training) if training: cls_outputs = model_outputs['cls_outputs'] box_outputs = model_outputs['box_outputs'] for level in range(min_level, max_level + 1): self.assertIn(str(level), cls_outputs) self.assertIn(str(level), box_outputs) self.assertAllEqual([ 2, image_size[0] // 2**level, image_size[1] // 2**level, num_classes * num_anchors_per_location ], cls_outputs[str(level)].numpy().shape) self.assertAllEqual([ 2, image_size[0] // 2**level, image_size[1] // 2**level, 4 * num_anchors_per_location ], box_outputs[str(level)].numpy().shape) if has_att_heads: att_outputs = model_outputs['attribute_outputs'] for att in att_outputs.values(): self.assertAllEqual([ 2, image_size[0] // 2**level, image_size[1] // 2**level, 1 * num_anchors_per_location ], att[str(level)].numpy().shape) else: self.assertIn('detection_boxes', model_outputs) self.assertIn('detection_scores', model_outputs) self.assertIn('detection_classes', model_outputs) self.assertIn('num_detections', model_outputs) self.assertAllEqual( [2, 10, 4], model_outputs['detection_boxes'].numpy().shape) self.assertAllEqual( [2, 10], model_outputs['detection_scores'].numpy().shape) self.assertAllEqual( [2, 10], model_outputs['detection_classes'].numpy().shape) self.assertAllEqual( [2,], model_outputs['num_detections'].numpy().shape) if 
has_att_heads: self.assertIn('detection_attributes', model_outputs) self.assertAllEqual( [2, 10, 1], model_outputs['detection_attributes']['depth'].numpy().shape) if output_intermediate_features: for l in range(2, 6): self.assertIn('backbone_{}'.format(l), model_outputs) self.assertAllEqual([ 2, image_size[0] // 2**l, image_size[1] // 2**l, backbone.output_specs[str(l)].as_list()[-1] ], model_outputs['backbone_{}'.format(l)].numpy().shape) for l in range(min_level, max_level + 1): self.assertIn('decoder_{}'.format(l), model_outputs) self.assertAllEqual([ 2, image_size[0] // 2**l, image_size[1] // 2**l, decoder.output_specs[str(l)].as_list()[-1] ], model_outputs['decoder_{}'.format(l)].numpy().shape) def test_serialize_deserialize(self): """Validate the network can be serialized and deserialized.""" num_classes = 3 min_level = 3 max_level = 7 num_scales = 3 aspect_ratios = [1.0] num_anchors_per_location = num_scales * len(aspect_ratios) backbone = resnet.ResNet(model_id=50) decoder = fpn.FPN( input_specs=backbone.output_specs, min_level=min_level, max_level=max_level) head = dense_prediction_heads.RetinaNetHead( min_level=min_level, max_level=max_level, num_classes=num_classes, num_anchors_per_location=num_anchors_per_location) generator = detection_generator.MultilevelDetectionGenerator( max_num_detections=10) model = retinanet_model.RetinaNetModel( backbone=backbone, decoder=decoder, head=head, detection_generator=generator, min_level=min_level, max_level=max_level, num_scales=num_scales, aspect_ratios=aspect_ratios, anchor_size=3) config = model.get_config() new_model = retinanet_model.RetinaNetModel.from_config(config) # Validate that the config can be forced to JSON. _ = new_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(model.get_config(), new_model.get_config()) if __name__ == '__main__': tf.test.main()
11,148
33.517028
80
py
models
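A hedged sketch restating how the test above prepares the optional `anchor_boxes` argument: anchors are generated per feature level and then tiled to add a batch dimension. The levels, scales, and image size are illustrative.

import tensorflow as tf
from official.vision.ops import anchor

batch_size = 2
image_size = (128, 128)

anchor_gen = anchor.build_anchor_generator(
    min_level=3, max_level=7, num_scales=3, aspect_ratios=[1.0], anchor_size=3)
anchor_boxes = anchor_gen(image_size)  # dict keyed by level string, e.g. '3'..'7'
for level in anchor_boxes:
  # Add a leading batch dimension so the anchors can be passed to
  # RetinaNetModel(images, image_shape, anchor_boxes, ...).
  anchor_boxes[level] = tf.tile(
      tf.expand_dims(anchor_boxes[level], axis=0), [batch_size, 1, 1, 1])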
models-master/official/vision/modeling/segmentation_model_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for segmentation network.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from official.vision.modeling import backbones from official.vision.modeling import segmentation_model from official.vision.modeling.decoders import fpn from official.vision.modeling.heads import segmentation_heads class SegmentationNetworkTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (128, 2), (128, 3), (128, 4), (256, 2), (256, 3), (256, 4), ) def test_segmentation_network_creation( self, input_size, level): """Test for creation of a segmentation network.""" num_classes = 10 inputs = np.random.rand(2, input_size, input_size, 3) tf.keras.backend.set_image_data_format('channels_last') backbone = backbones.ResNet(model_id=50) decoder = fpn.FPN( input_specs=backbone.output_specs, min_level=2, max_level=7) head = segmentation_heads.SegmentationHead(num_classes, level=level) model = segmentation_model.SegmentationModel( backbone=backbone, decoder=decoder, head=head, mask_scoring_head=None, ) outputs = model(inputs) self.assertAllEqual( [2, input_size // (2**level), input_size // (2**level), num_classes], outputs['logits'].numpy().shape) def test_serialize_deserialize(self): """Validate the network can be serialized and deserialized.""" num_classes = 3 backbone = backbones.ResNet(model_id=50) decoder = fpn.FPN( input_specs=backbone.output_specs, min_level=3, max_level=7) head = segmentation_heads.SegmentationHead(num_classes, level=3) model = segmentation_model.SegmentationModel( backbone=backbone, decoder=decoder, head=head ) config = model.get_config() new_model = segmentation_model.SegmentationModel.from_config(config) # Validate that the config can be forced to JSON. _ = new_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(model.get_config(), new_model.get_config()) if __name__ == '__main__': tf.test.main()
2,807
31.651163
79
py
models
models-master/official/vision/modeling/classification_model_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for classification network.""" # Import libraries from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.vision.modeling import backbones from official.vision.modeling import classification_model class ClassificationNetworkTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (192 * 4, 3, 12, 192, 5524416), (384 * 4, 6, 12, 384, 21665664), ) def test_vision_transformer_creation(self, mlp_dim, num_heads, num_layers, hidden_size, num_params): """Test for creation of a Vision Transformer classifier.""" inputs = np.random.rand(2, 224, 224, 3) tf.keras.backend.set_image_data_format('channels_last') backbone = backbones.VisionTransformer( mlp_dim=mlp_dim, num_heads=num_heads, num_layers=num_layers, hidden_size=hidden_size, input_specs=tf.keras.layers.InputSpec(shape=[None, 224, 224, 3]), ) self.assertEqual(backbone.count_params(), num_params) num_classes = 1000 model = classification_model.ClassificationModel( backbone=backbone, num_classes=num_classes, dropout_rate=0.2, ) logits = model(inputs) self.assertAllEqual([2, num_classes], logits.numpy().shape) @parameterized.parameters( (128, 50, 'relu'), (128, 50, 'relu'), (128, 50, 'swish'), ) def test_resnet_network_creation(self, input_size, resnet_model_id, activation): """Test for creation of a ResNet-50 classifier.""" inputs = np.random.rand(2, input_size, input_size, 3) tf.keras.backend.set_image_data_format('channels_last') backbone = backbones.ResNet(model_id=resnet_model_id, activation=activation) self.assertEqual(backbone.count_params(), 23561152) num_classes = 1000 model = classification_model.ClassificationModel( backbone=backbone, num_classes=num_classes, dropout_rate=0.2, ) self.assertEqual(model.count_params(), 25610152) logits = model(inputs) self.assertAllEqual([2, num_classes], logits.numpy().shape) def test_revnet_network_creation(self): """Test for creation of a RevNet-56 classifier.""" revnet_model_id = 56 inputs = np.random.rand(2, 224, 224, 3) tf.keras.backend.set_image_data_format('channels_last') backbone = backbones.RevNet(model_id=revnet_model_id) self.assertEqual(backbone.count_params(), 19473792) num_classes = 1000 model = classification_model.ClassificationModel( backbone=backbone, num_classes=num_classes, dropout_rate=0.2, add_head_batch_norm=True, ) self.assertEqual(model.count_params(), 22816104) logits = model(inputs) self.assertAllEqual([2, num_classes], logits.numpy().shape) @combinations.generate( combinations.combine( mobilenet_model_id=[ 'MobileNetV1', 'MobileNetV2', 'MobileNetV3Large', 'MobileNetV3Small', 'MobileNetV3EdgeTPU', 'MobileNetMultiAVG', 'MobileNetMultiMAX', ], filter_size_scale=[1.0, 0.75], )) def test_mobilenet_network_creation(self, mobilenet_model_id, filter_size_scale): """Test for creation of a MobileNet 
classifier.""" inputs = np.random.rand(2, 224, 224, 3) tf.keras.backend.set_image_data_format('channels_last') backbone = backbones.MobileNet( model_id=mobilenet_model_id, filter_size_scale=filter_size_scale) num_classes = 1001 model = classification_model.ClassificationModel( backbone=backbone, num_classes=num_classes, dropout_rate=0.2, ) logits = model(inputs) self.assertAllEqual([2, num_classes], logits.numpy().shape) @combinations.generate( combinations.combine( strategy=[ strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy_gpu, ], use_sync_bn=[False, True], )) def test_sync_bn_multiple_devices(self, strategy, use_sync_bn): """Test for sync bn on TPU and GPU devices.""" inputs = np.random.rand(64, 128, 128, 3) tf.keras.backend.set_image_data_format('channels_last') with strategy.scope(): backbone = backbones.ResNet(model_id=50, use_sync_bn=use_sync_bn) model = classification_model.ClassificationModel( backbone=backbone, num_classes=1000, dropout_rate=0.2, ) _ = model(inputs) @combinations.generate( combinations.combine( strategy=[ strategy_combinations.one_device_strategy_gpu, ], data_format=['channels_last', 'channels_first'], input_dim=[1, 3, 4])) def test_data_format_gpu(self, strategy, data_format, input_dim): """Test for different data formats on GPU devices.""" if data_format == 'channels_last': inputs = np.random.rand(2, 128, 128, input_dim) else: inputs = np.random.rand(2, input_dim, 128, 128) input_specs = tf.keras.layers.InputSpec(shape=inputs.shape) tf.keras.backend.set_image_data_format(data_format) with strategy.scope(): backbone = backbones.ResNet(model_id=50, input_specs=input_specs) model = classification_model.ClassificationModel( backbone=backbone, num_classes=1000, input_specs=input_specs, ) _ = model(inputs) def test_serialize_deserialize(self): """Validate the classification net can be serialized and deserialized.""" tf.keras.backend.set_image_data_format('channels_last') backbone = backbones.ResNet(model_id=50) model = classification_model.ClassificationModel( backbone=backbone, num_classes=1000) config = model.get_config() new_model = classification_model.ClassificationModel.from_config(config) # Validate that the config can be forced to JSON. _ = new_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(model.get_config(), new_model.get_config()) if __name__ == '__main__': tf.test.main()
7,007
31.901408
80
py
models
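A minimal construction sketch mirroring test_resnet_network_creation above; the backbone choice, input size, and class count are illustrative assumptions.

import numpy as np
import tensorflow as tf

from official.vision.modeling import backbones
from official.vision.modeling import classification_model

tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.ResNet(model_id=50)
model = classification_model.ClassificationModel(
    backbone=backbone, num_classes=1000, dropout_rate=0.2)

logits = model(np.random.rand(2, 128, 128, 3))  # shape: [2, 1000]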
models-master/official/vision/modeling/factory.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Factory methods to build models.""" from typing import Optional import tensorflow as tf from official.vision.configs import image_classification as classification_cfg from official.vision.configs import maskrcnn as maskrcnn_cfg from official.vision.configs import retinanet as retinanet_cfg from official.vision.configs import semantic_segmentation as segmentation_cfg from official.vision.modeling import backbones from official.vision.modeling import classification_model from official.vision.modeling import decoders from official.vision.modeling import maskrcnn_model from official.vision.modeling import retinanet_model from official.vision.modeling import segmentation_model from official.vision.modeling.heads import dense_prediction_heads from official.vision.modeling.heads import instance_heads from official.vision.modeling.heads import segmentation_heads from official.vision.modeling.layers import detection_generator from official.vision.modeling.layers import mask_sampler from official.vision.modeling.layers import roi_aligner from official.vision.modeling.layers import roi_generator from official.vision.modeling.layers import roi_sampler def build_classification_model( input_specs: tf.keras.layers.InputSpec, model_config: classification_cfg.ImageClassificationModel, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, skip_logits_layer: bool = False, backbone: Optional[tf.keras.Model] = None) -> tf.keras.Model: """Builds the classification model.""" norm_activation_config = model_config.norm_activation if not backbone: backbone = backbones.factory.build_backbone( input_specs=input_specs, backbone_config=model_config.backbone, norm_activation_config=norm_activation_config, l2_regularizer=l2_regularizer) model = classification_model.ClassificationModel( backbone=backbone, num_classes=model_config.num_classes, input_specs=input_specs, dropout_rate=model_config.dropout_rate, kernel_initializer=model_config.kernel_initializer, kernel_regularizer=l2_regularizer, add_head_batch_norm=model_config.add_head_batch_norm, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, skip_logits_layer=skip_logits_layer) return model def build_maskrcnn(input_specs: tf.keras.layers.InputSpec, model_config: maskrcnn_cfg.MaskRCNN, l2_regularizer: Optional[ tf.keras.regularizers.Regularizer] = None, backbone: Optional[tf.keras.Model] = None, decoder: Optional[tf.keras.Model] = None) -> tf.keras.Model: """Builds Mask R-CNN model.""" norm_activation_config = model_config.norm_activation if not backbone: backbone = backbones.factory.build_backbone( input_specs=input_specs, backbone_config=model_config.backbone, norm_activation_config=norm_activation_config, l2_regularizer=l2_regularizer) backbone_features = backbone(tf.keras.Input(input_specs.shape[1:])) if not decoder: decoder = decoders.factory.build_decoder( 
input_specs=backbone.output_specs, model_config=model_config, l2_regularizer=l2_regularizer) rpn_head_config = model_config.rpn_head roi_generator_config = model_config.roi_generator roi_sampler_config = model_config.roi_sampler roi_aligner_config = model_config.roi_aligner detection_head_config = model_config.detection_head generator_config = model_config.detection_generator num_anchors_per_location = ( len(model_config.anchor.aspect_ratios) * model_config.anchor.num_scales) rpn_head = dense_prediction_heads.RPNHead( min_level=model_config.min_level, max_level=model_config.max_level, num_anchors_per_location=num_anchors_per_location, num_convs=rpn_head_config.num_convs, num_filters=rpn_head_config.num_filters, use_separable_conv=rpn_head_config.use_separable_conv, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer) detection_head = instance_heads.DetectionHead( num_classes=model_config.num_classes, num_convs=detection_head_config.num_convs, num_filters=detection_head_config.num_filters, use_separable_conv=detection_head_config.use_separable_conv, num_fcs=detection_head_config.num_fcs, fc_dims=detection_head_config.fc_dims, class_agnostic_bbox_pred=detection_head_config.class_agnostic_bbox_pred, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer, name='detection_head') if decoder: decoder_features = decoder(backbone_features) rpn_head(decoder_features) if roi_sampler_config.cascade_iou_thresholds: detection_head_cascade = [detection_head] for cascade_num in range(len(roi_sampler_config.cascade_iou_thresholds)): detection_head = instance_heads.DetectionHead( num_classes=model_config.num_classes, num_convs=detection_head_config.num_convs, num_filters=detection_head_config.num_filters, use_separable_conv=detection_head_config.use_separable_conv, num_fcs=detection_head_config.num_fcs, fc_dims=detection_head_config.fc_dims, class_agnostic_bbox_pred=detection_head_config .class_agnostic_bbox_pred, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer, name='detection_head_{}'.format(cascade_num + 1)) detection_head_cascade.append(detection_head) detection_head = detection_head_cascade roi_generator_obj = roi_generator.MultilevelROIGenerator( pre_nms_top_k=roi_generator_config.pre_nms_top_k, pre_nms_score_threshold=roi_generator_config.pre_nms_score_threshold, pre_nms_min_size_threshold=( roi_generator_config.pre_nms_min_size_threshold), nms_iou_threshold=roi_generator_config.nms_iou_threshold, num_proposals=roi_generator_config.num_proposals, test_pre_nms_top_k=roi_generator_config.test_pre_nms_top_k, test_pre_nms_score_threshold=( roi_generator_config.test_pre_nms_score_threshold), test_pre_nms_min_size_threshold=( roi_generator_config.test_pre_nms_min_size_threshold), test_nms_iou_threshold=roi_generator_config.test_nms_iou_threshold, test_num_proposals=roi_generator_config.test_num_proposals, use_batched_nms=roi_generator_config.use_batched_nms) roi_sampler_cascade = [] roi_sampler_obj = roi_sampler.ROISampler( mix_gt_boxes=roi_sampler_config.mix_gt_boxes, 
num_sampled_rois=roi_sampler_config.num_sampled_rois, foreground_fraction=roi_sampler_config.foreground_fraction, foreground_iou_threshold=roi_sampler_config.foreground_iou_threshold, background_iou_high_threshold=( roi_sampler_config.background_iou_high_threshold), background_iou_low_threshold=( roi_sampler_config.background_iou_low_threshold)) roi_sampler_cascade.append(roi_sampler_obj) # Initialize additional roi simplers for cascade heads. if roi_sampler_config.cascade_iou_thresholds: for iou in roi_sampler_config.cascade_iou_thresholds: roi_sampler_obj = roi_sampler.ROISampler( mix_gt_boxes=False, num_sampled_rois=roi_sampler_config.num_sampled_rois, foreground_iou_threshold=iou, background_iou_high_threshold=iou, background_iou_low_threshold=0.0, skip_subsampling=True) roi_sampler_cascade.append(roi_sampler_obj) roi_aligner_obj = roi_aligner.MultilevelROIAligner( crop_size=roi_aligner_config.crop_size, sample_offset=roi_aligner_config.sample_offset) detection_generator_obj = detection_generator.DetectionGenerator( apply_nms=generator_config.apply_nms, pre_nms_top_k=generator_config.pre_nms_top_k, pre_nms_score_threshold=generator_config.pre_nms_score_threshold, nms_iou_threshold=generator_config.nms_iou_threshold, max_num_detections=generator_config.max_num_detections, nms_version=generator_config.nms_version, use_cpu_nms=generator_config.use_cpu_nms, soft_nms_sigma=generator_config.soft_nms_sigma, use_sigmoid_probability=generator_config.use_sigmoid_probability) if model_config.include_mask: mask_head = instance_heads.MaskHead( num_classes=model_config.num_classes, upsample_factor=model_config.mask_head.upsample_factor, num_convs=model_config.mask_head.num_convs, num_filters=model_config.mask_head.num_filters, use_separable_conv=model_config.mask_head.use_separable_conv, activation=model_config.norm_activation.activation, norm_momentum=model_config.norm_activation.norm_momentum, norm_epsilon=model_config.norm_activation.norm_epsilon, kernel_regularizer=l2_regularizer, class_agnostic=model_config.mask_head.class_agnostic) mask_sampler_obj = mask_sampler.MaskSampler( mask_target_size=( model_config.mask_roi_aligner.crop_size * model_config.mask_head.upsample_factor), num_sampled_masks=model_config.mask_sampler.num_sampled_masks) mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner( crop_size=model_config.mask_roi_aligner.crop_size, sample_offset=model_config.mask_roi_aligner.sample_offset) else: mask_head = None mask_sampler_obj = None mask_roi_aligner_obj = None model = maskrcnn_model.MaskRCNNModel( backbone=backbone, decoder=decoder, rpn_head=rpn_head, detection_head=detection_head, roi_generator=roi_generator_obj, roi_sampler=roi_sampler_cascade, roi_aligner=roi_aligner_obj, detection_generator=detection_generator_obj, mask_head=mask_head, mask_sampler=mask_sampler_obj, mask_roi_aligner=mask_roi_aligner_obj, class_agnostic_bbox_pred=detection_head_config.class_agnostic_bbox_pred, cascade_class_ensemble=detection_head_config.cascade_class_ensemble, min_level=model_config.min_level, max_level=model_config.max_level, num_scales=model_config.anchor.num_scales, aspect_ratios=model_config.anchor.aspect_ratios, anchor_size=model_config.anchor.anchor_size, outer_boxes_scale=model_config.outer_boxes_scale) return model def build_retinanet( input_specs: tf.keras.layers.InputSpec, model_config: retinanet_cfg.RetinaNet, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, backbone: Optional[tf.keras.Model] = None, decoder: Optional[tf.keras.Model] = None ) -> tf.keras.Model: 
"""Builds RetinaNet model.""" norm_activation_config = model_config.norm_activation if not backbone: backbone = backbones.factory.build_backbone( input_specs=input_specs, backbone_config=model_config.backbone, norm_activation_config=norm_activation_config, l2_regularizer=l2_regularizer) backbone_features = backbone(tf.keras.Input(input_specs.shape[1:])) if not decoder: decoder = decoders.factory.build_decoder( input_specs=backbone.output_specs, model_config=model_config, l2_regularizer=l2_regularizer) head_config = model_config.head generator_config = model_config.detection_generator num_anchors_per_location = ( len(model_config.anchor.aspect_ratios) * model_config.anchor.num_scales) head = dense_prediction_heads.RetinaNetHead( min_level=model_config.min_level, max_level=model_config.max_level, num_classes=model_config.num_classes, num_anchors_per_location=num_anchors_per_location, num_convs=head_config.num_convs, num_filters=head_config.num_filters, attribute_heads=[ cfg.as_dict() for cfg in (head_config.attribute_heads or []) ], share_classification_heads=head_config.share_classification_heads, use_separable_conv=head_config.use_separable_conv, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer, share_level_convs=head_config.share_level_convs, ) # Builds decoder and head so that their trainable weights are initialized if decoder: decoder_features = decoder(backbone_features) _ = head(decoder_features) # Add `input_image_size` into `tflite_post_processing_config`. tflite_post_processing_config = generator_config.tflite_post_processing.as_dict( ) tflite_post_processing_config['input_image_size'] = (input_specs.shape[1], input_specs.shape[2]) detection_generator_obj = detection_generator.MultilevelDetectionGenerator( apply_nms=generator_config.apply_nms, pre_nms_top_k=generator_config.pre_nms_top_k, pre_nms_score_threshold=generator_config.pre_nms_score_threshold, nms_iou_threshold=generator_config.nms_iou_threshold, max_num_detections=generator_config.max_num_detections, nms_version=generator_config.nms_version, use_cpu_nms=generator_config.use_cpu_nms, soft_nms_sigma=generator_config.soft_nms_sigma, tflite_post_processing_config=tflite_post_processing_config, return_decoded=generator_config.return_decoded, use_class_agnostic_nms=generator_config.use_class_agnostic_nms, ) model = retinanet_model.RetinaNetModel( backbone, decoder, head, detection_generator_obj, min_level=model_config.min_level, max_level=model_config.max_level, num_scales=model_config.anchor.num_scales, aspect_ratios=model_config.anchor.aspect_ratios, anchor_size=model_config.anchor.anchor_size) return model def build_segmentation_model( input_specs: tf.keras.layers.InputSpec, model_config: segmentation_cfg.SemanticSegmentationModel, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, backbone: Optional[tf.keras.Model] = None, decoder: Optional[tf.keras.Model] = None ) -> tf.keras.Model: """Builds Segmentation model.""" norm_activation_config = model_config.norm_activation if not backbone: backbone = backbones.factory.build_backbone( input_specs=input_specs, backbone_config=model_config.backbone, norm_activation_config=norm_activation_config, l2_regularizer=l2_regularizer) if not decoder: decoder = decoders.factory.build_decoder( input_specs=backbone.output_specs, model_config=model_config, l2_regularizer=l2_regularizer) head_config 
= model_config.head head = segmentation_heads.SegmentationHead( num_classes=model_config.num_classes, level=head_config.level, num_convs=head_config.num_convs, prediction_kernel_size=head_config.prediction_kernel_size, num_filters=head_config.num_filters, use_depthwise_convolution=head_config.use_depthwise_convolution, upsample_factor=head_config.upsample_factor, feature_fusion=head_config.feature_fusion, low_level=head_config.low_level, low_level_num_filters=head_config.low_level_num_filters, activation=norm_activation_config.activation, logit_activation=head_config.logit_activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer) mask_scoring_head = None if model_config.mask_scoring_head: mask_scoring_head = segmentation_heads.MaskScoring( num_classes=model_config.num_classes, **model_config.mask_scoring_head.as_dict(), activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer) model = segmentation_model.SegmentationModel( backbone, decoder, head, mask_scoring_head=mask_scoring_head) return model
17,496
42.633416
82
py
models
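A hedged sketch of driving build_retinanet from a config object instead of hand-assembling the components. The class count, input size, and reliance on the default RetinaNet config (ResNet-50 backbone plus FPN decoder) are assumptions for illustration.

import tensorflow as tf

from official.vision.configs import retinanet as retinanet_cfg
from official.vision.modeling import factory

# Assumption: the default config only needs the number of classes filled in.
model_config = retinanet_cfg.RetinaNet(num_classes=91)
input_specs = tf.keras.layers.InputSpec(shape=[None, 640, 640, 3])

model = factory.build_retinanet(
    input_specs=input_specs,
    model_config=model_config,
    l2_regularizer=tf.keras.regularizers.l2(1e-4))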
models-master/official/vision/modeling/factory_3d.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Factory methods to build models.""" # Import libraries import tensorflow as tf from official.core import registry from official.vision.configs import video_classification as video_classification_cfg from official.vision.modeling import video_classification_model from official.vision.modeling import backbones _REGISTERED_MODEL_CLS = {} def register_model_builder(key: str): """Decorates a builder of model class. The builder should be a Callable (a class or a function). This decorator supports registration of backbone builder as follows: ``` class MyModel(tf.keras.Model): pass @register_backbone_builder('mybackbone') def builder(input_specs, config, l2_reg): return MyModel(...) # Builds a MyModel object. my_backbone = build_backbone_3d(input_specs, config, l2_reg) ``` Args: key: the key to look up the builder. Returns: A callable for use as class decorator that registers the decorated class for creation from an instance of model class. """ return registry.register(_REGISTERED_MODEL_CLS, key) def build_model( model_type: str, input_specs: tf.keras.layers.InputSpec, model_config: video_classification_cfg.hyperparams.Config, num_classes: int, l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: """Builds backbone from a config. Args: model_type: string name of model type. It should be consistent with ModelConfig.model_type. input_specs: tf.keras.layers.InputSpec. model_config: a OneOfConfig. Model config. num_classes: number of classes. l2_regularizer: tf.keras.regularizers.Regularizer instance. Default to None. Returns: tf.keras.Model instance of the backbone. """ model_builder = registry.lookup(_REGISTERED_MODEL_CLS, model_type) return model_builder(input_specs, model_config, num_classes, l2_regularizer) @register_model_builder('video_classification') def build_video_classification_model( input_specs: tf.keras.layers.InputSpec, model_config: video_classification_cfg.VideoClassificationModel, num_classes: int, l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: """Builds the video classification model.""" input_specs_dict = {'image': input_specs} norm_activation_config = model_config.norm_activation backbone = backbones.factory.build_backbone( input_specs=input_specs, backbone_config=model_config.backbone, norm_activation_config=norm_activation_config, l2_regularizer=l2_regularizer) model = video_classification_model.VideoClassificationModel( backbone=backbone, num_classes=num_classes, input_specs=input_specs_dict, dropout_rate=model_config.dropout_rate, aggregate_endpoints=model_config.aggregate_endpoints, kernel_regularizer=l2_regularizer, require_endpoints=model_config.require_endpoints) return model
3,530
32.951923
84
py
models
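A hedged sketch of building a video classifier through the registry defined above; the input shape, the class count, and the assumption that the default VideoClassificationModel config is buildable as-is are all illustrative.

import tensorflow as tf

from official.vision.configs import video_classification as video_classification_cfg
from official.vision.modeling import factory_3d

input_specs = tf.keras.layers.InputSpec(shape=[None, 8, 112, 112, 3])
model_config = video_classification_cfg.VideoClassificationModel()

# 'video_classification' is the key registered via register_model_builder above.
model = factory_3d.build_model(
    model_type='video_classification',
    input_specs=input_specs,
    model_config=model_config,
    num_classes=400)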
models-master/official/vision/modeling/video_classification_model_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for video classification network.""" # Import libraries from absl.testing import parameterized import numpy as np import tensorflow as tf from official.vision.modeling import backbones from official.vision.modeling import video_classification_model class VideoClassificationNetworkTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (50, 8, 112, 'relu', False), (50, 8, 112, 'swish', True), ) def test_resnet3d_network_creation(self, model_id, temporal_size, spatial_size, activation, aggregate_endpoints): """Test for creation of a ResNet3D-50 classifier.""" input_specs = tf.keras.layers.InputSpec( shape=[None, temporal_size, spatial_size, spatial_size, 3]) temporal_strides = [1, 1, 1, 1] temporal_kernel_sizes = [(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1), (1, 3, 1)] tf.keras.backend.set_image_data_format('channels_last') backbone = backbones.ResNet3D( model_id=model_id, temporal_strides=temporal_strides, temporal_kernel_sizes=temporal_kernel_sizes, input_specs=input_specs, activation=activation) num_classes = 1000 model = video_classification_model.VideoClassificationModel( backbone=backbone, num_classes=num_classes, input_specs={'image': input_specs}, dropout_rate=0.2, aggregate_endpoints=aggregate_endpoints, ) inputs = np.random.rand(2, temporal_size, spatial_size, spatial_size, 3) logits = model(inputs) self.assertAllEqual([2, num_classes], logits.numpy().shape) def test_serialize_deserialize(self): """Validate the classification network can be serialized and deserialized.""" model_id = 50 temporal_strides = [1, 1, 1, 1] temporal_kernel_sizes = [(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1), (1, 3, 1)] backbone = backbones.ResNet3D( model_id=model_id, temporal_strides=temporal_strides, temporal_kernel_sizes=temporal_kernel_sizes) model = video_classification_model.VideoClassificationModel( backbone=backbone, num_classes=1000) config = model.get_config() new_model = video_classification_model.VideoClassificationModel.from_config( config) # Validate that the config can be forced to JSON. _ = new_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(model.get_config(), new_model.get_config()) if __name__ == '__main__': tf.test.main()
3,261
34.456522
81
py
models
models-master/official/vision/modeling/segmentation_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Build segmentation models.""" from typing import Any, Mapping, Union, Optional, Dict # Import libraries import tensorflow as tf layers = tf.keras.layers @tf.keras.utils.register_keras_serializable(package='Vision') class SegmentationModel(tf.keras.Model): """A Segmentation class model. Input images are passed through backbone first. Decoder network is then applied, and finally, segmentation head is applied on the output of the decoder network. Layers such as ASPP should be part of decoder. Any feature fusion is done as part of the segmentation head (i.e. deeplabv3+ feature fusion is not part of the decoder, instead it is part of the segmentation head). This way, different feature fusion techniques can be combined with different backbones, and decoders. """ def __init__(self, backbone: tf.keras.Model, decoder: tf.keras.Model, head: tf.keras.layers.Layer, mask_scoring_head: Optional[tf.keras.layers.Layer] = None, **kwargs): """Segmentation initialization function. Args: backbone: a backbone network. decoder: a decoder network. E.g. FPN. head: segmentation head. mask_scoring_head: mask scoring head. **kwargs: keyword arguments to be passed. """ super(SegmentationModel, self).__init__(**kwargs) self._config_dict = { 'backbone': backbone, 'decoder': decoder, 'head': head, 'mask_scoring_head': mask_scoring_head, } self.backbone = backbone self.decoder = decoder self.head = head self.mask_scoring_head = mask_scoring_head def call(self, inputs: tf.Tensor, training: bool = None # pytype: disable=signature-mismatch # overriding-parameter-count-checks ) -> Dict[str, tf.Tensor]: backbone_features = self.backbone(inputs) if self.decoder: decoder_features = self.decoder(backbone_features) else: decoder_features = backbone_features logits = self.head((backbone_features, decoder_features)) outputs = {'logits': logits} if self.mask_scoring_head: mask_scores = self.mask_scoring_head(logits) outputs.update({'mask_scores': mask_scores}) return outputs @property def checkpoint_items( self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]: """Returns a dictionary of items to be additionally checkpointed.""" items = dict(backbone=self.backbone, head=self.head) if self.decoder is not None: items.update(decoder=self.decoder) if self.mask_scoring_head is not None: items.update(mask_scoring_head=self.mask_scoring_head) return items def get_config(self) -> Mapping[str, Any]: return self._config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
3,428
35.094737
132
py
models
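A minimal usage sketch following the pattern in segmentation_model_test.py earlier in this dump; the input size, head level, and class count are illustrative.

import numpy as np
import tensorflow as tf

from official.vision.modeling import backbones
from official.vision.modeling import segmentation_model
from official.vision.modeling.decoders import fpn
from official.vision.modeling.heads import segmentation_heads

backbone = backbones.ResNet(model_id=50)
decoder = fpn.FPN(input_specs=backbone.output_specs, min_level=3, max_level=7)
head = segmentation_heads.SegmentationHead(num_classes=10, level=3)

model = segmentation_model.SegmentationModel(
    backbone=backbone, decoder=decoder, head=head)

outputs = model(np.random.rand(2, 128, 128, 3))
# outputs['logits'] has spatial size input_size // 2**level, i.e. [2, 16, 16, 10].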
models-master/official/vision/modeling/decoders/aspp.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of Atrous Spatial Pyramid Pooling (ASPP) decoder.""" from typing import Any, List, Mapping, Optional, Union # Import libraries import tensorflow as tf from official.modeling import hyperparams from official.vision.modeling.decoders import factory from official.vision.modeling.layers import deeplab from official.vision.modeling.layers import nn_layers TensorMapUnion = Union[tf.Tensor, Mapping[str, tf.Tensor]] @tf.keras.utils.register_keras_serializable(package='Vision') class ASPP(tf.keras.layers.Layer): """Creates an Atrous Spatial Pyramid Pooling (ASPP) layer.""" def __init__( self, level: int, dilation_rates: List[int], num_filters: int = 256, pool_kernel_size: Optional[int] = None, use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, activation: str = 'relu', dropout_rate: float = 0.0, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, interpolation: str = 'bilinear', use_depthwise_convolution: bool = False, spp_layer_version: str = 'v1', output_tensor: bool = False, **kwargs): """Initializes an Atrous Spatial Pyramid Pooling (ASPP) layer. Args: level: An `int` level to apply ASPP. dilation_rates: A `list` of dilation rates. num_filters: An `int` number of output filters in ASPP. pool_kernel_size: A `list` of [height, width] of pooling kernel size or None. Pooling size is with respect to original image size, it will be scaled down by 2**level. If None, global average pooling is used. use_sync_bn: A `bool`. If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. activation: A `str` activation to be used in ASPP. dropout_rate: A `float` rate for dropout regularization. kernel_initializer: A `str` name of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. interpolation: A `str` of interpolation method. It should be one of `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`, `gaussian`, or `mitchellcubic`. use_depthwise_convolution: If True depthwise separable convolutions will be added to the Atrous spatial pyramid pooling. spp_layer_version: A `str` of spatial pyramid pooling layer version. output_tensor: Whether to output a single tensor or a dictionary of tensor. Default is false. **kwargs: Additional keyword arguments to be passed. 
""" super().__init__(**kwargs) self._config_dict = { 'level': level, 'dilation_rates': dilation_rates, 'num_filters': num_filters, 'pool_kernel_size': pool_kernel_size, 'use_sync_bn': use_sync_bn, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, 'activation': activation, 'dropout_rate': dropout_rate, 'kernel_initializer': kernel_initializer, 'kernel_regularizer': kernel_regularizer, 'interpolation': interpolation, 'use_depthwise_convolution': use_depthwise_convolution, 'spp_layer_version': spp_layer_version, 'output_tensor': output_tensor } self._aspp_layer = deeplab.SpatialPyramidPooling if self._config_dict[ 'spp_layer_version'] == 'v1' else nn_layers.SpatialPyramidPooling def build(self, input_shape): pool_kernel_size = None if self._config_dict['pool_kernel_size']: pool_kernel_size = [ int(p_size // 2**self._config_dict['level']) for p_size in self._config_dict['pool_kernel_size'] # pytype: disable=attribute-error # trace-all-classes ] self.aspp = self._aspp_layer( output_channels=self._config_dict['num_filters'], dilation_rates=self._config_dict['dilation_rates'], pool_kernel_size=pool_kernel_size, use_sync_bn=self._config_dict['use_sync_bn'], batchnorm_momentum=self._config_dict['norm_momentum'], batchnorm_epsilon=self._config_dict['norm_epsilon'], activation=self._config_dict['activation'], dropout=self._config_dict['dropout_rate'], kernel_initializer=self._config_dict['kernel_initializer'], kernel_regularizer=self._config_dict['kernel_regularizer'], interpolation=self._config_dict['interpolation'], use_depthwise_convolution=self._config_dict['use_depthwise_convolution'] ) def call(self, inputs: TensorMapUnion) -> TensorMapUnion: """Calls the Atrous Spatial Pyramid Pooling (ASPP) layer on an input. The output of ASPP will be a dict of {`level`, `tf.Tensor`} even if only one level is present, if output_tensor is false. Hence, this will be compatible with the rest of the segmentation model interfaces. If output_tensor is true, a single tensot is output. Args: inputs: A `tf.Tensor` of shape [batch, height_l, width_l, filter_size] or a `dict` of `tf.Tensor` where - key: A `str` of the level of the multilevel feature maps. - values: A `tf.Tensor` of shape [batch, height_l, width_l, filter_size]. Returns: A `tf.Tensor` of shape [batch, height_l, width_l, filter_size] or a `dict` of `tf.Tensor` where - key: A `str` of the level of the multilevel feature maps. - values: A `tf.Tensor` of output of ASPP module. """ outputs = {} level = str(self._config_dict['level']) backbone_output = inputs[level] if isinstance(inputs, dict) else inputs outputs = self.aspp(backbone_output) return outputs if self._config_dict['output_tensor'] else {level: outputs} def get_config(self) -> Mapping[str, Any]: base_config = super().get_config() return dict(list(base_config.items()) + list(self._config_dict.items())) @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @factory.register_decoder_builder('aspp') def build_aspp_decoder( input_specs: Mapping[str, tf.TensorShape], model_config: hyperparams.Config, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None ) -> tf.keras.Model: """Builds ASPP decoder from a config. Args: input_specs: A `dict` of input specifications. A dictionary consists of {level: TensorShape} from a backbone. Note this is for consistent interface, and is not used by ASPP decoder. model_config: A OneOfConfig. Model config. l2_regularizer: A `tf.keras.regularizers.Regularizer` instance. Default to None. 
Returns: A `tf.keras.Model` instance of the ASPP decoder. Raises: ValueError: If the model_config.decoder.type is not `aspp`. """ del input_specs # input_specs is not used by ASPP decoder. decoder_type = model_config.decoder.type decoder_cfg = model_config.decoder.get() if decoder_type != 'aspp': raise ValueError(f'Inconsistent decoder type {decoder_type}. ' 'Need to be `aspp`.') norm_activation_config = model_config.norm_activation return ASPP( level=decoder_cfg.level, dilation_rates=decoder_cfg.dilation_rates, num_filters=decoder_cfg.num_filters, use_depthwise_convolution=decoder_cfg.use_depthwise_convolution, pool_kernel_size=decoder_cfg.pool_kernel_size, dropout_rate=decoder_cfg.dropout_rate, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, activation=norm_activation_config.activation, kernel_regularizer=l2_regularizer, spp_layer_version=decoder_cfg.spp_layer_version, output_tensor=decoder_cfg.output_tensor)
8,600
41.161765
117
py
models
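A minimal usage sketch of the ASPP layer on multilevel backbone features, matching the pattern exercised in aspp_test.py below; the dilation rates and input size are illustrative.

import tensorflow as tf

from official.vision.modeling.backbones import resnet
from official.vision.modeling.decoders import aspp

backbone = resnet.ResNet(model_id=50)
network = aspp.ASPP(level=3, dilation_rates=[6, 12, 18], num_filters=256)

inputs = tf.keras.Input(shape=(256, 256, 3), batch_size=1)
endpoints = backbone(inputs)   # dict of multilevel features keyed by level string
feats = network(endpoints)     # {'3': tensor of shape [1, 32, 32, 256]}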
models-master/official/vision/modeling/decoders/aspp_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for aspp.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.backbones import resnet from official.vision.modeling.decoders import aspp class ASPPTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (3, [6, 12, 18, 24], 128, 'v1'), (3, [6, 12, 18], 128, 'v1'), (3, [6, 12], 256, 'v1'), (4, [6, 12, 18, 24], 128, 'v2'), (4, [6, 12, 18], 128, 'v2'), (4, [6, 12], 256, 'v2'), ) def test_network_creation(self, level, dilation_rates, num_filters, spp_layer_version): """Test creation of ASPP.""" input_size = 256 tf.keras.backend.set_image_data_format('channels_last') inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) backbone = resnet.ResNet(model_id=50) network = aspp.ASPP( level=level, dilation_rates=dilation_rates, num_filters=num_filters, spp_layer_version=spp_layer_version) endpoints = backbone(inputs) feats = network(endpoints) self.assertIn(str(level), feats) self.assertAllEqual( [1, input_size // 2**level, input_size // 2**level, num_filters], feats[str(level)].shape.as_list()) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( level=3, dilation_rates=[6, 12], num_filters=256, pool_kernel_size=None, use_sync_bn=False, norm_momentum=0.99, norm_epsilon=0.001, activation='relu', kernel_initializer='VarianceScaling', kernel_regularizer=None, interpolation='bilinear', dropout_rate=0.2, use_depthwise_convolution='false', spp_layer_version='v1', output_tensor=False, dtype='float32', name='aspp', trainable=True) network = aspp.ASPP(**kwargs) expected_config = dict(kwargs) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = aspp.ASPP.from_config(network.get_config()) # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == '__main__': tf.test.main()
3,001
30.93617
79
py
models
models-master/official/vision/modeling/decoders/fpn_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for FPN.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.backbones import mobilenet from official.vision.modeling.backbones import resnet from official.vision.modeling.decoders import fpn class FPNTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (256, 3, 7, False, False, 'sum'), (256, 3, 7, False, True, 'sum'), (256, 3, 7, True, False, 'concat'), (256, 3, 7, True, True, 'concat'), ) def test_network_creation(self, input_size, min_level, max_level, use_separable_conv, use_keras_layer, fusion_type): """Test creation of FPN.""" tf.keras.backend.set_image_data_format('channels_last') inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) backbone = resnet.ResNet(model_id=50) network = fpn.FPN( input_specs=backbone.output_specs, min_level=min_level, max_level=max_level, fusion_type=fusion_type, use_separable_conv=use_separable_conv, use_keras_layer=use_keras_layer) endpoints = backbone(inputs) feats = network(endpoints) for level in range(min_level, max_level + 1): self.assertIn(str(level), feats) self.assertAllEqual( [1, input_size // 2**level, input_size // 2**level, 256], feats[str(level)].shape.as_list()) @parameterized.parameters( (256, 3, 7, False, False), (256, 3, 7, False, True), (256, 3, 7, True, False), (256, 3, 7, True, True), ) def test_network_creation_with_mobilenet(self, input_size, min_level, max_level, use_separable_conv, use_keras_layer): """Test creation of FPN with mobilenet backbone.""" tf.keras.backend.set_image_data_format('channels_last') inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) backbone = mobilenet.MobileNet(model_id='MobileNetV2') network = fpn.FPN( input_specs=backbone.output_specs, min_level=min_level, max_level=max_level, use_separable_conv=use_separable_conv, use_keras_layer=use_keras_layer) endpoints = backbone(inputs) feats = network(endpoints) for level in range(min_level, max_level + 1): self.assertIn(str(level), feats) self.assertAllEqual( [1, input_size // 2**level, input_size // 2**level, 256], feats[str(level)].shape.as_list()) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( input_specs=resnet.ResNet(model_id=50).output_specs, min_level=3, max_level=7, num_filters=256, fusion_type='sum', use_separable_conv=False, use_keras_layer=False, use_sync_bn=False, activation='relu', norm_momentum=0.99, norm_epsilon=0.001, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, ) network = fpn.FPN(**kwargs) expected_config = dict(kwargs) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = fpn.FPN.from_config(network.get_config()) # Validate that the config can be forced to JSON. 
_ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == '__main__': tf.test.main()
4,274
33.2
79
py
models
models-master/official/vision/modeling/decoders/nasfpn_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for NAS-FPN.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.backbones import resnet from official.vision.modeling.decoders import nasfpn class NASFPNTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (256, 3, 7, False), (256, 3, 7, True), ) def test_network_creation(self, input_size, min_level, max_level, use_separable_conv): """Test creation of NAS-FPN.""" tf.keras.backend.set_image_data_format('channels_last') inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) num_filters = 256 backbone = resnet.ResNet(model_id=50) network = nasfpn.NASFPN( input_specs=backbone.output_specs, min_level=min_level, max_level=max_level, num_filters=num_filters, use_separable_conv=use_separable_conv) endpoints = backbone(inputs) feats = network(endpoints) for level in range(min_level, max_level + 1): self.assertIn(str(level), feats) self.assertAllEqual( [1, input_size // 2**level, input_size // 2**level, num_filters], feats[str(level)].shape.as_list()) if __name__ == '__main__': tf.test.main()
1,887
31
76
py
models
models-master/official/vision/modeling/decoders/nasfpn.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of NAS-FPN.""" from typing import Any, List, Mapping, Optional, Tuple # Import libraries from absl import logging import tensorflow as tf from official.modeling import hyperparams from official.modeling import tf_utils from official.vision.modeling.decoders import factory from official.vision.ops import spatial_transform_ops # The fixed NAS-FPN architecture discovered by NAS. # Each element represents a specification of a building block: # (block_level, combine_fn, (input_offset0, input_offset1), is_output). NASFPN_BLOCK_SPECS = [ (4, 'attention', (1, 3), False), (4, 'sum', (1, 5), False), (3, 'sum', (0, 6), True), (4, 'sum', (6, 7), True), (5, 'attention', (7, 8), True), (7, 'attention', (6, 9), True), (6, 'attention', (9, 10), True), ] class BlockSpec(): """A container class that specifies the block configuration for NAS-FPN.""" def __init__(self, level: int, combine_fn: str, input_offsets: Tuple[int, int], is_output: bool): self.level = level self.combine_fn = combine_fn self.input_offsets = input_offsets self.is_output = is_output def build_block_specs( block_specs: Optional[List[Tuple[Any, ...]]] = None) -> List[BlockSpec]: """Builds the list of BlockSpec objects for NAS-FPN.""" if not block_specs: block_specs = NASFPN_BLOCK_SPECS logging.info('Building NAS-FPN block specs: %s', block_specs) return [BlockSpec(*b) for b in block_specs] @tf.keras.utils.register_keras_serializable(package='Vision') class NASFPN(tf.keras.Model): """Creates a NAS-FPN model. This implements the paper: Golnaz Ghiasi, Tsung-Yi Lin, Ruoming Pang, Quoc V. Le. NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection. (https://arxiv.org/abs/1904.07392) """ def __init__( self, input_specs: Mapping[str, tf.TensorShape], min_level: int = 3, max_level: int = 7, block_specs: Optional[List[BlockSpec]] = None, num_filters: int = 256, num_repeats: int = 5, use_separable_conv: bool = False, activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a NAS-FPN model. Args: input_specs: A `dict` of input specifications. A dictionary consists of {level: TensorShape} from a backbone. min_level: An `int` of minimum level in FPN output feature maps. max_level: An `int` of maximum level in FPN output feature maps. block_specs: a list of BlockSpec objects that specifies the NAS-FPN network topology. By default, the previously discovered architecture is used. num_filters: An `int` number of filters in FPN layers. num_repeats: number of repeats for feature pyramid network. use_separable_conv: A `bool`. If True use separable convolution for convolution in FPN layers. activation: A `str` name of the activation function. 
use_sync_bn: A `bool`. If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_initializer: A `str` name of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. **kwargs: Additional keyword arguments to be passed. """ self._config_dict = { 'input_specs': input_specs, 'min_level': min_level, 'max_level': max_level, 'num_filters': num_filters, 'num_repeats': num_repeats, 'use_separable_conv': use_separable_conv, 'activation': activation, 'use_sync_bn': use_sync_bn, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, 'kernel_initializer': kernel_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer, } self._min_level = min_level self._max_level = max_level self._block_specs = ( build_block_specs() if block_specs is None else block_specs ) self._num_repeats = num_repeats self._conv_op = (tf.keras.layers.SeparableConv2D if self._config_dict['use_separable_conv'] else tf.keras.layers.Conv2D) self._norm_op = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._norm_kwargs = { 'axis': self._bn_axis, 'momentum': self._config_dict['norm_momentum'], 'epsilon': self._config_dict['norm_epsilon'], 'synchronized': self._config_dict['use_sync_bn'], } self._activation = tf_utils.get_activation(activation) # Gets input feature pyramid from backbone. inputs = self._build_input_pyramid(input_specs, min_level) # Projects the input features. feats = [] for level in range(self._min_level, self._max_level + 1): if str(level) in inputs.keys(): feats.append(self._resample_feature_map( inputs[str(level)], level, level, self._config_dict['num_filters'])) else: feats.append(self._resample_feature_map( feats[-1], level - 1, level, self._config_dict['num_filters'])) # Repeatly builds the NAS-FPN modules. 
for _ in range(self._num_repeats): output_feats = self._build_feature_pyramid(feats) feats = [output_feats[level] for level in range(self._min_level, self._max_level + 1)] self._output_specs = { str(level): output_feats[level].get_shape() for level in range(min_level, max_level + 1) } output_feats = {str(level): output_feats[level] for level in output_feats.keys()} super(NASFPN, self).__init__(inputs=inputs, outputs=output_feats, **kwargs) def _build_input_pyramid(self, input_specs: Mapping[str, tf.TensorShape], min_level: int): assert isinstance(input_specs, dict) if min(input_specs.keys()) > str(min_level): raise ValueError( 'Backbone min level should be less or equal to FPN min level') inputs = {} for level, spec in input_specs.items(): inputs[level] = tf.keras.Input(shape=spec[1:]) return inputs def _resample_feature_map(self, inputs, input_level, target_level, target_num_filters=256): x = inputs _, _, _, input_num_filters = x.get_shape().as_list() if input_num_filters != target_num_filters: x = self._conv_op( filters=target_num_filters, kernel_size=1, padding='same', **self._conv_kwargs)(x) x = self._norm_op(**self._norm_kwargs)(x) if input_level < target_level: stride = int(2 ** (target_level - input_level)) return tf.keras.layers.MaxPool2D( pool_size=stride, strides=stride, padding='same')(x) if input_level > target_level: scale = int(2 ** (input_level - target_level)) return spatial_transform_ops.nearest_upsampling(x, scale=scale) # Force output x to be the same dtype as mixed precision policy. This avoids # dtype mismatch when one input (by default float32 dtype) does not meet all # the above conditions and is output unchanged, while other inputs are # processed to have different dtype, e.g., using bfloat16 on TPU. compute_dtype = tf.keras.layers.Layer().dtype_policy.compute_dtype if (compute_dtype is not None) and (x.dtype != compute_dtype): return tf.cast(x, dtype=compute_dtype) else: return x @property def _conv_kwargs(self): if self._config_dict['use_separable_conv']: return { 'depthwise_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), 'pointwise_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), 'bias_initializer': tf.zeros_initializer(), 'depthwise_regularizer': self._config_dict['kernel_regularizer'], 'pointwise_regularizer': self._config_dict['kernel_regularizer'], 'bias_regularizer': self._config_dict['bias_regularizer'], } else: return { 'kernel_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), 'bias_initializer': tf.zeros_initializer(), 'kernel_regularizer': self._config_dict['kernel_regularizer'], 'bias_regularizer': self._config_dict['bias_regularizer'], } def _global_attention(self, feat0, feat1): m = tf.math.reduce_max(feat0, axis=[1, 2], keepdims=True) m = tf.math.sigmoid(m) return feat0 + feat1 * m def _build_feature_pyramid(self, feats): num_output_connections = [0] * len(feats) num_output_levels = self._max_level - self._min_level + 1 feat_levels = list(range(self._min_level, self._max_level + 1)) for i, block_spec in enumerate(self._block_specs): new_level = block_spec.level # Checks the range of input_offsets. 
for input_offset in block_spec.input_offsets: if input_offset >= len(feats): raise ValueError( 'input_offset ({}) is larger than num feats({})'.format( input_offset, len(feats))) input0 = block_spec.input_offsets[0] input1 = block_spec.input_offsets[1] # Update graph with inputs. node0 = feats[input0] node0_level = feat_levels[input0] num_output_connections[input0] += 1 node0 = self._resample_feature_map(node0, node0_level, new_level) node1 = feats[input1] node1_level = feat_levels[input1] num_output_connections[input1] += 1 node1 = self._resample_feature_map(node1, node1_level, new_level) # Combine node0 and node1 to create new feat. if block_spec.combine_fn == 'sum': new_node = node0 + node1 elif block_spec.combine_fn == 'attention': if node0_level >= node1_level: new_node = self._global_attention(node0, node1) else: new_node = self._global_attention(node1, node0) else: raise ValueError('unknown combine_fn `{}`.' .format(block_spec.combine_fn)) # Add intermediate nodes that do not have any connections to output. if block_spec.is_output: for j, (feat, feat_level, num_output) in enumerate( zip(feats, feat_levels, num_output_connections)): if num_output == 0 and feat_level == new_level: num_output_connections[j] += 1 feat_ = self._resample_feature_map(feat, feat_level, new_level) new_node += feat_ new_node = self._activation(new_node) new_node = self._conv_op( filters=self._config_dict['num_filters'], kernel_size=(3, 3), padding='same', **self._conv_kwargs)(new_node) new_node = self._norm_op(**self._norm_kwargs)(new_node) feats.append(new_node) feat_levels.append(new_level) num_output_connections.append(0) output_feats = {} for i in range(len(feats) - num_output_levels, len(feats)): level = feat_levels[i] output_feats[level] = feats[i] logging.info('Output feature pyramid: %s', output_feats) return output_feats def get_config(self) -> Mapping[str, Any]: return self._config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def output_specs(self) -> Mapping[str, tf.TensorShape]: """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs @factory.register_decoder_builder('nasfpn') def build_nasfpn_decoder( input_specs: Mapping[str, tf.TensorShape], model_config: hyperparams.Config, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None ) -> tf.keras.Model: """Builds NASFPN decoder from a config. Args: input_specs: A `dict` of input specifications. A dictionary consists of {level: TensorShape} from a backbone. model_config: A OneOfConfig. Model config. l2_regularizer: A `tf.keras.regularizers.Regularizer` instance. Default to None. Returns: A `tf.keras.Model` instance of the NASFPN decoder. Raises: ValueError: If the model_config.decoder.type is not `nasfpn`. """ decoder_type = model_config.decoder.type decoder_cfg = model_config.decoder.get() if decoder_type != 'nasfpn': raise ValueError(f'Inconsistent decoder type {decoder_type}. ' 'Need to be `nasfpn`.') norm_activation_config = model_config.norm_activation return NASFPN( input_specs=input_specs, min_level=model_config.min_level, max_level=model_config.max_level, num_filters=decoder_cfg.num_filters, num_repeats=decoder_cfg.num_repeats, use_separable_conv=decoder_cfg.use_separable_conv, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer)
14,430
37.689008
80
py
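A minimal sketch of instantiating the NAS-FPN decoder defined above with an explicit block topology. It assumes the `official.vision` package is importable and simply reuses the default block specs; each tuple is (block_level, combine_fn, (input_offset0, input_offset1), is_output).

# Sketch: NAS-FPN decoder on top of a ResNet backbone with explicit block specs.
import tensorflow as tf

from official.vision.modeling.backbones import resnet
from official.vision.modeling.decoders import nasfpn

backbone = resnet.ResNet(model_id=50)
block_specs = nasfpn.build_block_specs()  # Defaults to NASFPN_BLOCK_SPECS.
decoder = nasfpn.NASFPN(
    input_specs=backbone.output_specs,
    min_level=3,
    max_level=7,
    block_specs=block_specs,
    num_filters=256,
    num_repeats=5)

inputs = tf.keras.Input(shape=(256, 256, 3), batch_size=1)
feats = decoder(backbone(inputs))  # Keys '3'..'7', each with 256 channels.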
models
models-master/official/vision/modeling/decoders/factory.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Decoder registers and factory method. One can register a new decoder model by the following two steps: 1 Import the factory and register the build in the decoder file. 2 Import the decoder class and add a build in __init__.py. ``` # my_decoder.py from modeling.decoders import factory class MyDecoder(): ... @factory.register_decoder_builder('my_decoder') def build_my_decoder(): return MyDecoder() # decoders/__init__.py adds import from modeling.decoders.my_decoder import MyDecoder ``` If one wants the MyDecoder class to be used only by those binary then don't imported the decoder module in decoders/__init__.py, but import it in place that uses it. """ from typing import Any, Callable, Mapping, Optional, Union # Import libraries import tensorflow as tf from official.core import registry from official.modeling import hyperparams _REGISTERED_DECODER_CLS = {} def register_decoder_builder(key: str) -> Callable[..., Any]: """Decorates a builder of decoder class. The builder should be a Callable (a class or a function). This decorator supports registration of decoder builder as follows: ``` class MyDecoder(tf.keras.Model): pass @register_decoder_builder('mydecoder') def builder(input_specs, config, l2_reg): return MyDecoder(...) # Builds a MyDecoder object. my_decoder = build_decoder_3d(input_specs, config, l2_reg) ``` Args: key: A `str` of key to look up the builder. Returns: A callable for using as class decorator that registers the decorated class for creation from an instance of task_config_cls. """ return registry.register(_REGISTERED_DECODER_CLS, key) @register_decoder_builder('identity') def build_identity( input_specs: Optional[Mapping[str, tf.TensorShape]] = None, model_config: Optional[hyperparams.Config] = None, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None) -> None: """Builds identity decoder from a config. All the input arguments are not used by identity decoder but kept here to ensure the interface is consistent. Args: input_specs: A `dict` of input specifications. A dictionary consists of {level: TensorShape} from a backbone. model_config: A `OneOfConfig` of model config. l2_regularizer: A `tf.keras.regularizers.Regularizer` object. Default to None. Returns: An instance of the identity decoder. """ del input_specs, model_config, l2_regularizer # Unused by identity decoder. def build_decoder( input_specs: Mapping[str, tf.TensorShape], model_config: hyperparams.Config, l2_regularizer: tf.keras.regularizers.Regularizer = None, **kwargs) -> Union[None, tf.keras.Model, tf.keras.layers.Layer]: # pytype: disable=annotation-type-mismatch # typed-keras """Builds decoder from a config. A decoder can be a keras.Model, a keras.layers.Layer, or None. If it is not None, the decoder will take features from the backbone as input and generate decoded feature maps. 
If it is None, such as an identity decoder, the decoder is skipped and features from the backbone are regarded as model output. Args: input_specs: A `dict` of input specifications. A dictionary consists of {level: TensorShape} from a backbone. model_config: A `OneOfConfig` of model config. l2_regularizer: A `tf.keras.regularizers.Regularizer` object. Default to None. **kwargs: Additional keyword args to be passed to decoder builder. Returns: An instance of the decoder. """ decoder_builder = registry.lookup(_REGISTERED_DECODER_CLS, model_config.decoder.type) return decoder_builder( input_specs=input_specs, model_config=model_config, l2_regularizer=l2_regularizer, **kwargs)
4,387
31.264706
127
py
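A minimal sketch of the two-step registration flow described in the factory docstring. `MyDecoder`, `build_my_decoder`, and the `'my_decoder'` key are hypothetical names used only for illustration.

# Sketch: registering a custom decoder builder with the factory.
import tensorflow as tf

from official.vision.modeling.decoders import factory


class MyDecoder(tf.keras.layers.Layer):
  """A trivial decoder that passes backbone features through unchanged."""

  def call(self, inputs):
    return inputs


@factory.register_decoder_builder('my_decoder')
def build_my_decoder(input_specs, model_config, l2_regularizer=None):
  del input_specs, model_config, l2_regularizer  # Unused in this toy example.
  return MyDecoder()

# factory.build_decoder(...) will now dispatch to build_my_decoder whenever
# model_config.decoder.type == 'my_decoder'.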
models
models-master/official/vision/modeling/decoders/fpn.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains the definitions of Feature Pyramid Networks (FPN).""" from typing import Any, Mapping, Optional # Import libraries from absl import logging import tensorflow as tf from official.modeling import hyperparams from official.modeling import tf_utils from official.vision.modeling.decoders import factory from official.vision.ops import spatial_transform_ops @tf.keras.utils.register_keras_serializable(package='Vision') class FPN(tf.keras.Model): """Creates a Feature Pyramid Network (FPN). This implements the paper: Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature Pyramid Networks for Object Detection. (https://arxiv.org/pdf/1612.03144) """ def __init__( self, input_specs: Mapping[str, tf.TensorShape], min_level: int = 3, max_level: int = 7, num_filters: int = 256, fusion_type: str = 'sum', use_separable_conv: bool = False, use_keras_layer: bool = False, activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a Feature Pyramid Network (FPN). Args: input_specs: A `dict` of input specifications. A dictionary consists of {level: TensorShape} from a backbone. min_level: An `int` of minimum level in FPN output feature maps. max_level: An `int` of maximum level in FPN output feature maps. num_filters: An `int` number of filters in FPN layers. fusion_type: A `str` of `sum` or `concat`. Whether performing sum or concat for feature fusion. use_separable_conv: A `bool`. If True use separable convolution for convolution in FPN layers. use_keras_layer: A `bool`. If Ture use keras layers as many as possible. activation: A `str` name of the activation function. use_sync_bn: A `bool`. If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_initializer: A `str` name of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. **kwargs: Additional keyword arguments to be passed. 
""" self._config_dict = { 'input_specs': input_specs, 'min_level': min_level, 'max_level': max_level, 'num_filters': num_filters, 'fusion_type': fusion_type, 'use_separable_conv': use_separable_conv, 'use_keras_layer': use_keras_layer, 'activation': activation, 'use_sync_bn': use_sync_bn, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, 'kernel_initializer': kernel_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer, } if use_separable_conv: conv2d = tf.keras.layers.SeparableConv2D else: conv2d = tf.keras.layers.Conv2D norm = tf.keras.layers.BatchNormalization activation_fn = tf_utils.get_activation(activation, use_keras_layer=True) # Build input feature pyramid. if tf.keras.backend.image_data_format() == 'channels_last': bn_axis = -1 else: bn_axis = 1 # Get input feature pyramid from backbone. logging.info('FPN input_specs: %s', input_specs) inputs = self._build_input_pyramid(input_specs, min_level) backbone_max_level = min(int(max(inputs.keys())), max_level) # Build lateral connections. feats_lateral = {} for level in range(min_level, backbone_max_level + 1): feats_lateral[str(level)] = conv2d( filters=num_filters, kernel_size=1, padding='same', kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, name=f'lateral_{level}')( inputs[str(level)]) # Build top-down path. feats = {str(backbone_max_level): feats_lateral[str(backbone_max_level)]} for level in range(backbone_max_level - 1, min_level - 1, -1): feat_a = spatial_transform_ops.nearest_upsampling( feats[str(level + 1)], 2, use_keras_layer=use_keras_layer) feat_b = feats_lateral[str(level)] if fusion_type == 'sum': if use_keras_layer: feats[str(level)] = tf.keras.layers.Add()([feat_a, feat_b]) else: feats[str(level)] = feat_a + feat_b elif fusion_type == 'concat': if use_keras_layer: feats[str(level)] = tf.keras.layers.Concatenate(axis=-1)( [feat_a, feat_b]) else: feats[str(level)] = tf.concat([feat_a, feat_b], axis=-1) else: raise ValueError('Fusion type {} not supported.'.format(fusion_type)) # TODO(fyangf): experiment with removing bias in conv2d. # Build post-hoc 3x3 convolution kernel. for level in range(min_level, backbone_max_level + 1): feats[str(level)] = conv2d( filters=num_filters, strides=1, kernel_size=3, padding='same', kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, name=f'post_hoc_{level}')( feats[str(level)]) # TODO(fyangf): experiment with removing bias in conv2d. # Build coarser FPN levels introduced for RetinaNet. for level in range(backbone_max_level + 1, max_level + 1): feats_in = feats[str(level - 1)] if level > backbone_max_level + 1: feats_in = activation_fn(feats_in) feats[str(level)] = conv2d( filters=num_filters, strides=2, kernel_size=3, padding='same', kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, name=f'coarser_{level}')( feats_in) # Apply batch norm layers. 
for level in range(min_level, max_level + 1): feats[str(level)] = norm( axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon, synchronized=use_sync_bn, name=f'norm_{level}')( feats[str(level)]) self._output_specs = { str(level): feats[str(level)].get_shape() for level in range(min_level, max_level + 1) } super(FPN, self).__init__(inputs=inputs, outputs=feats, **kwargs) def _build_input_pyramid(self, input_specs: Mapping[str, tf.TensorShape], min_level: int): assert isinstance(input_specs, dict) if min(input_specs.keys()) > str(min_level): raise ValueError( 'Backbone min level should be less or equal to FPN min level') inputs = {} for level, spec in input_specs.items(): inputs[level] = tf.keras.Input(shape=spec[1:]) return inputs def get_config(self) -> Mapping[str, Any]: return self._config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def output_specs(self) -> Mapping[str, tf.TensorShape]: """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs @factory.register_decoder_builder('fpn') def build_fpn_decoder( input_specs: Mapping[str, tf.TensorShape], model_config: hyperparams.Config, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None ) -> tf.keras.Model: """Builds FPN decoder from a config. Args: input_specs: A `dict` of input specifications. A dictionary consists of {level: TensorShape} from a backbone. model_config: A OneOfConfig. Model config. l2_regularizer: A `tf.keras.regularizers.Regularizer` instance. Default to None. Returns: A `tf.keras.Model` instance of the FPN decoder. Raises: ValueError: If the model_config.decoder.type is not `fpn`. """ decoder_type = model_config.decoder.type decoder_cfg = model_config.decoder.get() if decoder_type != 'fpn': raise ValueError(f'Inconsistent decoder type {decoder_type}. ' 'Need to be `fpn`.') norm_activation_config = model_config.norm_activation return FPN( input_specs=input_specs, min_level=model_config.min_level, max_level=model_config.max_level, num_filters=decoder_cfg.num_filters, fusion_type=decoder_cfg.fusion_type, use_separable_conv=decoder_cfg.use_separable_conv, use_keras_layer=decoder_cfg.use_keras_layer, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer)
9,828
36.659004
80
py
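A minimal sketch of attaching the FPN decoder above to a ResNet backbone, assuming the `official.vision` package is importable; the input size is illustrative. Level `l` has spatial size input_size // 2**l and `num_filters` channels.

# Sketch: FPN decoder over ResNet-50 endpoints.
import tensorflow as tf

from official.vision.modeling.backbones import resnet
from official.vision.modeling.decoders import fpn

backbone = resnet.ResNet(model_id=50)
decoder = fpn.FPN(
    input_specs=backbone.output_specs,
    min_level=3,
    max_level=7,
    num_filters=256,
    fusion_type='sum')

inputs = tf.keras.Input(shape=(256, 256, 3), batch_size=1)
feats = decoder(backbone(inputs))
print(feats['3'].shape)  # (1, 32, 32, 256)
print(feats['7'].shape)  # (1, 2, 2, 256)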
models
models-master/official/vision/modeling/layers/deeplab_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for ASPP.""" from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.layers import deeplab class DeeplabTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters( (None,), ([32, 32],), ) def test_aspp(self, pool_kernel_size): del pool_kernel_size inputs = tf.keras.Input(shape=(64, 64, 128), dtype=tf.float32) layer = deeplab.SpatialPyramidPooling(output_channels=256, dilation_rates=[6, 12, 18], pool_kernel_size=None) output = layer(inputs) self.assertAllEqual([None, 64, 64, 256], output.shape) def test_aspp_invalid_shape(self): inputs = tf.keras.Input(shape=(64, 64), dtype=tf.float32) layer = deeplab.SpatialPyramidPooling(output_channels=256, dilation_rates=[6, 12, 18]) with self.assertRaises(ValueError): _ = layer(inputs) def test_config_with_custom_name(self): layer = deeplab.SpatialPyramidPooling(256, [5], name='aspp') config = layer.get_config() layer_1 = deeplab.SpatialPyramidPooling.from_config(config) self.assertEqual(layer_1.name, layer.name) if __name__ == '__main__': tf.test.main()
1,894
34.092593
74
py
models
models-master/official/vision/modeling/layers/roi_sampler.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of ROI sampler.""" from typing import Optional, Tuple, Union # Import libraries import tensorflow as tf from official.vision.modeling.layers import box_sampler from official.vision.ops import box_matcher from official.vision.ops import iou_similarity from official.vision.ops import target_gather # The return type can be a tuple of 4 or 5 tf.Tensor. ROISamplerReturnType = Union[ Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]] @tf.keras.utils.register_keras_serializable(package='Vision') class ROISampler(tf.keras.layers.Layer): """Samples ROIs and assigns targets to the sampled ROIs.""" def __init__(self, mix_gt_boxes: bool = True, num_sampled_rois: int = 512, foreground_fraction: float = 0.25, foreground_iou_threshold: float = 0.5, background_iou_high_threshold: float = 0.5, background_iou_low_threshold: float = 0, skip_subsampling: bool = False, **kwargs): """Initializes a ROI sampler. Args: mix_gt_boxes: A `bool` of whether to mix the groundtruth boxes with proposed ROIs. num_sampled_rois: An `int` of the number of sampled ROIs per image. foreground_fraction: A `float` in [0, 1], what percentage of proposed ROIs should be sampled from the foreground boxes. foreground_iou_threshold: A `float` that represents the IoU threshold for a box to be considered as positive (if >= `foreground_iou_threshold`). background_iou_high_threshold: A `float` that represents the IoU threshold for a box to be considered as negative (if overlap in [`background_iou_low_threshold`, `background_iou_high_threshold`]). background_iou_low_threshold: A `float` that represents the IoU threshold for a box to be considered as negative (if overlap in [`background_iou_low_threshold`, `background_iou_high_threshold`]) skip_subsampling: a bool that determines if we want to skip the sampling procedure than balances the fg/bg classes. Used for upper frcnn layers in cascade RCNN. **kwargs: Additional keyword arguments passed to Layer. 
""" self._config_dict = { 'mix_gt_boxes': mix_gt_boxes, 'num_sampled_rois': num_sampled_rois, 'foreground_fraction': foreground_fraction, 'foreground_iou_threshold': foreground_iou_threshold, 'background_iou_high_threshold': background_iou_high_threshold, 'background_iou_low_threshold': background_iou_low_threshold, 'skip_subsampling': skip_subsampling, } self._sim_calc = iou_similarity.IouSimilarity() self._box_matcher = box_matcher.BoxMatcher( thresholds=[ background_iou_low_threshold, background_iou_high_threshold, foreground_iou_threshold ], indicators=[-3, -1, -2, 1]) self._target_gather = target_gather.TargetGather() self._sampler = box_sampler.BoxSampler( num_sampled_rois, foreground_fraction) super().__init__(**kwargs) def call( self, boxes: tf.Tensor, gt_boxes: tf.Tensor, gt_classes: tf.Tensor, gt_outer_boxes: Optional[tf.Tensor] = None) -> ROISamplerReturnType: """Assigns the proposals with groundtruth classes and performs subsmpling. Given `proposed_boxes`, `gt_boxes`, and `gt_classes`, the function uses the following algorithm to generate the final `num_samples_per_image` RoIs. 1. Calculates the IoU between each proposal box and each gt_boxes. 2. Assigns each proposed box with a groundtruth class and box by choosing the largest IoU overlap. 3. Samples `num_samples_per_image` boxes from all proposed boxes, and returns box_targets, class_targets, and RoIs. Args: boxes: A `tf.Tensor` of shape of [batch_size, N, 4]. N is the number of proposals before groundtruth assignment. The last dimension is the box coordinates w.r.t. the scaled images in [ymin, xmin, ymax, xmax] format. gt_boxes: A `tf.Tensor` of shape of [batch_size, MAX_NUM_INSTANCES, 4]. The coordinates of gt_boxes are in the pixel coordinates of the scaled image. This tensor might have padding of values -1 indicating the invalid box coordinates. gt_classes: A `tf.Tensor` with a shape of [batch_size, MAX_NUM_INSTANCES]. This tensor might have paddings with values of -1 indicating the invalid classes. gt_outer_boxes: A `tf.Tensor` of shape of [batch_size, MAX_NUM_INSTANCES, 4]. The corrdinates of gt_outer_boxes are in the pixel coordinates of the scaled image. This tensor might have padding of values -1 indicating the invalid box coordinates. Ignored if not provided. Returns: sampled_rois: A `tf.Tensor` of shape of [batch_size, K, 4], representing the coordinates of the sampled RoIs, where K is the number of the sampled RoIs, i.e. K = num_samples_per_image. sampled_gt_boxes: A `tf.Tensor` of shape of [batch_size, K, 4], storing the box coordinates of the matched groundtruth boxes of the samples RoIs. sampled_gt_outer_boxes: A `tf.Tensor` of shape of [batch_size, K, 4], storing the box coordinates of the matched groundtruth outer boxes of the samples RoIs. This field is missing if gt_outer_boxes is None. sampled_gt_classes: A `tf.Tensor` of shape of [batch_size, K], storing the classes of the matched groundtruth boxes of the sampled RoIs. sampled_gt_indices: A `tf.Tensor` of shape of [batch_size, K], storing the indices of the sampled groudntruth boxes in the original `gt_boxes` tensor, i.e., gt_boxes[sampled_gt_indices[:, i]] = sampled_gt_boxes[:, i]. 
""" gt_boxes = tf.cast(gt_boxes, dtype=boxes.dtype) if self._config_dict['mix_gt_boxes']: boxes = tf.concat([boxes, gt_boxes], axis=1) boxes_invalid_mask = tf.less( tf.reduce_max(boxes, axis=-1, keepdims=True), 0.0) gt_invalid_mask = tf.less( tf.reduce_max(gt_boxes, axis=-1, keepdims=True), 0.0) similarity_matrix = self._sim_calc(boxes, gt_boxes, boxes_invalid_mask, gt_invalid_mask) matched_gt_indices, match_indicators = self._box_matcher(similarity_matrix) positive_matches = tf.greater_equal(match_indicators, 0) negative_matches = tf.equal(match_indicators, -1) ignored_matches = tf.equal(match_indicators, -2) invalid_matches = tf.equal(match_indicators, -3) background_mask = tf.expand_dims( tf.logical_or(negative_matches, invalid_matches), -1) gt_classes = tf.expand_dims(gt_classes, axis=-1) matched_gt_classes = self._target_gather(gt_classes, matched_gt_indices, background_mask) matched_gt_classes = tf.where(background_mask, tf.zeros_like(matched_gt_classes), matched_gt_classes) matched_gt_boxes = self._target_gather(gt_boxes, matched_gt_indices, tf.tile(background_mask, [1, 1, 4])) matched_gt_boxes = tf.where(background_mask, tf.zeros_like(matched_gt_boxes), matched_gt_boxes) if gt_outer_boxes is not None: matched_gt_outer_boxes = self._target_gather( gt_outer_boxes, matched_gt_indices, tf.tile(background_mask, [1, 1, 4])) matched_gt_outer_boxes = tf.where(background_mask, tf.zeros_like(matched_gt_outer_boxes), matched_gt_outer_boxes) matched_gt_indices = tf.where( tf.squeeze(background_mask, -1), -tf.ones_like(matched_gt_indices), matched_gt_indices) if self._config_dict['skip_subsampling']: matched_gt_classes = tf.squeeze(matched_gt_classes, axis=-1) if gt_outer_boxes is None: return (boxes, matched_gt_boxes, matched_gt_classes, matched_gt_indices) return (boxes, matched_gt_boxes, matched_gt_outer_boxes, matched_gt_classes, matched_gt_indices) sampled_indices = self._sampler( positive_matches, negative_matches, ignored_matches) sampled_rois = self._target_gather(boxes, sampled_indices) sampled_gt_boxes = self._target_gather(matched_gt_boxes, sampled_indices) sampled_gt_classes = tf.squeeze(self._target_gather( matched_gt_classes, sampled_indices), axis=-1) sampled_gt_indices = tf.squeeze(self._target_gather( tf.expand_dims(matched_gt_indices, -1), sampled_indices), axis=-1) if gt_outer_boxes is None: return (sampled_rois, sampled_gt_boxes, sampled_gt_classes, sampled_gt_indices) sampled_gt_outer_boxes = self._target_gather(matched_gt_outer_boxes, sampled_indices) return (sampled_rois, sampled_gt_boxes, sampled_gt_outer_boxes, sampled_gt_classes, sampled_gt_indices) def get_config(self): return self._config_dict @classmethod def from_config(cls, config): return cls(**config)
9,996
47.0625
80
py
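A minimal sketch of calling the ROI sampler above with the tensor shapes described in its docstring. The batch size, proposal count, and random placeholder tensors are illustrative only; in practice the proposals come from an RPN and the groundtruth is padded with -1 for invalid entries.

# Sketch: sampling RoIs for the second-stage heads.
import tensorflow as tf

from official.vision.modeling.layers import roi_sampler

batch_size, num_proposals, max_instances = 2, 1000, 100
proposals = tf.random.uniform([batch_size, num_proposals, 4], maxval=512.0)
gt_boxes = tf.random.uniform([batch_size, max_instances, 4], maxval=512.0)
gt_classes = tf.random.uniform(
    [batch_size, max_instances], maxval=91, dtype=tf.int32)

sampler = roi_sampler.ROISampler(
    mix_gt_boxes=True,
    num_sampled_rois=512,
    foreground_fraction=0.25,
    foreground_iou_threshold=0.5,
    background_iou_high_threshold=0.5,
    background_iou_low_threshold=0.0)

rois, roi_boxes, roi_classes, roi_indices = sampler(
    proposals, gt_boxes, gt_classes)
# rois: [2, 512, 4], roi_boxes: [2, 512, 4], roi_classes / roi_indices: [2, 512]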
models
models-master/official/vision/modeling/layers/detection_generator.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of generators to generate the final detections.""" import contextlib from typing import Any, Dict, List, Optional, Mapping, Sequence, Tuple # Import libraries import numpy as np import tensorflow as tf from official.vision.modeling.layers import edgetpu from official.vision.ops import box_ops from official.vision.ops import nms from official.vision.ops import preprocess_ops def _generate_detections_v1( boxes: tf.Tensor, scores: tf.Tensor, attributes: Optional[Mapping[str, tf.Tensor]] = None, pre_nms_top_k: int = 5000, pre_nms_score_threshold: float = 0.05, nms_iou_threshold: float = 0.5, max_num_detections: int = 100, soft_nms_sigma: Optional[float] = None, ): """Generates the final detections given the model outputs. The implementation unrolls the batch dimension and process images one by one. It required the batch dimension to be statically known and it is TPU compatible. Args: boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or `[batch_size, N, 1, 4]` for box predictions on all feature levels. The N is the number of total anchors on all levels. scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which stacks class probability on all feature levels. The N is the number of total anchors on all levels. The num_classes is the number of classes predicted by the model. Note that the class_outputs here is the raw score. attributes: None or a dict of (attribute_name, attributes) pairs. Each attributes is a `tf.Tensor` with shape `[batch_size, N, num_classes, attribute_size]` or `[batch_size, N, 1, attribute_size]` for attribute predictions on all feature levels. The N is the number of total anchors on all levels. Can be None if no attribute learning is required. pre_nms_top_k: An `int` number of top candidate detections per class before NMS. pre_nms_score_threshold: A `float` representing the threshold for deciding when to remove boxes based on score. nms_iou_threshold: A `float` representing the threshold for deciding whether boxes overlap too much with respect to IOU. max_num_detections: A scalar representing maximum number of boxes retained over all classes. soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS. When soft_nms_sigma=0.0 (which is default), we fall back to standard NMS. Returns: nms_boxes: A `float` type `tf.Tensor` of shape `[batch_size, max_num_detections, 4]` representing top detected boxes in `[y1, x1, y2, x2]`. nms_scores: A `float` type `tf.Tensor` of shape `[batch_size, max_num_detections]` representing sorted confidence scores for detected boxes. The values are between `[0, 1]`. nms_classes: An `int` type `tf.Tensor` of shape `[batch_size, max_num_detections]` representing classes for detected boxes. valid_detections: An `int` type `tf.Tensor` of shape `[batch_size]` only the top `valid_detections` boxes are valid detections. nms_attributes: None or a dict of (attribute_name, attributes). 
Each attribute is a `float` type `tf.Tensor` of shape `[batch_size, max_num_detections, attribute_size]` representing attribute predictions for detected boxes. Can be an empty dict if no attribute learning is required. """ with tf.name_scope('generate_detections'): batch_size = scores.get_shape().as_list()[0] nmsed_boxes = [] nmsed_classes = [] nmsed_scores = [] valid_detections = [] if attributes: nmsed_attributes = {att_name: [] for att_name in attributes.keys()} else: nmsed_attributes = {} for i in range(batch_size): ( nmsed_boxes_i, nmsed_scores_i, nmsed_classes_i, valid_detections_i, nmsed_att_i, ) = _generate_detections_per_image( boxes[i], scores[i], attributes={att_name: att[i] for att_name, att in attributes.items()} if attributes else {}, pre_nms_top_k=pre_nms_top_k, pre_nms_score_threshold=pre_nms_score_threshold, nms_iou_threshold=nms_iou_threshold, max_num_detections=max_num_detections, soft_nms_sigma=soft_nms_sigma, ) nmsed_boxes.append(nmsed_boxes_i) nmsed_scores.append(nmsed_scores_i) nmsed_classes.append(nmsed_classes_i) valid_detections.append(valid_detections_i) if attributes: for att_name in attributes.keys(): nmsed_attributes[att_name].append(nmsed_att_i[att_name]) nmsed_boxes = tf.stack(nmsed_boxes, axis=0) nmsed_scores = tf.stack(nmsed_scores, axis=0) nmsed_classes = tf.stack(nmsed_classes, axis=0) valid_detections = tf.stack(valid_detections, axis=0) if attributes: for att_name in attributes.keys(): nmsed_attributes[att_name] = tf.stack(nmsed_attributes[att_name], axis=0) return ( nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes, ) def _generate_detections_per_image( boxes: tf.Tensor, scores: tf.Tensor, attributes: Optional[Mapping[str, tf.Tensor]] = None, pre_nms_top_k: int = 5000, pre_nms_score_threshold: float = 0.05, nms_iou_threshold: float = 0.5, max_num_detections: int = 100, soft_nms_sigma: Optional[float] = None, ): """Generates the final detections per image given the model outputs. Args: boxes: A `tf.Tensor` with shape `[N, num_classes, 4]` or `[N, 1, 4]`, which box predictions on all feature levels. The N is the number of total anchors on all levels. scores: A `tf.Tensor` with shape `[N, num_classes]`, which stacks class probability on all feature levels. The N is the number of total anchors on all levels. The num_classes is the number of classes predicted by the model. Note that the class_outputs here is the raw score. attributes: If not None, a dict of `tf.Tensor`. Each value is in shape `[N, num_classes, attribute_size]` or `[N, 1, attribute_size]` of attribute predictions on all feature levels. The N is the number of total anchors on all levels. pre_nms_top_k: An `int` number of top candidate detections per class before NMS. pre_nms_score_threshold: A `float` representing the threshold for deciding when to remove boxes based on score. nms_iou_threshold: A `float` representing the threshold for deciding whether boxes overlap too much with respect to IOU. max_num_detections: A `scalar` representing maximum number of boxes retained over all classes. soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS. When soft_nms_sigma=0.0, we fall back to standard NMS. If set to None, `tf.image.non_max_suppression_padded` is called instead. Returns: nms_boxes: A `float` tf.Tensor of shape `[max_num_detections, 4]` representing top detected boxes in `[y1, x1, y2, x2]`. nms_scores: A `float` tf.Tensor of shape `[max_num_detections]` representing sorted confidence scores for detected boxes. The values are between [0, 1]. 
nms_classes: An `int` tf.Tensor of shape `[max_num_detections]` representing classes for detected boxes. valid_detections: An `int` tf.Tensor of shape [1] only the top `valid_detections` boxes are valid detections. nms_attributes: None or a dict. Each value is a `float` tf.Tensor of shape `[max_num_detections, attribute_size]` representing attribute predictions for detected boxes. Can be an empty dict if `attributes` is None. """ nmsed_boxes = [] nmsed_scores = [] nmsed_classes = [] num_classes_for_box = boxes.get_shape().as_list()[1] num_classes = scores.get_shape().as_list()[1] if attributes: nmsed_attributes = {att_name: [] for att_name in attributes.keys()} else: nmsed_attributes = {} for i in range(num_classes): boxes_i = boxes[:, min(num_classes_for_box - 1, i)] scores_i = scores[:, i] # Obtains pre_nms_top_k before running NMS. scores_i, indices = tf.nn.top_k( scores_i, k=tf.minimum(tf.shape(scores_i)[-1], pre_nms_top_k) ) boxes_i = tf.gather(boxes_i, indices) if soft_nms_sigma is not None: (nmsed_indices_i, nmsed_scores_i) = ( tf.image.non_max_suppression_with_scores( tf.cast(boxes_i, tf.float32), tf.cast(scores_i, tf.float32), max_num_detections, iou_threshold=nms_iou_threshold, score_threshold=pre_nms_score_threshold, soft_nms_sigma=soft_nms_sigma, name='nms_detections_' + str(i), ) ) nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i) nmsed_boxes_i = preprocess_ops.clip_or_pad_to_fixed_size( nmsed_boxes_i, max_num_detections, 0.0 ) nmsed_scores_i = preprocess_ops.clip_or_pad_to_fixed_size( nmsed_scores_i, max_num_detections, -1.0 ) else: (nmsed_indices_i, nmsed_num_valid_i) = ( tf.image.non_max_suppression_padded( tf.cast(boxes_i, tf.float32), tf.cast(scores_i, tf.float32), max_num_detections, iou_threshold=nms_iou_threshold, score_threshold=pre_nms_score_threshold, pad_to_max_output_size=True, name='nms_detections_' + str(i), ) ) nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i) nmsed_scores_i = tf.gather(scores_i, nmsed_indices_i) # Sets scores of invalid boxes to -1. nmsed_scores_i = tf.where( tf.less(tf.range(max_num_detections), [nmsed_num_valid_i]), nmsed_scores_i, -tf.ones_like(nmsed_scores_i), ) nmsed_classes_i = tf.fill([max_num_detections], i) nmsed_boxes.append(nmsed_boxes_i) nmsed_scores.append(nmsed_scores_i) nmsed_classes.append(nmsed_classes_i) if attributes: for att_name, att in attributes.items(): num_classes_for_attr = att.get_shape().as_list()[1] att_i = att[:, min(num_classes_for_attr - 1, i)] att_i = tf.gather(att_i, indices) nmsed_att_i = tf.gather(att_i, nmsed_indices_i) nmsed_att_i = preprocess_ops.clip_or_pad_to_fixed_size( nmsed_att_i, max_num_detections, 0.0 ) nmsed_attributes[att_name].append(nmsed_att_i) # Concats results from all classes and sort them. 
nmsed_boxes = tf.concat(nmsed_boxes, axis=0) nmsed_scores = tf.concat(nmsed_scores, axis=0) nmsed_classes = tf.concat(nmsed_classes, axis=0) nmsed_scores, indices = tf.nn.top_k( nmsed_scores, k=max_num_detections, sorted=True ) nmsed_boxes = tf.gather(nmsed_boxes, indices) nmsed_classes = tf.gather(nmsed_classes, indices) valid_detections = tf.reduce_sum( tf.cast(tf.greater(nmsed_scores, -1), tf.int32) ) if attributes: for att_name in attributes.keys(): nmsed_attributes[att_name] = tf.concat(nmsed_attributes[att_name], axis=0) nmsed_attributes[att_name] = tf.gather( nmsed_attributes[att_name], indices ) return ( nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes, ) def _select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int): """Selects top_k scores and indices for each class. Args: scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which stacks class logit outputs on all feature levels. The N is the number of total anchors on all levels. The num_classes is the number of classes predicted by the model. pre_nms_num_detections: Number of candidates before NMS. Returns: scores and indices: A `tf.Tensor` with shape `[batch_size, pre_nms_num_detections, num_classes]`. """ batch_size, num_anchors, num_class = scores_in.get_shape().as_list() if batch_size is None: batch_size = tf.shape(scores_in)[0] scores_trans = tf.transpose(scores_in, perm=[0, 2, 1]) scores_trans = tf.reshape(scores_trans, [-1, num_anchors]) top_k_scores, top_k_indices = tf.nn.top_k( scores_trans, k=pre_nms_num_detections, sorted=True ) top_k_scores = tf.reshape( top_k_scores, [batch_size, num_class, pre_nms_num_detections] ) top_k_indices = tf.reshape( top_k_indices, [batch_size, num_class, pre_nms_num_detections] ) return tf.transpose(top_k_scores, [0, 2, 1]), tf.transpose( top_k_indices, [0, 2, 1] ) def _generate_detections_v2_class_agnostic( boxes: tf.Tensor, scores: tf.Tensor, pre_nms_top_k: int = 5000, pre_nms_score_threshold: float = 0.05, nms_iou_threshold: float = 0.5, max_num_detections: int = 100 ): """Generates the final detections by applying class-agnostic NMS. Args: boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or `[batch_size, N, 1, 4]`, which box predictions on all feature levels. The N is the number of total anchors on all levels. scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which stacks class probability on all feature levels. The N is the number of total anchors on all levels. The num_classes is the number of classes predicted by the model. Note that the class_outputs here is the raw score. pre_nms_top_k: An `int` number of top candidate detections per class before NMS. pre_nms_score_threshold: A `float` representing the threshold for deciding when to remove boxes based on score. nms_iou_threshold: A `float` representing the threshold for deciding whether boxes overlap too much with respect to IOU. max_num_detections: A `scalar` representing maximum number of boxes retained over all classes. Returns: nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4] representing top detected boxes in [y1, x1, y2, x2]. nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections] representing sorted confidence scores for detected boxes. The values are between [0, 1]. nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections] representing classes for detected boxes. 
valid_detections: An `int` tf.Tensor of shape [batch_size] only the top `valid_detections` boxes are valid detections. """ with tf.name_scope('generate_detections_class_agnostic'): nmsed_boxes = [] nmsed_classes = [] nmsed_scores = [] valid_detections = [] batch_size, _, num_classes_for_box, _ = boxes.get_shape().as_list() if batch_size is None: batch_size = tf.shape(boxes)[0] _, total_anchors, _ = scores.get_shape().as_list() # Keeps only the class with highest score for each predicted box. scores_condensed, classes_ids = tf.nn.top_k( scores, k=1, sorted=True ) scores_condensed = tf.squeeze(scores_condensed, axis=[2]) if num_classes_for_box > 1: boxes = tf.gather(boxes, classes_ids, axis=2, batch_dims=2) boxes_condensed = tf.squeeze(boxes, axis=[2]) classes_condensed = tf.squeeze(classes_ids, axis=[2]) # Selects top pre_nms_num scores and indices before NMS. num_anchors_filtered = min(total_anchors, pre_nms_top_k) scores_filtered, indices_filtered = tf.nn.top_k( scores_condensed, k=num_anchors_filtered, sorted=True ) classes_filtered = tf.gather( classes_condensed, indices_filtered, axis=1, batch_dims=1 ) boxes_filtered = tf.gather( boxes_condensed, indices_filtered, axis=1, batch_dims=1 ) tf.ensure_shape(boxes_filtered, [None, num_anchors_filtered, 4]) tf.ensure_shape(classes_filtered, [None, num_anchors_filtered]) tf.ensure_shape(scores_filtered, [None, num_anchors_filtered]) boxes_filtered = tf.cast( boxes_filtered, tf.float32 ) scores_filtered = tf.cast( scores_filtered, tf.float32 ) # Apply class-agnostic NMS on boxes. (nmsed_indices_padded, valid_detections) = ( tf.image.non_max_suppression_padded( boxes=boxes_filtered, scores=scores_filtered, max_output_size=max_num_detections, iou_threshold=nms_iou_threshold, pad_to_max_output_size=True, score_threshold=pre_nms_score_threshold, sorted_input=True, name='nms_detections' ) ) nmsed_boxes = tf.gather( boxes_filtered, nmsed_indices_padded, batch_dims=1, axis=1 ) nmsed_scores = tf.gather( scores_filtered, nmsed_indices_padded, batch_dims=1, axis=1 ) nmsed_classes = tf.gather( classes_filtered, nmsed_indices_padded, batch_dims=1, axis=1 ) # Sets the padded boxes, scores, and classes to 0. padding_mask = tf.reshape( tf.range(max_num_detections), [1, -1] ) < tf.reshape(valid_detections, [-1, 1]) nmsed_boxes = nmsed_boxes * tf.cast( tf.expand_dims(padding_mask, axis=2), nmsed_boxes.dtype ) nmsed_scores = nmsed_scores * tf.cast(padding_mask, nmsed_scores.dtype) nmsed_classes = nmsed_classes * tf.cast(padding_mask, nmsed_classes.dtype) return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections def _generate_detections_v2_class_aware( boxes: tf.Tensor, scores: tf.Tensor, pre_nms_top_k: int = 5000, pre_nms_score_threshold: float = 0.05, nms_iou_threshold: float = 0.5, max_num_detections: int = 100, ): """Generates the final detections by using class-aware NMS. Args: boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or `[batch_size, N, 1, 4]`, which box predictions on all feature levels. The N is the number of total anchors on all levels. scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which stacks class probability on all feature levels. The N is the number of total anchors on all levels. The num_classes is the number of classes predicted by the model. Note that the class_outputs here is the raw score. pre_nms_top_k: An `int` number of top candidate detections per class before NMS. pre_nms_score_threshold: A `float` representing the threshold for deciding when to remove boxes based on score. 
nms_iou_threshold: A `float` representing the threshold for deciding whether boxes overlap too much with respect to IOU. max_num_detections: A `scalar` representing maximum number of boxes retained over all classes. Returns: nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4] representing top detected boxes in [y1, x1, y2, x2]. nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections] representing sorted confidence scores for detected boxes. The values are between [0, 1]. nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections] representing classes for detected boxes. valid_detections: An `int` tf.Tensor of shape [batch_size] only the top `valid_detections` boxes are valid detections. """ with tf.name_scope('generate_detections'): nmsed_boxes = [] nmsed_classes = [] nmsed_scores = [] valid_detections = [] batch_size, _, num_classes_for_box, _ = boxes.get_shape().as_list() if batch_size is None: batch_size = tf.shape(boxes)[0] _, total_anchors, num_classes = scores.get_shape().as_list() # Selects top pre_nms_num scores and indices before NMS. scores, indices = _select_top_k_scores( scores, min(total_anchors, pre_nms_top_k) ) for i in range(num_classes): boxes_i = boxes[:, :, min(num_classes_for_box - 1, i), :] scores_i = scores[:, :, i] # Obtains pre_nms_top_k before running NMS. boxes_i = tf.gather(boxes_i, indices[:, :, i], batch_dims=1, axis=1) # Filter out scores. boxes_i, scores_i = box_ops.filter_boxes_by_scores( boxes_i, scores_i, min_score_threshold=pre_nms_score_threshold ) (nmsed_scores_i, nmsed_boxes_i) = nms.sorted_non_max_suppression_padded( tf.cast(scores_i, tf.float32), tf.cast(boxes_i, tf.float32), max_num_detections, iou_threshold=nms_iou_threshold, ) nmsed_classes_i = tf.fill([batch_size, max_num_detections], i) nmsed_boxes.append(nmsed_boxes_i) nmsed_scores.append(nmsed_scores_i) nmsed_classes.append(nmsed_classes_i) nmsed_boxes = tf.concat(nmsed_boxes, axis=1) nmsed_scores = tf.concat(nmsed_scores, axis=1) nmsed_classes = tf.concat(nmsed_classes, axis=1) nmsed_scores, indices = tf.nn.top_k( nmsed_scores, k=max_num_detections, sorted=True ) nmsed_boxes = tf.gather(nmsed_boxes, indices, batch_dims=1, axis=1) nmsed_classes = tf.gather(nmsed_classes, indices, batch_dims=1) valid_detections = tf.reduce_sum( input_tensor=tf.cast(tf.greater(nmsed_scores, 0.0), tf.int32), axis=1 ) return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections def _generate_detections_v2( boxes: tf.Tensor, scores: tf.Tensor, pre_nms_top_k: int = 5000, pre_nms_score_threshold: float = 0.05, nms_iou_threshold: float = 0.5, max_num_detections: int = 100, use_class_agnostic_nms: Optional[bool] = None, ): """Generates the final detections given the model outputs. This implementation unrolls classes dimension while using the tf.while_loop to implement the batched NMS, so that it can be parallelized at the batch dimension. It should give better performance comparing to v1 implementation. It is TPU compatible. Args: boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or `[batch_size, N, 1, 4]`, which box predictions on all feature levels. The N is the number of total anchors on all levels. scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which stacks class probability on all feature levels. The N is the number of total anchors on all levels. The num_classes is the number of classes predicted by the model. Note that the class_outputs here is the raw score. 
pre_nms_top_k: An `int` number of top candidate detections per class before NMS. pre_nms_score_threshold: A `float` representing the threshold for deciding when to remove boxes based on score. nms_iou_threshold: A `float` representing the threshold for deciding whether boxes overlap too much with respect to IOU. max_num_detections: A `scalar` representing maximum number of boxes retained over all classes. use_class_agnostic_nms: A `bool` of whether non max suppression is operated on all the boxes using max scores across all classes. Returns: nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4] representing top detected boxes in [y1, x1, y2, x2]. nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections] representing sorted confidence scores for detected boxes. The values are between [0, 1]. nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections] representing classes for detected boxes. valid_detections: An `int` tf.Tensor of shape [batch_size] only the top `valid_detections` boxes are valid detections. """ if use_class_agnostic_nms: return _generate_detections_v2_class_agnostic( boxes=boxes, scores=scores, pre_nms_top_k=pre_nms_top_k, pre_nms_score_threshold=pre_nms_score_threshold, nms_iou_threshold=nms_iou_threshold, max_num_detections=max_num_detections, ) return _generate_detections_v2_class_aware( boxes=boxes, scores=scores, pre_nms_top_k=pre_nms_top_k, pre_nms_score_threshold=pre_nms_score_threshold, nms_iou_threshold=nms_iou_threshold, max_num_detections=max_num_detections, ) def _generate_detections_v3( boxes: tf.Tensor, scores: tf.Tensor, pre_nms_score_threshold: float = 0.05, nms_iou_threshold: float = 0.5, max_num_detections: int = 100, refinements: int = 2, ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]: """Generates the detections given the model outputs using NMS for EdgeTPU. Args: boxes: A `tf.Tensor` with shape `[batch_size, num_classes, N, 4]` or `[batch_size, 1, N, 4]`, which box predictions on all feature levels. The N is the number of total anchors on all levels. scores: A `tf.Tensor` with shape `[batch_size, num_classes, N]`, which stacks class probability on all feature levels. The N is the number of total anchors on all levels. The num_classes is the number of classes predicted by the model. Note that the class_outputs here is the raw score. pre_nms_score_threshold: A `float` representing the threshold for deciding when to remove boxes based on score. nms_iou_threshold: A `float` representing the threshold for deciding whether boxes overlap too much with respect to IOU. max_num_detections: A `scalar` representing maximum number of boxes retained over all classes. refinements: Quality parameter for NMS algorithm. Returns: nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4] representing top detected boxes in [y1, x1, y2, x2]. nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections] representing sorted confidence scores for detected boxes. The values are between [0, 1]. nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections] representing classes for detected boxes. valid_detections: An `int` tf.Tensor of shape [batch_size] only the top `valid_detections` boxes are valid detections. Raises: ValueError if inputs shapes are not valid. 
""" one = tf.constant(1, dtype=scores.dtype) with tf.name_scope('generate_detections'): batch_size, num_box_classes, box_locations, sides = ( boxes.get_shape().as_list() ) if batch_size is None: batch_size = tf.shape(boxes)[0] _, num_classes, locations = scores.get_shape().as_list() if num_box_classes != 1 and num_box_classes != num_classes: raise ValueError('Boxes should have either 1 class or same as scores.') if locations != box_locations: raise ValueError('Number of locations is different.') if sides != 4: raise ValueError('Number of sides is incorrect.') # Selects pre_nms_score_threshold scores before NMS. boxes, scores = box_ops.filter_boxes_by_scores( boxes, scores, min_score_threshold=pre_nms_score_threshold ) # EdgeTPU-friendly class-wise NMS, -1 for invalid. indices = edgetpu.non_max_suppression_padded( boxes, scores, max_num_detections, iou_threshold=nms_iou_threshold, refinements=refinements, ) # Gather NMS-ed boxes and scores. safe_indices = tf.nn.relu(indices) # 0 for invalid invalid_detections = safe_indices - indices # 1 for invalid, 0 for valid valid_detections = one - invalid_detections # 0 for invalid, 1 for valid safe_indices = tf.cast(safe_indices, tf.int32) boxes = tf.gather(boxes, safe_indices, axis=2, batch_dims=2) boxes = tf.cast(tf.expand_dims(valid_detections, -1), boxes.dtype) * boxes scores = valid_detections * tf.gather( scores, safe_indices, axis=2, batch_dims=2 ) # Compliment with class numbers. classes = tf.constant(np.arange(num_classes), dtype=scores.dtype) classes = tf.reshape(classes, [1, num_classes, 1]) classes = tf.tile(classes, [batch_size, 1, max_num_detections]) # Flatten classes, locations. Class = -1 for invalid detection scores = tf.reshape(scores, [batch_size, num_classes * max_num_detections]) boxes = tf.reshape(boxes, [batch_size, num_classes * max_num_detections, 4]) classes = tf.reshape( valid_detections * classes - invalid_detections, [batch_size, num_classes * max_num_detections], ) # Filter top-k across boxes of all classes scores, indices = tf.nn.top_k(scores, k=max_num_detections, sorted=True) boxes = tf.gather(boxes, indices, batch_dims=1, axis=1) classes = tf.gather(classes, indices, batch_dims=1, axis=1) invalid_detections = tf.nn.relu(classes) - classes valid_detections = tf.reduce_sum(one - invalid_detections, axis=1) return boxes, scores, classes, valid_detections def _generate_detections_batched( boxes: tf.Tensor, scores: tf.Tensor, pre_nms_score_threshold: float, nms_iou_threshold: float, max_num_detections: int, ): """Generates detected boxes with scores and classes for one-stage detector. The function takes output of multi-level ConvNets and anchor boxes and generates detected boxes. Note that this used batched nms, which is not supported on TPU currently. Args: boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or `[batch_size, N, 1, 4]`, which box predictions on all feature levels. The N is the number of total anchors on all levels. scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which stacks class probability on all feature levels. The N is the number of total anchors on all levels. The num_classes is the number of classes predicted by the model. Note that the class_outputs here is the raw score. pre_nms_score_threshold: A `float` representing the threshold for deciding when to remove boxes based on score. nms_iou_threshold: A `float` representing the threshold for deciding whether boxes overlap too much with respect to IOU. 
max_num_detections: A `scalar` representing maximum number of boxes retained over all classes. Returns: nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4] representing top detected boxes in [y1, x1, y2, x2]. nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections] representing sorted confidence scores for detected boxes. The values are between [0, 1]. nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections] representing classes for detected boxes. valid_detections: An `int` tf.Tensor of shape [batch_size] only the top `valid_detections` boxes are valid detections. """ with tf.name_scope('generate_detections'): nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = ( tf.image.combined_non_max_suppression( boxes, scores, max_output_size_per_class=max_num_detections, max_total_size=max_num_detections, iou_threshold=nms_iou_threshold, score_threshold=pre_nms_score_threshold, pad_per_class=False, clip_boxes=False, ) ) nmsed_classes = tf.cast(nmsed_classes, tf.int32) return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections def _generate_detections_tflite_implements_signature( config: Dict[str, Any] ) -> str: """Returns `experimental_implements` signature for TFLite's custom NMS op. This signature encodes the arguments to correctly initialize TFLite's custom post-processing op in the MLIR converter. For details on `experimental_implements` see here: https://www.tensorflow.org/api_docs/python/tf/function Args: config: A dictionary of configs defining parameters for TFLite NMS op. Returns: An `experimental_implements` signature string. """ scale_value = 1.0 implements_signature = [ 'name: "%s"' % 'TFLite_Detection_PostProcess', 'attr { key: "max_detections" value { i: %d } }' % config['max_detections'], 'attr { key: "max_classes_per_detection" value { i: %d } }' % config['max_classes_per_detection'], 'attr { key: "use_regular_nms" value { b: %s } }' % str(config['use_regular_nms']).lower(), 'attr { key: "nms_score_threshold" value { f: %f } }' % config['nms_score_threshold'], 'attr { key: "nms_iou_threshold" value { f: %f } }' % config['nms_iou_threshold'], 'attr { key: "y_scale" value { f: %f } }' % scale_value, 'attr { key: "x_scale" value { f: %f } }' % scale_value, 'attr { key: "h_scale" value { f: %f } }' % scale_value, 'attr { key: "w_scale" value { f: %f } }' % scale_value, 'attr { key: "num_classes" value { i: %d } }' % config['num_classes'], ] implements_signature = ' '.join(implements_signature) return implements_signature def _generate_detections_tflite( raw_boxes: Mapping[str, tf.Tensor], raw_scores: Mapping[str, tf.Tensor], anchor_boxes: Mapping[str, tf.Tensor], config: Dict[str, Any], ) -> Sequence[Any]: """Generate detections for conversion to TFLite. Mathematically same as class-agnostic NMS, except that the last portion of the TF graph constitutes a dummy `tf.function` that contains an annotation for conversion to TFLite's custom NMS op. Using this custom op allows features like post-training quantization & accelerator support. NOTE: This function does NOT return a valid output, and is only meant to generate a SavedModel for TFLite conversion via MLIR. The generated SavedModel should not be used for inference. For TFLite op details, see tensorflow/lite/kernels/detection_postprocess.cc Args: raw_boxes: A dictionary of tensors for raw boxes. Key is level of features and value is a tensor denoting a level of boxes with shape [1, H, W, 4 * num_anchors]. raw_scores: A dictionary of tensors for classes. 
Key is level of features and value is a tensor denoting a level of logits with shape [1, H, W, num_class * num_anchors]. anchor_boxes: A dictionary of tensors for anchor boxes. Key is level of features and value is a tensor denoting a level of anchors with shape [num_anchors, 4]. config: A dictionary of configs defining parameters for TFLite NMS op. Returns: A (dummy) tuple of (boxes, scores, classess, num_detections). Raises: ValueError: If the last dimension of predicted boxes is not divisible by 4, or the last dimension of predicted scores is not divisible by number of anchors per location. """ scores, boxes, anchors = [], [], [] levels = list(raw_scores.keys()) min_level = int(min(levels)) max_level = int(max(levels)) batch_size = tf.shape(raw_scores[str(min_level)])[0] num_anchors_per_locations_times_4 = ( raw_boxes[str(min_level)].get_shape().as_list()[-1] ) if num_anchors_per_locations_times_4 % 4 != 0: raise ValueError( 'The last dimension of predicted boxes should be divisible by 4.' ) num_anchors_per_locations = num_anchors_per_locations_times_4 // 4 if num_anchors_per_locations_times_4 % 4 != 0: raise ValueError( 'The last dimension of predicted scores should be divisible by' f' {num_anchors_per_locations}.' ) num_classes = ( raw_scores[str(min_level)].get_shape().as_list()[-1] // num_anchors_per_locations ) config.update({'num_classes': num_classes}) for i in range(min_level, max_level + 1): scores.append(tf.reshape(raw_scores[str(i)], [batch_size, -1, num_classes])) boxes.append(tf.reshape(raw_boxes[str(i)], [batch_size, -1, 4])) anchors.append(tf.reshape(anchor_boxes[str(i)], [-1, 4])) scores = tf.sigmoid(tf.concat(scores, 1)) boxes = tf.concat(boxes, 1) anchors = tf.concat(anchors, 0) ycenter_a = (anchors[..., 0] + anchors[..., 2]) / 2 xcenter_a = (anchors[..., 1] + anchors[..., 3]) / 2 ha = anchors[..., 2] - anchors[..., 0] wa = anchors[..., 3] - anchors[..., 1] anchors = tf.stack([ycenter_a, xcenter_a, ha, wa], axis=-1) if config.get('normalize_anchor_coordinates', False): # TFLite's object detection APIs require normalized anchors. height, width = config['input_image_size'] normalize_factor = tf.constant( [height, width, height, width], dtype=tf.float32 ) anchors = anchors / normalize_factor # There is no TF equivalent for TFLite's custom post-processing op. # So we add an 'empty' composite function here, that is legalized to the # custom op with MLIR. 
# For details, see: tensorflow/compiler/mlir/lite/utils/nms_utils.cc @tf.function( experimental_implements=_generate_detections_tflite_implements_signature( config ) ) # pylint: disable=g-unused-argument,unused-argument def dummy_post_processing(input_boxes, input_scores, input_anchors): boxes = tf.constant(0.0, dtype=tf.float32, name='boxes') scores = tf.constant(0.0, dtype=tf.float32, name='scores') classes = tf.constant(0.0, dtype=tf.float32, name='classes') num_detections = tf.constant(0.0, dtype=tf.float32, name='num_detections') return boxes, classes, scores, num_detections if config.get('omit_nms', False): dummy_classes = tf.constant(0.0, dtype=tf.float32, name='classes') dummy_num_detections = tf.constant( 0.0, dtype=tf.float32, name='num_detections') return boxes, dummy_classes, scores, dummy_num_detections return dummy_post_processing(boxes, scores, anchors)[::-1] @tf.keras.utils.register_keras_serializable(package='Vision') class DetectionGenerator(tf.keras.layers.Layer): """Generates the final detected boxes with scores and classes.""" def __init__( self, apply_nms: bool = True, pre_nms_top_k: int = 5000, pre_nms_score_threshold: float = 0.05, nms_iou_threshold: float = 0.5, max_num_detections: int = 100, nms_version: str = 'v2', use_cpu_nms: bool = False, soft_nms_sigma: Optional[float] = None, use_sigmoid_probability: bool = False, **kwargs, ): """Initializes a detection generator. Args: apply_nms: A `bool` of whether or not apply non maximum suppression. If False, the decoded boxes and their scores are returned. pre_nms_top_k: An `int` of the number of top scores proposals to be kept before applying NMS. pre_nms_score_threshold: A `float` of the score threshold to apply before applying NMS. Proposals whose scores are below this threshold are thrown away. nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold. max_num_detections: An `int` of the final number of total detections to generate. nms_version: A string of `batched`, `v1` or `v2` specifies NMS version. use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU. soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS. When soft_nms_sigma=0.0, we fall back to standard NMS. use_sigmoid_probability: A `bool`, if true, use sigmoid to get probability, otherwise use softmax. **kwargs: Additional keyword arguments passed to Layer. """ self._config_dict = { 'apply_nms': apply_nms, 'pre_nms_top_k': pre_nms_top_k, 'pre_nms_score_threshold': pre_nms_score_threshold, 'nms_iou_threshold': nms_iou_threshold, 'max_num_detections': max_num_detections, 'nms_version': nms_version, 'use_cpu_nms': use_cpu_nms, 'soft_nms_sigma': soft_nms_sigma, 'use_sigmoid_probability': use_sigmoid_probability, } super(DetectionGenerator, self).__init__(**kwargs) def __call__( self, raw_boxes: tf.Tensor, raw_scores: tf.Tensor, anchor_boxes: tf.Tensor, image_shape: tf.Tensor, regression_weights: Optional[List[float]] = None, bbox_per_class: bool = True, ): """Generates final detections. Args: raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]` representing the class-specific box coordinates relative to anchors. raw_scores: A `tf.Tensor` of shape of `[batch_size, K, num_classes]` representing the class logits before applying score activiation. anchor_boxes: A `tf.Tensor` of shape of `[batch_size, K, 4]` representing the corresponding anchor boxes w.r.t `box_outputs`. image_shape: A `tf.Tensor` of shape of `[batch_size, 2]` storing the image height and width w.r.t. the scaled image, i.e. 
the same image space as `box_outputs` and `anchor_boxes`. regression_weights: A list of four float numbers to scale coordinates. bbox_per_class: A `bool`. If True, perform per-class box regression. Returns: If `apply_nms` = True, the return is a dictionary with keys: `detection_boxes`: A `float` tf.Tensor of shape [batch, max_num_detections, 4] representing top detected boxes in [y1, x1, y2, x2]. `detection_scores`: A `float` `tf.Tensor` of shape [batch, max_num_detections] representing sorted confidence scores for detected boxes. The values are between [0, 1]. `detection_classes`: An `int` tf.Tensor of shape [batch, max_num_detections] representing classes for detected boxes. `num_detections`: An `int` tf.Tensor of shape [batch] only the first `num_detections` boxes are valid detections If `apply_nms` = False, the return is a dictionary with keys: `decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4] representing all the decoded boxes. `decoded_box_scores`: A `float` tf.Tensor of shape [batch, num_raw_boxes] representing socres of all the decoded boxes. """ if self._config_dict['use_sigmoid_probability']: box_scores = tf.math.sigmoid(raw_scores) else: box_scores = tf.nn.softmax(raw_scores, axis=-1) # Removes the background class. box_scores_shape = tf.shape(box_scores) box_scores_shape_list = box_scores.get_shape().as_list() batch_size = box_scores_shape[0] num_locations = box_scores_shape_list[1] num_classes = box_scores_shape_list[-1] box_scores = tf.slice(box_scores, [0, 0, 1], [-1, -1, -1]) if bbox_per_class: num_detections = num_locations * (num_classes - 1) raw_boxes = tf.reshape( raw_boxes, [batch_size, num_locations, num_classes, 4] ) raw_boxes = tf.slice(raw_boxes, [0, 0, 1, 0], [-1, -1, -1, -1]) anchor_boxes = tf.tile( tf.expand_dims(anchor_boxes, axis=2), [1, 1, num_classes - 1, 1] ) raw_boxes = tf.reshape(raw_boxes, [batch_size, num_detections, 4]) anchor_boxes = tf.reshape(anchor_boxes, [batch_size, num_detections, 4]) # Box decoding. decoded_boxes = box_ops.decode_boxes( raw_boxes, anchor_boxes, weights=regression_weights ) # Box clipping. if image_shape is not None: decoded_boxes = box_ops.clip_boxes( decoded_boxes, tf.expand_dims(image_shape, axis=1) ) if bbox_per_class: decoded_boxes = tf.reshape( decoded_boxes, [batch_size, num_locations, num_classes - 1, 4] ) else: decoded_boxes = tf.expand_dims(decoded_boxes, axis=2) if not self._config_dict['apply_nms']: return { 'decoded_boxes': decoded_boxes, 'decoded_box_scores': box_scores, } # Optionally force the NMS be run on CPU. 
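    # (Combined/batched NMS is noted earlier in this module as not being
    # supported on TPU, so pinning the NMS branch to the CPU can be
    # preferable on such accelerators.)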
if self._config_dict['use_cpu_nms']: nms_context = tf.device('cpu:0') else: nms_context = contextlib.nullcontext() with nms_context: if self._config_dict['nms_version'] == 'batched': (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = ( _generate_detections_batched( decoded_boxes, box_scores, self._config_dict['pre_nms_score_threshold'], self._config_dict['nms_iou_threshold'], self._config_dict['max_num_detections'], ) ) elif self._config_dict['nms_version'] == 'v1': (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, _) = ( _generate_detections_v1( decoded_boxes, box_scores, pre_nms_top_k=self._config_dict['pre_nms_top_k'], pre_nms_score_threshold=self._config_dict[ 'pre_nms_score_threshold' ], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'], soft_nms_sigma=self._config_dict['soft_nms_sigma'], ) ) elif self._config_dict['nms_version'] == 'v2': (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = ( _generate_detections_v2( decoded_boxes, box_scores, pre_nms_top_k=self._config_dict['pre_nms_top_k'], pre_nms_score_threshold=self._config_dict[ 'pre_nms_score_threshold' ], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'], ) ) else: raise ValueError( 'NMS version {} not supported.'.format( self._config_dict['nms_version'] ) ) # Adds 1 to offset the background class which has index 0. nmsed_classes += 1 return { 'num_detections': valid_detections, 'detection_boxes': nmsed_boxes, 'detection_classes': nmsed_classes, 'detection_scores': nmsed_scores, } def get_config(self): return self._config_dict @classmethod def from_config(cls, config): return cls(**config) @tf.keras.utils.register_keras_serializable(package='Vision') class MultilevelDetectionGenerator(tf.keras.layers.Layer): """Generates detected boxes with scores and classes for one-stage detector.""" def __init__( self, apply_nms: bool = True, pre_nms_top_k: int = 5000, pre_nms_score_threshold: float = 0.05, nms_iou_threshold: float = 0.5, max_num_detections: int = 100, nms_version: str = 'v1', use_cpu_nms: bool = False, soft_nms_sigma: Optional[float] = None, tflite_post_processing_config: Optional[Dict[str, Any]] = None, pre_nms_top_k_sharding_block: Optional[int] = None, nms_v3_refinements: Optional[int] = None, return_decoded: Optional[bool] = None, use_class_agnostic_nms: Optional[bool] = None, **kwargs, ): """Initializes a multi-level detection generator. Args: apply_nms: A `bool` of whether or not apply non maximum suppression. If False, the decoded boxes and their scores are returned. pre_nms_top_k: An `int` of the number of top scores proposals to be kept before applying NMS. pre_nms_score_threshold: A `float` of the score threshold to apply before applying NMS. Proposals whose scores are below this threshold are thrown away. nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold. max_num_detections: An `int` of the final number of total detections to generate. nms_version: A string of `batched`, `v1` or `v2` specifies NMS version use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU. soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS. When soft_nms_sigma=0.0, we fall back to standard NMS. tflite_post_processing_config: An optional dictionary containing post-processing parameters used for TFLite custom NMS op. pre_nms_top_k_sharding_block: For v3 (edge tpu friendly) NMS, avoids creating long axis for pre_nms_top_k. 
Will do top_k in shards of size [num_classes, pre_nms_top_k_sharding_block * boxes_per_location] nms_v3_refinements: For v3 (edge tpu friendly) NMS, sets how close result should be to standard NMS. When None, 2 is used. Here is some experimental deviations for different refinement values: if == 0, AP is reduced 1.0%, AR is reduced 5% on COCO if == 1, AP is reduced 0.2%, AR is reduced 2% on COCO if == 2, AP is reduced <0.1%, AR is reduced <1% on COCO return_decoded: A `bool` of whether to return decoded boxes before NMS regardless of whether `apply_nms` is True or not. use_class_agnostic_nms: A `bool` of whether non max suppression is operated on all the boxes using max scores across all classes. **kwargs: Additional keyword arguments passed to Layer. Raises: ValueError: If `use_class_agnostic_nms` is required by `nms_version` is not specified as `v2`. """ if use_class_agnostic_nms and nms_version != 'v2': raise ValueError( 'If not using TFLite custom NMS, `use_class_agnostic_nms` can only be' ' enabled for NMS v2 for now, but NMS {} is used! If you are using' ' TFLite NMS, please configure TFLite custom NMS for class-agnostic' ' NMS.'.format(nms_version) ) self._config_dict = { 'apply_nms': apply_nms, 'pre_nms_top_k': pre_nms_top_k, 'pre_nms_score_threshold': pre_nms_score_threshold, 'nms_iou_threshold': nms_iou_threshold, 'max_num_detections': max_num_detections, 'nms_version': nms_version, 'use_cpu_nms': use_cpu_nms, 'soft_nms_sigma': soft_nms_sigma, 'return_decoded': return_decoded, 'use_class_agnostic_nms': use_class_agnostic_nms, } # Don't store if were not defined if pre_nms_top_k_sharding_block is not None: self._config_dict['pre_nms_top_k_sharding_block'] = ( pre_nms_top_k_sharding_block ) if nms_v3_refinements is not None: self._config_dict['nms_v3_refinements'] = nms_v3_refinements if tflite_post_processing_config is not None: self._config_dict.update( {'tflite_post_processing_config': tflite_post_processing_config} ) super().__init__(**kwargs) def _decode_multilevel_outputs( self, raw_boxes: Mapping[str, tf.Tensor], raw_scores: Mapping[str, tf.Tensor], anchor_boxes: Mapping[str, tf.Tensor], image_shape: tf.Tensor, raw_attributes: Optional[Mapping[str, tf.Tensor]] = None, ): """Collects dict of multilevel boxes, scores, attributes into lists.""" boxes = [] scores = [] if raw_attributes: attributes = {att_name: [] for att_name in raw_attributes.keys()} else: attributes = {} levels = list(raw_boxes.keys()) min_level = int(min(levels)) max_level = int(max(levels)) for i in range(min_level, max_level + 1): raw_boxes_i = raw_boxes[str(i)] raw_scores_i = raw_scores[str(i)] batch_size = tf.shape(raw_boxes_i)[0] (_, feature_h_i, feature_w_i, num_anchors_per_locations_times_4) = ( raw_boxes_i.get_shape().as_list() ) num_locations = feature_h_i * feature_w_i num_anchors_per_locations = num_anchors_per_locations_times_4 // 4 num_classes = ( raw_scores_i.get_shape().as_list()[-1] // num_anchors_per_locations ) # Applies score transformation and remove the implicit background class. scores_i = tf.sigmoid( tf.reshape( raw_scores_i, [ batch_size, num_locations * num_anchors_per_locations, num_classes, ], ) ) scores_i = tf.slice(scores_i, [0, 0, 1], [-1, -1, -1]) # Box decoding. # The anchor boxes are shared for all data in a batch. # One stage detector only supports class agnostic box regression. 
anchor_boxes_i = tf.reshape( anchor_boxes[str(i)], [batch_size, num_locations * num_anchors_per_locations, 4], ) raw_boxes_i = tf.reshape( raw_boxes_i, [batch_size, num_locations * num_anchors_per_locations, 4], ) boxes_i = box_ops.decode_boxes(raw_boxes_i, anchor_boxes_i) # Box clipping. if image_shape is not None: boxes_i = box_ops.clip_boxes( boxes_i, tf.expand_dims(image_shape, axis=1) ) boxes.append(boxes_i) scores.append(scores_i) if raw_attributes: for att_name, raw_att in raw_attributes.items(): attribute_size = ( raw_att[str(i)].get_shape().as_list()[-1] // num_anchors_per_locations ) att_i = tf.reshape( raw_att[str(i)], [ batch_size, num_locations * num_anchors_per_locations, attribute_size, ], ) attributes[att_name].append(att_i) boxes = tf.concat(boxes, axis=1) boxes = tf.expand_dims(boxes, axis=2) scores = tf.concat(scores, axis=1) if raw_attributes: for att_name in raw_attributes.keys(): attributes[att_name] = tf.concat(attributes[att_name], axis=1) attributes[att_name] = tf.expand_dims(attributes[att_name], axis=2) return boxes, scores, attributes def _decode_multilevel_outputs_and_pre_nms_top_k( self, raw_boxes: Mapping[str, tf.Tensor], raw_scores: Mapping[str, tf.Tensor], anchor_boxes: Mapping[str, tf.Tensor], image_shape: tf.Tensor, ) -> Tuple[tf.Tensor, tf.Tensor]: """Collects dict of multilevel boxes, scores into lists.""" boxes = None scores = None pre_nms_top_k = self._config_dict['pre_nms_top_k'] # TODO(b/258007436): consider removing when compiler be able to handle # it on its own. pre_nms_top_k_sharding_block = self._config_dict.get( 'pre_nms_top_k_sharding_block', 128 ) levels = list(raw_boxes.keys()) min_level = int(min(levels)) max_level = int(max(levels)) if image_shape is not None: clip_shape = tf.expand_dims(tf.expand_dims(image_shape, axis=1), axis=1) else: clip_shape = None for i in range(max_level, min_level - 1, -1): ( batch_size, unsharded_h, unsharded_w, num_anchors_per_locations_times_4, ) = ( raw_boxes[str(i)].get_shape().as_list() ) num_anchors_per_locations = num_anchors_per_locations_times_4 // 4 if batch_size is None: batch_size = tf.shape(raw_boxes[str(i)])[0] block = max(1, pre_nms_top_k_sharding_block // unsharded_w) boxes_shape = [ batch_size, unsharded_h, unsharded_w * num_anchors_per_locations, 4, ] decoded_boxes = box_ops.decode_boxes( tf.reshape(raw_boxes[str(i)], boxes_shape), tf.reshape(anchor_boxes[str(i)], boxes_shape), ) if clip_shape is not None: decoded_boxes = box_ops.clip_boxes( decoded_boxes, clip_shape, ) for raw_scores_i, decoded_boxes_i in edgetpu.shard_tensors( 1, block, (raw_scores[str(i)], decoded_boxes) ): (_, feature_h_i, feature_w_i, _) = raw_scores_i.get_shape().as_list() num_locations = feature_h_i * feature_w_i num_classes = ( raw_scores_i.get_shape().as_list()[-1] // num_anchors_per_locations ) # Applies score transformation and remove the implicit background class. scores_i = tf.slice( tf.transpose( tf.reshape( raw_scores_i, [ batch_size, num_locations * num_anchors_per_locations, num_classes, ], ), [0, 2, 1], ), [0, 1, 0], [-1, -1, -1], ) # Box decoding. # The anchor boxes are shared for all data in a batch. # One stage detector only supports class agnostic box regression. 
boxes_i = tf.tile( tf.reshape( decoded_boxes_i, [batch_size, 1, num_locations * num_anchors_per_locations, 4], ), [1, num_classes - 1, 1, 1], ) scores, boxes = edgetpu.concat_and_top_k( pre_nms_top_k, (scores, scores_i), (boxes, boxes_i) ) boxes: tf.Tensor = boxes # pytype: disable=annotation-type-mismatch return boxes, tf.sigmoid(scores) def __call__( self, raw_boxes: Mapping[str, tf.Tensor], raw_scores: Mapping[str, tf.Tensor], anchor_boxes: Mapping[str, tf.Tensor], image_shape: tf.Tensor, raw_attributes: Optional[Mapping[str, tf.Tensor]] = None, ) -> Mapping[str, Any]: """Generates final detections. Args: raw_boxes: A `dict` with keys representing FPN levels and values representing box tenors of shape `[batch, feature_h, feature_w, num_anchors * 4]`. raw_scores: A `dict` with keys representing FPN levels and values representing logit tensors of shape `[batch, feature_h, feature_w, num_anchors * num_classes]`. anchor_boxes: A `dict` with keys representing FPN levels and values representing anchor tenors of shape `[batch_size, K, 4]` representing the corresponding anchor boxes w.r.t `box_outputs`. image_shape: A `tf.Tensor` of shape of [batch_size, 2] storing the image height and width w.r.t. the scaled image, i.e. the same image space as `box_outputs` and `anchor_boxes`. raw_attributes: If not None, a `dict` of (attribute_name, attribute_prediction) pairs. `attribute_prediction` is a dict that contains keys representing FPN levels and values representing tenors of shape `[batch, feature_h, feature_w, num_anchors * attribute_size]`. Returns: If `apply_nms` = True, the return is a dictionary with keys: `detection_boxes`: A `float` tf.Tensor of shape [batch, max_num_detections, 4] representing top detected boxes in [y1, x1, y2, x2]. `detection_scores`: A `float` tf.Tensor of shape [batch, max_num_detections] representing sorted confidence scores for detected boxes. The values are between [0, 1]. `detection_classes`: An `int` tf.Tensor of shape [batch, max_num_detections] representing classes for detected boxes. `num_detections`: An `int` tf.Tensor of shape [batch] only the first `num_detections` boxes are valid detections `detection_attributes`: A dict. Values of the dict is a `float` tf.Tensor of shape [batch, max_num_detections, attribute_size] representing attribute predictions for detected boxes. If `apply_nms` = False, the return is a dictionary with following keys. If `return_decoded` = True, the following items will also be included even if `apply_nms` = True: `decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4] representing all the decoded boxes. `decoded_box_scores`: A `float` tf.Tensor of shape [batch, num_raw_boxes] representing socres of all the decoded boxes. `decoded_box_attributes`: A dict. Values in the dict is a `float` tf.Tensor of shape [batch, num_raw_boxes, attribute_size] representing attribute predictions of all the decoded boxes. 
""" if ( self._config_dict['apply_nms'] and self._config_dict['nms_version'] == 'tflite' ): boxes, classes, scores, num_detections = _generate_detections_tflite( raw_boxes, raw_scores, anchor_boxes, self.get_config()['tflite_post_processing_config'], ) return { 'num_detections': num_detections, 'detection_boxes': boxes, 'detection_classes': classes, 'detection_scores': scores, } if self._config_dict['nms_version'] != 'v3': boxes, scores, attributes = self._decode_multilevel_outputs( raw_boxes, raw_scores, anchor_boxes, image_shape, raw_attributes ) else: attributes = None boxes, scores = self._decode_multilevel_outputs_and_pre_nms_top_k( raw_boxes, raw_scores, anchor_boxes, image_shape ) decoded_results = { 'decoded_boxes': boxes, 'decoded_box_scores': scores, 'decoded_box_attributes': attributes, } if not self._config_dict['apply_nms']: return decoded_results # Optionally force the NMS to run on CPU. if self._config_dict['use_cpu_nms']: nms_context = tf.device('cpu:0') else: nms_context = contextlib.nullcontext() with nms_context: if raw_attributes and (self._config_dict['nms_version'] != 'v1'): raise ValueError( 'Attribute learning is only supported for NMSv1 but NMS {} is used.' .format(self._config_dict['nms_version']) ) if self._config_dict['nms_version'] == 'batched': (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = ( _generate_detections_batched( boxes, scores, self._config_dict['pre_nms_score_threshold'], self._config_dict['nms_iou_threshold'], self._config_dict['max_num_detections'], ) ) # Set `nmsed_attributes` to None for batched NMS. nmsed_attributes = {} elif self._config_dict['nms_version'] == 'v1': ( nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes, ) = _generate_detections_v1( boxes, scores, attributes=attributes if raw_attributes else None, pre_nms_top_k=self._config_dict['pre_nms_top_k'], pre_nms_score_threshold=self._config_dict[ 'pre_nms_score_threshold' ], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'], soft_nms_sigma=self._config_dict['soft_nms_sigma'], ) elif self._config_dict['nms_version'] == 'v2': (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = ( _generate_detections_v2( boxes, scores, pre_nms_top_k=self._config_dict['pre_nms_top_k'], pre_nms_score_threshold=self._config_dict[ 'pre_nms_score_threshold' ], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'], use_class_agnostic_nms=self._config_dict[ 'use_class_agnostic_nms' ], ) ) # Set `nmsed_attributes` to None for v2. nmsed_attributes = {} elif self._config_dict['nms_version'] == 'v3': (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = ( _generate_detections_v3( boxes, scores, pre_nms_score_threshold=self._config_dict[ 'pre_nms_score_threshold' ], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'], refinements=self._config_dict.get('nms_v3_refinements', 2), ) ) # Set `nmsed_attributes` to None for v3. nmsed_attributes = {} else: raise ValueError( 'NMS version {} not supported.'.format( self._config_dict['nms_version'] ) ) # Adds 1 to offset the background class which has index 0. 
nmsed_classes += 1 return { **(decoded_results if self._config_dict['return_decoded'] else {}), 'num_detections': valid_detections, 'detection_boxes': nmsed_boxes, 'detection_classes': nmsed_classes, 'detection_scores': nmsed_scores, 'detection_attributes': nmsed_attributes, } def get_config(self): return self._config_dict @classmethod def from_config(cls, config): return cls(**config)
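A minimal usage sketch for the `DetectionGenerator` layer defined above. The shapes, anchor construction, and thresholds below are illustrative assumptions rather than values taken from the library; the sketch only shows how the call signature and the returned dictionary fit together.

import tensorflow as tf

# Hypothetical sizes: 2 images, 100 proposals, 5 classes (index 0 = background).
batch, num_rois, num_classes = 2, 100, 5

raw_boxes = 0.1 * tf.random.normal([batch, num_rois, num_classes * 4])
raw_scores = tf.random.normal([batch, num_rois, num_classes])
# Simple 32x32 anchors in [y1, x1, y2, x2] form.
anchor_y1x1 = tf.random.uniform([batch, num_rois, 2], maxval=600.0)
anchor_boxes = tf.concat([anchor_y1x1, anchor_y1x1 + 32.0], axis=-1)
image_shape = tf.constant([[640.0, 640.0], [640.0, 640.0]])

generator = DetectionGenerator(
    apply_nms=True,
    pre_nms_top_k=1000,
    pre_nms_score_threshold=0.05,
    nms_iou_threshold=0.5,
    max_num_detections=100,
    nms_version='v2')

detections = generator(
    raw_boxes, raw_scores, anchor_boxes, image_shape, bbox_per_class=True)
# detections['detection_boxes']   -> [batch, max_num_detections, 4]
# detections['detection_scores']  -> [batch, max_num_detections]
# detections['detection_classes'] -> [batch, max_num_detections]
# detections['num_detections']    -> [batch]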
64,978
40.04801
80
py
models
models-master/official/vision/modeling/layers/nn_blocks_3d.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains common building blocks for 3D networks.""" # Import libraries import tensorflow as tf from official.modeling import tf_utils from official.vision.modeling.layers import nn_layers @tf.keras.utils.register_keras_serializable(package='Vision') class SelfGating(tf.keras.layers.Layer): """Feature gating as used in S3D-G. This implements the S3D-G network from: Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu, Kevin Murphy. Rethinking Spatiotemporal Feature Learning: Speed-Accuracy Trade-offs in Video Classification. (https://arxiv.org/pdf/1712.04851.pdf) """ def __init__(self, filters, **kwargs): """Initializes a self-gating layer. Args: filters: An `int` number of filters for the convolutional layer. **kwargs: Additional keyword arguments to be passed. """ super(SelfGating, self).__init__(**kwargs) self._filters = filters def build(self, input_shape): self._spatial_temporal_average = tf.keras.layers.GlobalAveragePooling3D() # No BN and activation after conv. self._transformer_w = tf.keras.layers.Conv3D( filters=self._filters, kernel_size=[1, 1, 1], use_bias=True, kernel_initializer=tf.keras.initializers.TruncatedNormal( mean=0.0, stddev=0.01)) super(SelfGating, self).build(input_shape) def call(self, inputs): x = self._spatial_temporal_average(inputs) x = tf.expand_dims(x, 1) x = tf.expand_dims(x, 2) x = tf.expand_dims(x, 3) x = self._transformer_w(x) x = tf.nn.sigmoid(x) return tf.math.multiply(x, inputs) @tf.keras.utils.register_keras_serializable(package='Vision') class BottleneckBlock3D(tf.keras.layers.Layer): """Creates a 3D bottleneck block.""" def __init__(self, filters, temporal_kernel_size, temporal_strides, spatial_strides, stochastic_depth_drop_rate=0.0, se_ratio=None, use_self_gating=False, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, activation='relu', use_sync_bn=False, norm_momentum=0.99, norm_epsilon=0.001, **kwargs): """Initializes a 3D bottleneck block with BN after convolutions. Args: filters: An `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. temporal_kernel_size: An `int` of kernel size for the temporal convolutional layer. temporal_strides: An `int` of ftemporal stride for the temporal convolutional layer. spatial_strides: An `int` of spatial stride for the spatial convolutional layer. stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for the stochastic depth layer. se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer. use_self_gating: A `bool` of whether to apply self-gating module or not. kernel_initializer: A `str` of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d. Default to None. activation: A `str` name of the activation function. 
use_sync_bn: A `bool`. If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. **kwargs: Additional keyword arguments to be passed. """ super(BottleneckBlock3D, self).__init__(**kwargs) self._filters = filters self._temporal_kernel_size = temporal_kernel_size self._spatial_strides = spatial_strides self._temporal_strides = temporal_strides self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._use_self_gating = use_self_gating self._se_ratio = se_ratio self._use_sync_bn = use_sync_bn self._activation = activation self._kernel_initializer = kernel_initializer self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._norm = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation_fn = tf_utils.get_activation(activation) def build(self, input_shape): self._shortcut_maxpool = tf.keras.layers.MaxPool3D( pool_size=[1, 1, 1], strides=[ self._temporal_strides, self._spatial_strides, self._spatial_strides ]) self._shortcut_conv = tf.keras.layers.Conv3D( filters=4 * self._filters, kernel_size=1, strides=[ self._temporal_strides, self._spatial_strides, self._spatial_strides ], use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm0 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn) self._temporal_conv = tf.keras.layers.Conv3D( filters=self._filters, kernel_size=[self._temporal_kernel_size, 1, 1], strides=[self._temporal_strides, 1, 1], padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm1 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn) self._spatial_conv = tf.keras.layers.Conv3D( filters=self._filters, kernel_size=[1, 3, 3], strides=[1, self._spatial_strides, self._spatial_strides], padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm2 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn) self._expand_conv = tf.keras.layers.Conv3D( filters=4 * self._filters, kernel_size=[1, 1, 1], strides=[1, 1, 1], padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm3 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn) if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1: self._squeeze_excitation = nn_layers.SqueezeExcitation( in_filters=self._filters * 4, out_filters=self._filters * 4, se_ratio=self._se_ratio, use_3d_input=True, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) else: 
self._squeeze_excitation = None if self._stochastic_depth_drop_rate: self._stochastic_depth = nn_layers.StochasticDepth( self._stochastic_depth_drop_rate) else: self._stochastic_depth = None if self._use_self_gating: self._self_gating = SelfGating(filters=4 * self._filters) else: self._self_gating = None super(BottleneckBlock3D, self).build(input_shape) def get_config(self): config = { 'filters': self._filters, 'temporal_kernel_size': self._temporal_kernel_size, 'temporal_strides': self._temporal_strides, 'spatial_strides': self._spatial_strides, 'use_self_gating': self._use_self_gating, 'se_ratio': self._se_ratio, 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon } base_config = super(BottleneckBlock3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs, training=None): in_filters = inputs.shape.as_list()[-1] if in_filters == 4 * self._filters: if self._temporal_strides == 1 and self._spatial_strides == 1: shortcut = inputs else: shortcut = self._shortcut_maxpool(inputs) else: shortcut = self._shortcut_conv(inputs) shortcut = self._norm0(shortcut) x = self._temporal_conv(inputs) x = self._norm1(x) x = self._activation_fn(x) x = self._spatial_conv(x) x = self._norm2(x) x = self._activation_fn(x) x = self._expand_conv(x) x = self._norm3(x) # Apply self-gating, SE, stochastic depth. if self._self_gating: x = self._self_gating(x) if self._squeeze_excitation: x = self._squeeze_excitation(x) if self._stochastic_depth: x = self._stochastic_depth(x, training=training) # Apply activation before additional modules. x = self._activation_fn(x + shortcut) return x
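A brief usage sketch for `BottleneckBlock3D`; the input shape and hyperparameters are placeholders chosen only to illustrate how the strides and the 4x filter expansion shape the output.

import tensorflow as tf

# Hypothetical video feature map: [batch, time, height, width, channels].
inputs = tf.keras.Input(shape=(8, 56, 56, 256), batch_size=1)

block = BottleneckBlock3D(
    filters=64,               # the final 1x1x1 conv expands to 4 * 64 = 256 channels.
    temporal_kernel_size=3,
    temporal_strides=1,
    spatial_strides=2,
    use_self_gating=True,
    se_ratio=0.25,
    stochastic_depth_drop_rate=0.1)

outputs = block(inputs)       # -> [1, 8, 28, 28, 256]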
10,565
35.560554
80
py
models
models-master/official/vision/modeling/layers/mask_sampler.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of mask sampler.""" # Import libraries import tensorflow as tf from official.vision.ops import spatial_transform_ops def _sample_and_crop_foreground_masks(candidate_rois: tf.Tensor, candidate_gt_boxes: tf.Tensor, candidate_gt_classes: tf.Tensor, candidate_gt_indices: tf.Tensor, gt_masks: tf.Tensor, num_sampled_masks: int = 128, mask_target_size: int = 28): """Samples and creates cropped foreground masks for training. Args: candidate_rois: A `tf.Tensor` of shape of [batch_size, N, 4], where N is the number of candidate RoIs to be considered for mask sampling. It includes both positive and negative RoIs. The `num_mask_samples_per_image` positive RoIs will be sampled to create mask training targets. candidate_gt_boxes: A `tf.Tensor` of shape of [batch_size, N, 4], storing the corresponding groundtruth boxes to the `candidate_rois`. candidate_gt_classes: A `tf.Tensor` of shape of [batch_size, N], storing the corresponding groundtruth classes to the `candidate_rois`. 0 in the tensor corresponds to the background class, i.e. negative RoIs. candidate_gt_indices: A `tf.Tensor` of shape [batch_size, N], storing the corresponding groundtruth instance indices to the `candidate_gt_boxes`, i.e. gt_boxes[candidate_gt_indices[:, i]] = candidate_gt_boxes[:, i] and gt_boxes which is of shape [batch_size, MAX_INSTANCES, 4], M >= N, is the superset of candidate_gt_boxes. gt_masks: A `tf.Tensor` of [batch_size, MAX_INSTANCES, mask_height, mask_width] containing all the groundtruth masks which sample masks are drawn from. num_sampled_masks: An `int` that specifies the number of masks to sample. mask_target_size: An `int` that specifies the final cropped mask size after sampling. The output masks are resized w.r.t the sampled RoIs. Returns: foreground_rois: A `tf.Tensor` of shape of [batch_size, K, 4] storing the RoI that corresponds to the sampled foreground masks, where K = num_mask_samples_per_image. foreground_classes: A `tf.Tensor` of shape of [batch_size, K] storing the classes corresponding to the sampled foreground masks. cropoped_foreground_masks: A `tf.Tensor` of shape of [batch_size, K, mask_target_size, mask_target_size] storing the cropped foreground masks used for training. 
""" _, fg_instance_indices = tf.nn.top_k( tf.cast(tf.greater(candidate_gt_classes, 0), dtype=tf.int32), k=num_sampled_masks) fg_instance_indices_shape = tf.shape(fg_instance_indices) batch_indices = ( tf.expand_dims(tf.range(fg_instance_indices_shape[0]), axis=-1) * tf.ones([1, fg_instance_indices_shape[-1]], dtype=tf.int32)) gather_nd_instance_indices = tf.stack( [batch_indices, fg_instance_indices], axis=-1) foreground_rois = tf.gather_nd( candidate_rois, gather_nd_instance_indices) foreground_boxes = tf.gather_nd( candidate_gt_boxes, gather_nd_instance_indices) foreground_classes = tf.gather_nd( candidate_gt_classes, gather_nd_instance_indices) foreground_gt_indices = tf.gather_nd( candidate_gt_indices, gather_nd_instance_indices) foreground_gt_indices = tf.where( tf.equal(foreground_gt_indices, -1), tf.zeros_like(foreground_gt_indices), foreground_gt_indices) foreground_gt_indices_shape = tf.shape(foreground_gt_indices) batch_indices = ( tf.expand_dims(tf.range(foreground_gt_indices_shape[0]), axis=-1) * tf.ones([1, foreground_gt_indices_shape[-1]], dtype=tf.int32)) gather_nd_gt_indices = tf.stack( [batch_indices, foreground_gt_indices], axis=-1) foreground_masks = tf.gather_nd(gt_masks, gather_nd_gt_indices) cropped_foreground_masks = spatial_transform_ops.crop_mask_in_target_box( foreground_masks, foreground_boxes, foreground_rois, mask_target_size, sample_offset=0.5) return foreground_rois, foreground_classes, cropped_foreground_masks @tf.keras.utils.register_keras_serializable(package='Vision') class MaskSampler(tf.keras.layers.Layer): """Samples and creates mask training targets.""" def __init__(self, mask_target_size: int, num_sampled_masks: int, **kwargs): self._config_dict = { 'mask_target_size': mask_target_size, 'num_sampled_masks': num_sampled_masks, } super(MaskSampler, self).__init__(**kwargs) def call(self, candidate_rois: tf.Tensor, candidate_gt_boxes: tf.Tensor, candidate_gt_classes: tf.Tensor, candidate_gt_indices: tf.Tensor, gt_masks: tf.Tensor): """Samples and creates mask targets for training. Args: candidate_rois: A `tf.Tensor` of shape of [batch_size, N, 4], where N is the number of candidate RoIs to be considered for mask sampling. It includes both positive and negative RoIs. The `num_mask_samples_per_image` positive RoIs will be sampled to create mask training targets. candidate_gt_boxes: A `tf.Tensor` of shape of [batch_size, N, 4], storing the corresponding groundtruth boxes to the `candidate_rois`. candidate_gt_classes: A `tf.Tensor` of shape of [batch_size, N], storing the corresponding groundtruth classes to the `candidate_rois`. 0 in the tensor corresponds to the background class, i.e. negative RoIs. candidate_gt_indices: A `tf.Tensor` of shape [batch_size, N], storing the corresponding groundtruth instance indices to the `candidate_gt_boxes`, i.e. gt_boxes[candidate_gt_indices[:, i]] = candidate_gt_boxes[:, i], where gt_boxes which is of shape [batch_size, MAX_INSTANCES, 4], M >= N, is the superset of candidate_gt_boxes. gt_masks: A `tf.Tensor` of [batch_size, MAX_INSTANCES, mask_height, mask_width] containing all the groundtruth masks which sample masks are drawn from. after sampling. The output masks are resized w.r.t the sampled RoIs. Returns: foreground_rois: A `tf.Tensor` of shape of [batch_size, K, 4] storing the RoI that corresponds to the sampled foreground masks, where K = num_mask_samples_per_image. foreground_classes: A `tf.Tensor` of shape of [batch_size, K] storing the classes corresponding to the sampled foreground masks. 
cropoped_foreground_masks: A `tf.Tensor` of shape of [batch_size, K, mask_target_size, mask_target_size] storing the cropped foreground masks used for training. """ foreground_rois, foreground_classes, cropped_foreground_masks = ( _sample_and_crop_foreground_masks( candidate_rois, candidate_gt_boxes, candidate_gt_classes, candidate_gt_indices, gt_masks, self._config_dict['num_sampled_masks'], self._config_dict['mask_target_size'])) return foreground_rois, foreground_classes, cropped_foreground_masks def get_config(self): return self._config_dict @classmethod def from_config(cls, config): return cls(**config)
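An illustrative sketch of calling `MaskSampler`; every shape and dummy tensor below is an assumption made purely for demonstration (boxes are built so that y2 > y1 and x2 > x1).

import tensorflow as tf

batch, num_rois, max_instances = 2, 512, 100

sampler = MaskSampler(mask_target_size=28, num_sampled_masks=128)

# Build well-formed [y1, x1, y2, x2] boxes.
y1x1 = tf.random.uniform([batch, num_rois, 2], maxval=256.0)
hw = tf.random.uniform([batch, num_rois, 2], minval=16.0, maxval=128.0)
candidate_rois = tf.concat([y1x1, y1x1 + hw], axis=-1)
candidate_gt_boxes = candidate_rois  # reuse the same boxes in this sketch.
candidate_gt_classes = tf.random.uniform(
    [batch, num_rois], maxval=5, dtype=tf.int32)  # 0 means background.
candidate_gt_indices = tf.random.uniform(
    [batch, num_rois], maxval=max_instances, dtype=tf.int32)
gt_masks = tf.random.uniform([batch, max_instances, 112, 112])

fg_rois, fg_classes, fg_mask_targets = sampler(
    candidate_rois, candidate_gt_boxes, candidate_gt_classes,
    candidate_gt_indices, gt_masks)
# fg_rois:         [batch, 128, 4]
# fg_classes:      [batch, 128]
# fg_mask_targets: [batch, 128, 28, 28]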
7,914
46.39521
80
py
models
models-master/official/vision/modeling/layers/nn_blocks_3d_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for resnet.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.layers import nn_blocks_3d class NNBlocksTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (nn_blocks_3d.BottleneckBlock3D, 1, 1, 2, True, 0.2, 0.1), (nn_blocks_3d.BottleneckBlock3D, 3, 2, 1, False, 0.0, 0.0), ) def test_bottleneck_block_creation(self, block_fn, temporal_kernel_size, temporal_strides, spatial_strides, use_self_gating, se_ratio, stochastic_depth): temporal_size = 16 spatial_size = 128 filters = 256 inputs = tf.keras.Input( shape=(temporal_size, spatial_size, spatial_size, filters * 4), batch_size=1) block = block_fn( filters=filters, temporal_kernel_size=temporal_kernel_size, temporal_strides=temporal_strides, spatial_strides=spatial_strides, use_self_gating=use_self_gating, se_ratio=se_ratio, stochastic_depth_drop_rate=stochastic_depth) features = block(inputs) self.assertAllEqual([ 1, temporal_size // temporal_strides, spatial_size // spatial_strides, spatial_size // spatial_strides, filters * 4 ], features.shape.as_list()) if __name__ == '__main__': tf.test.main()
2,028
33.389831
78
py
models
models-master/official/vision/modeling/layers/roi_generator.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of ROI generator.""" from typing import Optional, Mapping # Import libraries import tensorflow as tf from official.vision.ops import box_ops from official.vision.ops import nms def _multilevel_propose_rois(raw_boxes: Mapping[str, tf.Tensor], raw_scores: Mapping[str, tf.Tensor], anchor_boxes: Mapping[str, tf.Tensor], image_shape: tf.Tensor, pre_nms_top_k: int = 2000, pre_nms_score_threshold: float = 0.0, pre_nms_min_size_threshold: float = 0.0, nms_iou_threshold: float = 0.7, num_proposals: int = 1000, use_batched_nms: bool = False, decode_boxes: bool = True, clip_boxes: bool = True, apply_sigmoid_to_score: bool = True): """Proposes RoIs given a group of candidates from different FPN levels. The following describes the steps: 1. For each individual level: a. Apply sigmoid transform if specified. b. Decode boxes if specified. c. Clip boxes if specified. d. Filter small boxes and those fall outside image if specified. e. Apply pre-NMS filtering including pre-NMS top k and score thresholding. f. Apply NMS. 2. Aggregate post-NMS boxes from each level. 3. Apply an overall top k to generate the final selected RoIs. Args: raw_boxes: A `dict` with keys representing FPN levels and values representing box tenors of shape [batch_size, feature_h, feature_w, num_anchors * 4]. raw_scores: A `dict` with keys representing FPN levels and values representing logit tensors of shape [batch_size, feature_h, feature_w, num_anchors]. anchor_boxes: A `dict` with keys representing FPN levels and values representing anchor box tensors of shape [batch_size, feature_h * feature_w * num_anchors, 4]. image_shape: A `tf.Tensor` of shape [batch_size, 2] where the last dimension are [height, width] of the scaled image. pre_nms_top_k: An `int` of top scoring RPN proposals *per level* to keep before applying NMS. Default: 2000. pre_nms_score_threshold: A `float` between 0 and 1 representing the minimal box score to keep before applying NMS. This is often used as a pre-filtering step for better performance. Default: 0, no filtering is applied. pre_nms_min_size_threshold: A `float` representing the minimal box size in each side (w.r.t. the scaled image) to keep before applying NMS. This is often used as a pre-filtering step for better performance. Default: 0, no filtering is applied. nms_iou_threshold: A `float` between 0 and 1 representing the IoU threshold used for NMS. If 0.0, no NMS is applied. Default: 0.7. num_proposals: An `int` of top scoring RPN proposals *in total* to keep after applying NMS. Default: 1000. use_batched_nms: A `bool` indicating whether NMS is applied in batch using `tf.image.combined_non_max_suppression`. Currently only available in CPU/GPU. Default is False. decode_boxes: A `bool` indicating whether `raw_boxes` needs to be decoded using `anchor_boxes`. If False, use `raw_boxes` directly and ignore `anchor_boxes`. Default is True. 
clip_boxes: A `bool` indicating whether boxes are first clipped to the scaled image size before appliying NMS. If False, no clipping is applied and `image_shape` is ignored. Default is True. apply_sigmoid_to_score: A `bool` indicating whether apply sigmoid to `raw_scores` before applying NMS. Default is True. Returns: selected_rois: A `tf.Tensor` of shape [batch_size, num_proposals, 4], representing the box coordinates of the selected proposals w.r.t. the scaled image. selected_roi_scores: A `tf.Tensor` of shape [batch_size, num_proposals, 1], representing the scores of the selected proposals. """ with tf.name_scope('multilevel_propose_rois'): rois = [] roi_scores = [] image_shape = tf.expand_dims(image_shape, axis=1) for level in sorted(raw_scores.keys()): with tf.name_scope('level_%s' % level): _, feature_h, feature_w, num_anchors_per_location = ( raw_scores[level].get_shape().as_list()) num_boxes = feature_h * feature_w * num_anchors_per_location this_level_scores = tf.reshape(raw_scores[level], [-1, num_boxes]) this_level_boxes = tf.reshape(raw_boxes[level], [-1, num_boxes, 4]) this_level_anchors = tf.cast( tf.reshape(anchor_boxes[level], [-1, num_boxes, 4]), dtype=this_level_scores.dtype) if apply_sigmoid_to_score: this_level_scores = tf.sigmoid(this_level_scores) if decode_boxes: this_level_boxes = box_ops.decode_boxes( this_level_boxes, this_level_anchors) if clip_boxes: this_level_boxes = box_ops.clip_boxes( this_level_boxes, image_shape) if pre_nms_min_size_threshold > 0.0: this_level_boxes, this_level_scores = box_ops.filter_boxes( this_level_boxes, this_level_scores, image_shape, pre_nms_min_size_threshold) this_level_pre_nms_top_k = min(num_boxes, pre_nms_top_k) this_level_post_nms_top_k = min(num_boxes, num_proposals) if nms_iou_threshold > 0.0: if use_batched_nms: this_level_rois, this_level_roi_scores, _, _ = ( tf.image.combined_non_max_suppression( tf.expand_dims(this_level_boxes, axis=2), tf.expand_dims(this_level_scores, axis=-1), max_output_size_per_class=this_level_pre_nms_top_k, max_total_size=this_level_post_nms_top_k, iou_threshold=nms_iou_threshold, score_threshold=pre_nms_score_threshold, pad_per_class=False, clip_boxes=False)) else: if pre_nms_score_threshold > 0.0: this_level_boxes, this_level_scores = ( box_ops.filter_boxes_by_scores( this_level_boxes, this_level_scores, pre_nms_score_threshold)) this_level_boxes, this_level_scores = box_ops.top_k_boxes( this_level_boxes, this_level_scores, k=this_level_pre_nms_top_k) this_level_roi_scores, this_level_rois = ( nms.sorted_non_max_suppression_padded( this_level_scores, this_level_boxes, max_output_size=this_level_post_nms_top_k, iou_threshold=nms_iou_threshold)) else: this_level_rois, this_level_roi_scores = box_ops.top_k_boxes( this_level_boxes, this_level_scores, k=this_level_post_nms_top_k) rois.append(this_level_rois) roi_scores.append(this_level_roi_scores) all_rois = tf.concat(rois, axis=1) all_roi_scores = tf.concat(roi_scores, axis=1) with tf.name_scope('top_k_rois'): _, num_valid_rois = all_roi_scores.get_shape().as_list() overall_top_k = min(num_valid_rois, num_proposals) selected_rois, selected_roi_scores = box_ops.top_k_boxes( all_rois, all_roi_scores, k=overall_top_k) return selected_rois, selected_roi_scores @tf.keras.utils.register_keras_serializable(package='Vision') class MultilevelROIGenerator(tf.keras.layers.Layer): """Proposes RoIs for the second stage processing.""" def __init__(self, pre_nms_top_k: int = 2000, pre_nms_score_threshold: float = 0.0, pre_nms_min_size_threshold: float = 0.0, 
nms_iou_threshold: float = 0.7, num_proposals: int = 1000, test_pre_nms_top_k: int = 1000, test_pre_nms_score_threshold: float = 0.0, test_pre_nms_min_size_threshold: float = 0.0, test_nms_iou_threshold: float = 0.7, test_num_proposals: int = 1000, use_batched_nms: bool = False, **kwargs): """Initializes a ROI generator. The ROI generator transforms the raw predictions from RPN to ROIs. Args: pre_nms_top_k: An `int` of the number of top scores proposals to be kept before applying NMS. pre_nms_score_threshold: A `float` of the score threshold to apply before applying NMS. Proposals whose scores are below this threshold are thrown away. pre_nms_min_size_threshold: A `float` of the threshold of each side of the box (w.r.t. the scaled image). Proposals whose sides are below this threshold are thrown away. nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold. num_proposals: An `int` of the final number of proposals to generate. test_pre_nms_top_k: An `int` of the number of top scores proposals to be kept before applying NMS in testing. test_pre_nms_score_threshold: A `float` of the score threshold to apply before applying NMS in testing. Proposals whose scores are below this threshold are thrown away. test_pre_nms_min_size_threshold: A `float` of the threshold of each side of the box (w.r.t. the scaled image) in testing. Proposals whose sides are below this threshold are thrown away. test_nms_iou_threshold: A `float` in [0, 1] of the NMS IoU threshold in testing. test_num_proposals: An `int` of the final number of proposals to generate in testing. use_batched_nms: A `bool` of whether or not use `tf.image.combined_non_max_suppression`. **kwargs: Additional keyword arguments passed to Layer. """ self._config_dict = { 'pre_nms_top_k': pre_nms_top_k, 'pre_nms_score_threshold': pre_nms_score_threshold, 'pre_nms_min_size_threshold': pre_nms_min_size_threshold, 'nms_iou_threshold': nms_iou_threshold, 'num_proposals': num_proposals, 'test_pre_nms_top_k': test_pre_nms_top_k, 'test_pre_nms_score_threshold': test_pre_nms_score_threshold, 'test_pre_nms_min_size_threshold': test_pre_nms_min_size_threshold, 'test_nms_iou_threshold': test_nms_iou_threshold, 'test_num_proposals': test_num_proposals, 'use_batched_nms': use_batched_nms, } super(MultilevelROIGenerator, self).__init__(**kwargs) def call(self, raw_boxes: Mapping[str, tf.Tensor], raw_scores: Mapping[str, tf.Tensor], anchor_boxes: Mapping[str, tf.Tensor], image_shape: tf.Tensor, training: Optional[bool] = None): """Proposes RoIs given a group of candidates from different FPN levels. The following describes the steps: 1. For each individual level: a. Apply sigmoid transform if specified. b. Decode boxes if specified. c. Clip boxes if specified. d. Filter small boxes and those fall outside image if specified. e. Apply pre-NMS filtering including pre-NMS top k and score thresholding. f. Apply NMS. 2. Aggregate post-NMS boxes from each level. 3. Apply an overall top k to generate the final selected RoIs. Args: raw_boxes: A `dict` with keys representing FPN levels and values representing box tenors of shape [batch, feature_h, feature_w, num_anchors * 4]. raw_scores: A `dict` with keys representing FPN levels and values representing logit tensors of shape [batch, feature_h, feature_w, num_anchors]. anchor_boxes: A `dict` with keys representing FPN levels and values representing anchor box tensors of shape [batch, feature_h * feature_w * num_anchors, 4]. 
image_shape: A `tf.Tensor` of shape [batch, 2] where the last dimension is [height, width] of the scaled image. training: A `bool` that indicates whether it is in training mode. Returns: roi_boxes: A `tf.Tensor` of shape [batch, num_proposals, 4], the proposed ROIs in scaled image coordinates. roi_scores: A `tf.Tensor` of shape [batch, num_proposals], scores of the proposed ROIs. """ roi_boxes, roi_scores = _multilevel_propose_rois( raw_boxes, raw_scores, anchor_boxes, image_shape, pre_nms_top_k=( self._config_dict['pre_nms_top_k'] if training else self._config_dict['test_pre_nms_top_k']), pre_nms_score_threshold=( self._config_dict['pre_nms_score_threshold'] if training else self._config_dict['test_pre_nms_score_threshold']), pre_nms_min_size_threshold=( self._config_dict['pre_nms_min_size_threshold'] if training else self._config_dict['test_pre_nms_min_size_threshold']), nms_iou_threshold=( self._config_dict['nms_iou_threshold'] if training else self._config_dict['test_nms_iou_threshold']), num_proposals=( self._config_dict['num_proposals'] if training else self._config_dict['test_num_proposals']), use_batched_nms=self._config_dict['use_batched_nms'], decode_boxes=True, clip_boxes=True, apply_sigmoid_to_score=True) return roi_boxes, roi_scores def get_config(self): return self._config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
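A small usage sketch of `MultilevelROIGenerator`, assuming a toy two-level feature pyramid with made-up sizes and random RPN outputs; the anchor boxes here are placeholders rather than real anchors, so only the output shapes are meaningful.

import tensorflow as tf

# MultilevelROIGenerator is assumed to be in scope from the module above.
generator = MultilevelROIGenerator(num_proposals=300, test_num_proposals=100)

batch, num_anchors = 2, 3
image_shape = tf.constant([[256., 256.], [256., 256.]])

raw_boxes, raw_scores, anchor_boxes = {}, {}, {}
for level, size in [('3', 32), ('4', 16)]:
  raw_boxes[level] = tf.random.normal([batch, size, size, num_anchors * 4])
  raw_scores[level] = tf.random.normal([batch, size, size, num_anchors])
  # Anchors are expected flattened per level: [batch, H * W * num_anchors, 4].
  anchor_boxes[level] = tf.random.uniform(
      [batch, size * size * num_anchors, 4], maxval=256.0)

rois, roi_scores = generator(
    raw_boxes, raw_scores, anchor_boxes, image_shape, training=False)
# In test mode the generator keeps test_num_proposals boxes:
# rois: [2, 100, 4], roi_scores: [2, 100]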
14,408
44.888535
80
py
models
models-master/official/vision/modeling/layers/nn_layers.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains common building blocks for neural networks.""" from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union from absl import logging import tensorflow as tf from official.modeling import tf_utils from official.vision.ops import spatial_transform_ops # Type annotations. States = Dict[str, tf.Tensor] Activation = Union[str, Callable] def make_divisible(value: float, divisor: int, min_value: Optional[float] = None, round_down_protect: bool = True, ) -> int: """This is to ensure that all layers have channels that are divisible by 8. Args: value: A `float` of original value. divisor: An `int` of the divisor that need to be checked upon. min_value: A `float` of minimum value threshold. round_down_protect: A `bool` indicating whether round down more than 10% will be allowed. Returns: The adjusted value in `int` that is divisible against divisor. """ if min_value is None: min_value = divisor new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if round_down_protect and new_value < 0.9 * value: new_value += divisor return int(new_value) def round_filters(filters: int, multiplier: float, divisor: int = 8, min_depth: Optional[int] = None, round_down_protect: bool = True, skip: bool = False) -> int: """Rounds number of filters based on width multiplier.""" orig_f = filters if skip or not multiplier: return filters new_filters = make_divisible(value=filters * multiplier, divisor=divisor, min_value=min_depth, round_down_protect=round_down_protect) logging.info('round_filter input=%s output=%s', orig_f, new_filters) return int(new_filters) def get_padding_for_kernel_size(kernel_size): """Compute padding size given kernel size.""" if kernel_size == 7: return (3, 3) elif kernel_size == 3: return (1, 1) else: raise ValueError('Padding for kernel size {} not known.'.format( kernel_size)) @tf.keras.utils.register_keras_serializable(package='Vision') class SqueezeExcitation(tf.keras.layers.Layer): """Creates a squeeze and excitation layer.""" def __init__(self, in_filters, out_filters, se_ratio, divisible_by=1, use_3d_input=False, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, activation='relu', gating_activation='sigmoid', round_down_protect=True, **kwargs): """Initializes a squeeze and excitation layer. Args: in_filters: An `int` number of filters of the input tensor. out_filters: An `int` number of filters of the output tensor. se_ratio: A `float` or None. If not None, se ratio for the squeeze and excitation layer. divisible_by: An `int` that ensures all inner dimensions are divisible by this number. use_3d_input: A `bool` of whether input is 2D or 3D image. kernel_initializer: A `str` of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. 
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d. Default to None. activation: A `str` name of the activation function. gating_activation: A `str` name of the activation function for final gating function. round_down_protect: A `bool` of whether round down more than 10% will be allowed. **kwargs: Additional keyword arguments to be passed. """ super(SqueezeExcitation, self).__init__(**kwargs) self._in_filters = in_filters self._out_filters = out_filters self._se_ratio = se_ratio self._divisible_by = divisible_by self._round_down_protect = round_down_protect self._use_3d_input = use_3d_input self._activation = activation self._gating_activation = gating_activation self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer if tf.keras.backend.image_data_format() == 'channels_last': if not use_3d_input: self._spatial_axis = [1, 2] else: self._spatial_axis = [1, 2, 3] else: if not use_3d_input: self._spatial_axis = [2, 3] else: self._spatial_axis = [2, 3, 4] self._activation_fn = tf_utils.get_activation(activation) self._gating_activation_fn = tf_utils.get_activation(gating_activation) def build(self, input_shape): num_reduced_filters = make_divisible( max(1, int(self._in_filters * self._se_ratio)), divisor=self._divisible_by, round_down_protect=self._round_down_protect) self._se_reduce = tf.keras.layers.Conv2D( filters=num_reduced_filters, kernel_size=1, strides=1, padding='same', use_bias=True, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._se_expand = tf.keras.layers.Conv2D( filters=self._out_filters, kernel_size=1, strides=1, padding='same', use_bias=True, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) super(SqueezeExcitation, self).build(input_shape) def get_config(self): config = { 'in_filters': self._in_filters, 'out_filters': self._out_filters, 'se_ratio': self._se_ratio, 'divisible_by': self._divisible_by, 'use_3d_input': self._use_3d_input, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'gating_activation': self._gating_activation, 'round_down_protect': self._round_down_protect, } base_config = super(SqueezeExcitation, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): x = tf.reduce_mean(inputs, self._spatial_axis, keepdims=True) x = self._activation_fn(self._se_reduce(x)) x = self._gating_activation_fn(self._se_expand(x)) return x * inputs def get_stochastic_depth_rate(init_rate, i, n): """Get drop connect rate for the ith block. Args: init_rate: A `float` of initial drop rate. i: An `int` of order of the current block. n: An `int` total number of blocks. Returns: Drop rate of the ith block. """ if init_rate is not None: if init_rate < 0 or init_rate > 1: raise ValueError('Initial drop rate must be within 0 and 1.') rate = init_rate * float(i) / n else: rate = None return rate @tf.keras.utils.register_keras_serializable(package='Vision') class StochasticDepth(tf.keras.layers.Layer): """Creates a stochastic depth layer.""" def __init__(self, stochastic_depth_drop_rate, **kwargs): """Initializes a stochastic depth layer. Args: stochastic_depth_drop_rate: A `float` of drop rate. 
**kwargs: Additional keyword arguments to be passed. Returns: A output `tf.Tensor` of which should have the same shape as input. """ super(StochasticDepth, self).__init__(**kwargs) self._drop_rate = stochastic_depth_drop_rate def get_config(self): config = {'stochastic_depth_drop_rate': self._drop_rate} base_config = super(StochasticDepth, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs, training=None): if training is None: training = tf.keras.backend.learning_phase() if not training or self._drop_rate is None or self._drop_rate == 0: return inputs keep_prob = 1.0 - self._drop_rate batch_size = tf.shape(inputs)[0] random_tensor = keep_prob random_tensor += tf.random.uniform( [batch_size] + [1] * (inputs.shape.rank - 1), dtype=inputs.dtype) binary_tensor = tf.floor(random_tensor) output = tf.math.divide(inputs, keep_prob) * binary_tensor return output @tf.keras.utils.register_keras_serializable(package='Vision') def pyramid_feature_fusion(inputs, target_level): """Fuses all feature maps in the feature pyramid at the target level. Args: inputs: A dictionary containing the feature pyramid. The size of the input tensor needs to be fixed. target_level: An `int` of the target feature level for feature fusion. Returns: A `float` `tf.Tensor` of shape [batch_size, feature_height, feature_width, feature_channel]. """ # Convert keys to int. pyramid_feats = {int(k): v for k, v in inputs.items()} min_level = min(pyramid_feats.keys()) max_level = max(pyramid_feats.keys()) resampled_feats = [] for l in range(min_level, max_level + 1): if l == target_level: resampled_feats.append(pyramid_feats[l]) else: feat = pyramid_feats[l] target_size = list(feat.shape[1:3]) target_size[0] *= 2**(l - target_level) target_size[1] *= 2**(l - target_level) # Casts feat to float32 so the resize op can be run on TPU. feat = tf.cast(feat, tf.float32) feat = tf.image.resize( feat, size=target_size, method=tf.image.ResizeMethod.BILINEAR) # Casts it back to be compatible with the rest opetations. feat = tf.cast(feat, pyramid_feats[l].dtype) resampled_feats.append(feat) return tf.math.add_n(resampled_feats) class PanopticFPNFusion(tf.keras.Model): """Creates a Panoptic FPN feature Fusion layer. This implements feature fusion for semantic segmentation head from the paper: Alexander Kirillov, Ross Girshick, Kaiming He and Piotr Dollar. Panoptic Feature Pyramid Networks. (https://arxiv.org/pdf/1901.02446.pdf) """ def __init__( self, min_level: int = 2, max_level: int = 5, target_level: int = 2, num_filters: int = 128, num_fpn_filters: int = 256, activation: str = 'relu', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes panoptic FPN feature fusion layer. Args: min_level: An `int` of minimum level to use in feature fusion. max_level: An `int` of maximum level to use in feature fusion. target_level: An `int` of the target feature level for feature fusion. num_filters: An `int` number of filters in conv2d layers. num_fpn_filters: An `int` number of filters in the FPN outputs activation: A `str` name of the activation function. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. **kwargs: Additional keyword arguments to be passed. Returns: A `float` `tf.Tensor` of shape [batch_size, feature_height, feature_width, feature_channel]. 
""" if target_level > max_level: raise ValueError('target_level should be less than max_level') self._config_dict = { 'min_level': min_level, 'max_level': max_level, 'target_level': target_level, 'num_filters': num_filters, 'num_fpn_filters': num_fpn_filters, 'activation': activation, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer, } norm = tf.keras.layers.GroupNormalization conv2d = tf.keras.layers.Conv2D activation_fn = tf_utils.get_activation(activation) if tf.keras.backend.image_data_format() == 'channels_last': norm_axis = -1 else: norm_axis = 1 inputs = self._build_inputs(num_fpn_filters, min_level, max_level) upscaled_features = [] for level in range(min_level, max_level + 1): num_conv_layers = max(1, level - target_level) x = inputs[str(level)] for i in range(num_conv_layers): x = conv2d( filters=num_filters, kernel_size=3, padding='same', kernel_initializer=tf.keras.initializers.VarianceScaling(), kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)(x) x = norm(groups=32, axis=norm_axis)(x) x = activation_fn(x) if level != target_level: x = spatial_transform_ops.nearest_upsampling(x, scale=2) upscaled_features.append(x) fused_features = tf.math.add_n(upscaled_features) self._output_specs = {str(target_level): fused_features.get_shape()} super(PanopticFPNFusion, self).__init__( inputs=inputs, outputs=fused_features, **kwargs) def _build_inputs(self, num_filters: int, min_level: int, max_level: int): inputs = {} for level in range(min_level, max_level + 1): inputs[str(level)] = tf.keras.Input(shape=[None, None, num_filters]) return inputs def get_config(self) -> Mapping[str, Any]: return self._config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def output_specs(self) -> Mapping[str, tf.TensorShape]: """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs @tf.keras.utils.register_keras_serializable(package='Vision') class Scale(tf.keras.layers.Layer): """Scales the input by a trainable scalar weight. This is useful for applying ReZero to layers, which improves convergence speed. This implements the paper: ReZero is All You Need: Fast Convergence at Large Depth. (https://arxiv.org/pdf/2003.04887.pdf). """ def __init__( self, initializer: tf.keras.initializers.Initializer = 'ones', regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a scale layer. Args: initializer: A `str` of initializer for the scalar weight. regularizer: A `tf.keras.regularizers.Regularizer` for the scalar weight. **kwargs: Additional keyword arguments to be passed to this layer. Returns: An `tf.Tensor` of which should have the same shape as input. 
""" super(Scale, self).__init__(**kwargs) self._initializer = initializer self._regularizer = regularizer self._scale = self.add_weight( name='scale', shape=[], dtype=self.dtype, initializer=self._initializer, regularizer=self._regularizer, trainable=True) def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'initializer': self._initializer, 'regularizer': self._regularizer, } base_config = super(Scale, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): """Calls the layer with the given inputs.""" scale = tf.cast(self._scale, inputs.dtype) return scale * inputs @tf.keras.utils.register_keras_serializable(package='Vision') class TemporalSoftmaxPool(tf.keras.layers.Layer): """Creates a network layer corresponding to temporal softmax pooling. This is useful for multi-class logits (used in e.g., Charades). Modified from AssembleNet Charades evaluation from: Michael S. Ryoo, AJ Piergiovanni, Mingxing Tan, Anelia Angelova. AssembleNet: Searching for Multi-Stream Neural Connectivity in Video Architectures. (https://arxiv.org/pdf/1905.13209.pdf). """ def call(self, inputs): """Calls the layer with the given inputs.""" assert inputs.shape.rank in (3, 4, 5) frames = tf.shape(inputs)[1] pre_logits = inputs / tf.sqrt(tf.cast(frames, inputs.dtype)) activations = tf.nn.softmax(pre_logits, axis=1) outputs = inputs * activations return outputs @tf.keras.utils.register_keras_serializable(package='Vision') class PositionalEncoding(tf.keras.layers.Layer): """Creates a network layer that adds a sinusoidal positional encoding. Positional encoding is incremented across frames, and is added to the input. The positional encoding is first weighted at 0 so that the network can choose to ignore it. This implements: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin. Attention Is All You Need. (https://arxiv.org/pdf/1706.03762.pdf). """ def __init__(self, initializer: tf.keras.initializers.Initializer = 'zeros', cache_encoding: bool = False, state_prefix: Optional[str] = None, **kwargs): """Initializes positional encoding. Args: initializer: A `str` of initializer for weighting the positional encoding. cache_encoding: A `bool`. If True, cache the positional encoding tensor after calling build. Otherwise, rebuild the tensor for every call. Setting this to False can be useful when we want to input a variable number of frames, so the positional encoding tensor can change shape. state_prefix: a prefix string to identify states. **kwargs: Additional keyword arguments to be passed to this layer. Returns: A `tf.Tensor` of which should have the same shape as input. 
""" super(PositionalEncoding, self).__init__(**kwargs) self._initializer = initializer self._cache_encoding = cache_encoding self._pos_encoding = None self._rezero = Scale(initializer=initializer, name='rezero') state_prefix = state_prefix if state_prefix is not None else '' self._state_prefix = state_prefix self._frame_count_name = f'{state_prefix}_pos_enc_frame_count' def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'initializer': self._initializer, 'cache_encoding': self._cache_encoding, 'state_prefix': self._state_prefix, } base_config = super(PositionalEncoding, self).get_config() return dict(list(base_config.items()) + list(config.items())) def _positional_encoding(self, num_positions: Union[int, tf.Tensor], hidden_size: Union[int, tf.Tensor], start_position: Union[int, tf.Tensor] = 0, dtype: str = 'float32') -> tf.Tensor: """Creates a sequence of sinusoidal positional encoding vectors. Args: num_positions: the total number of positions (frames). hidden_size: the number of channels used for the hidden vectors. start_position: the start position. dtype: the dtype of the output tensor. Returns: The positional encoding tensor with shape [num_positions, hidden_size]. """ if isinstance(start_position, tf.Tensor) and start_position.shape.rank == 1: start_position = start_position[0] # Calling `tf.range` with `dtype=tf.bfloat16` results in an error, # so we cast afterward. positions = tf.range(start_position, start_position + num_positions) positions = tf.cast(positions, dtype)[:, tf.newaxis] idx = tf.range(hidden_size)[tf.newaxis, :] power = tf.cast(2 * (idx // 2), dtype) power /= tf.cast(hidden_size, dtype) angles = 1. / tf.math.pow(10_000., power) radians = positions * angles sin = tf.math.sin(radians[:, 0::2]) cos = tf.math.cos(radians[:, 1::2]) pos_encoding = tf.concat([sin, cos], axis=-1) return pos_encoding def _get_pos_encoding(self, input_shape: tf.Tensor, frame_count: int = 0) -> tf.Tensor: """Calculates the positional encoding from the input shape. Args: input_shape: the shape of the input. frame_count: a count of frames that indicates the index of the first frame. Returns: The positional encoding tensor with shape [num_positions, hidden_size]. """ frames = input_shape[1] channels = input_shape[-1] pos_encoding = self._positional_encoding( frames, channels, start_position=frame_count, dtype=self.dtype) pos_encoding = tf.reshape(pos_encoding, [1, frames, 1, 1, channels]) return pos_encoding def build(self, input_shape): """Builds the layer with the given input shape. Args: input_shape: The input shape. Raises: ValueError: If using 'channels_first' data format. """ if tf.keras.backend.image_data_format() == 'channels_first': raise ValueError('"channels_first" mode is unsupported.') if self._cache_encoding: self._pos_encoding = self._get_pos_encoding(input_shape) super(PositionalEncoding, self).build(input_shape) def call( self, inputs: tf.Tensor, states: Optional[States] = None, output_states: bool = True, ) -> Union[tf.Tensor, Tuple[tf.Tensor, States]]: """Calls the layer with the given inputs. Args: inputs: An input `tf.Tensor`. states: A `dict` of states such that, if any of the keys match for this layer, will overwrite the contents of the buffer(s). Expected keys include `state_prefix + '_pos_enc_frame_count'`. output_states: A `bool`. If True, returns the output tensor and output states. Returns just the output tensor otherwise. Returns: An output `tf.Tensor` (and optionally the states if `output_states=True`). 
Raises: ValueError: If using 'channels_first' data format. """ states = dict(states) if states is not None else {} # Keep a count of frames encountered across input iterations in # num_frames to be able to accurately update the positional encoding. num_frames = tf.shape(inputs)[1] frame_count = tf.cast(states.get(self._frame_count_name, [0]), tf.int32) states[self._frame_count_name] = frame_count + num_frames if self._cache_encoding: pos_encoding = self._pos_encoding else: pos_encoding = self._get_pos_encoding( tf.shape(inputs), frame_count=frame_count) pos_encoding = tf.cast(pos_encoding, inputs.dtype) pos_encoding = self._rezero(pos_encoding) outputs = inputs + pos_encoding return (outputs, states) if output_states else outputs @tf.keras.utils.register_keras_serializable(package='Vision') class GlobalAveragePool3D(tf.keras.layers.Layer): """Creates a global average pooling layer with causal mode. Implements causal mode, which runs a cumulative sum (with `tf.cumsum`) across frames in the time dimension, allowing the use of a stream buffer. Sums any valid input state with the current input to allow state to accumulate over several iterations. """ def __init__(self, keepdims: bool = False, causal: bool = False, state_prefix: Optional[str] = None, **kwargs): """Initializes a global average pool layer. Args: keepdims: A `bool`. If True, keep the averaged dimensions. causal: A `bool` of whether to run in causal mode with a cumulative sum across frames. state_prefix: a prefix string to identify states. **kwargs: Additional keyword arguments to be passed to this layer. Returns: An output `tf.Tensor`. """ super(GlobalAveragePool3D, self).__init__(**kwargs) self._keepdims = keepdims self._causal = causal state_prefix = state_prefix if state_prefix is not None else '' self._state_prefix = state_prefix self._state_name = f'{state_prefix}_pool_buffer' self._frame_count_name = f'{state_prefix}_pool_frame_count' def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'keepdims': self._keepdims, 'causal': self._causal, 'state_prefix': self._state_prefix, } base_config = super(GlobalAveragePool3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, states: Optional[States] = None, output_states: bool = False ) -> Union[tf.Tensor, Tuple[tf.Tensor, States]]: """Calls the layer with the given inputs. Args: inputs: An input `tf.Tensor`. states: A `dict` of states such that, if any of the keys match for this layer, will overwrite the contents of the buffer(s). Expected keys include `state_prefix + '__pool_buffer'` and `state_prefix + '__pool_frame_count'`. output_states: A `bool`. If True, returns the output tensor and output states. Returns just the output tensor otherwise. Returns: An output `tf.Tensor` (and optionally the states if `output_states=True`). If `causal=True`, the output tensor will have shape `[batch_size, num_frames, 1, 1, channels]` if `keepdims=True`. We keep the frame dimension in this case to simulate a cumulative global average as if we are inputting one frame at a time. If `causal=False`, the output is equivalent to `tf.keras.layers.GlobalAveragePooling3D` with shape `[batch_size, 1, 1, 1, channels]` if `keepdims=True` (plus the optional buffer stored in `states`). Raises: ValueError: If using 'channels_first' data format. 
""" states = dict(states) if states is not None else {} if tf.keras.backend.image_data_format() == 'channels_first': raise ValueError('"channels_first" mode is unsupported.') # Shape: [batch_size, 1, 1, 1, channels] buffer = states.get(self._state_name, None) if buffer is None: buffer = tf.zeros_like(inputs[:, :1, :1, :1], dtype=inputs.dtype) states[self._state_name] = buffer # Keep a count of frames encountered across input iterations in # num_frames to be able to accurately take a cumulative average across # all frames when running in streaming mode num_frames = tf.shape(inputs)[1] frame_count = states.get(self._frame_count_name, tf.constant([0])) frame_count = tf.cast(frame_count, tf.int32) states[self._frame_count_name] = frame_count + num_frames if self._causal: # Take a mean of spatial dimensions to make computation more efficient. x = tf.reduce_mean(inputs, axis=[2, 3], keepdims=True) x = tf.cumsum(x, axis=1) x = x + buffer # The last frame will be the value of the next state # Shape: [batch_size, 1, 1, 1, channels] states[self._state_name] = x[:, -1:] # In causal mode, the divisor increments by 1 for every frame to # calculate cumulative averages instead of one global average mean_divisors = tf.range(num_frames) + frame_count + 1 mean_divisors = tf.reshape(mean_divisors, [1, num_frames, 1, 1, 1]) mean_divisors = tf.cast(mean_divisors, x.dtype) # Shape: [batch_size, num_frames, 1, 1, channels] x = x / mean_divisors else: # In non-causal mode, we (optionally) sum across frames to take a # cumulative average across input iterations rather than individual # frames. If no buffer state is passed, this essentially becomes # regular global average pooling. # Shape: [batch_size, 1, 1, 1, channels] x = tf.reduce_sum(inputs, axis=(1, 2, 3), keepdims=True) x = x / tf.cast(tf.shape(inputs)[2] * tf.shape(inputs)[3], x.dtype) x = x + buffer # Shape: [batch_size, 1, 1, 1, channels] states[self._state_name] = x x = x / tf.cast(frame_count + num_frames, x.dtype) if not self._keepdims: x = tf.squeeze(x, axis=(1, 2, 3)) return (x, states) if output_states else x @tf.keras.utils.register_keras_serializable(package='Vision') class SpatialAveragePool3D(tf.keras.layers.Layer): """Creates a global average pooling layer pooling across spatial dimentions.""" def __init__(self, keepdims: bool = False, **kwargs): """Initializes a global average pool layer. Args: keepdims: A `bool`. If True, keep the averaged dimensions. **kwargs: Additional keyword arguments to be passed to this layer. Returns: An output `tf.Tensor`. 
""" super(SpatialAveragePool3D, self).__init__(**kwargs) self._keepdims = keepdims def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'keepdims': self._keepdims, } base_config = super(SpatialAveragePool3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) def build(self, input_shape): """Builds the layer with the given input shape.""" if tf.keras.backend.image_data_format() == 'channels_first': raise ValueError('"channels_first" mode is unsupported.') super(SpatialAveragePool3D, self).build(input_shape) def call(self, inputs, states=None, output_states: bool = False): """Calls the layer with the given inputs.""" if inputs.shape.rank != 5: raise ValueError( 'Input should have rank {}, got {}'.format(5, inputs.shape.rank)) output = tf.reduce_mean(inputs, axis=(2, 3), keepdims=self._keepdims) return (output, states) if output_states else output class CausalConvMixin: """Mixin class to implement CausalConv for `tf.keras.layers.Conv` layers.""" @property def use_buffered_input(self) -> bool: return self._use_buffered_input @use_buffered_input.setter def use_buffered_input(self, variable: bool): self._use_buffered_input = variable def _compute_buffered_causal_padding(self, inputs: tf.Tensor, use_buffered_input: bool = False, time_axis: int = 1, ) -> List[List[int]]: """Calculates padding for 'causal' option for conv layers. Args: inputs: An optional input `tf.Tensor` to be padded. use_buffered_input: A `bool`. If True, use 'valid' padding along the time dimension. This should be set when applying the stream buffer. time_axis: An `int` of the axis of the time dimension. Returns: A list of paddings for `tf.pad`. """ input_shape = tf.shape(inputs)[1:-1] if tf.keras.backend.image_data_format() == 'channels_first': raise ValueError('"channels_first" mode is unsupported.') kernel_size_effective = [ (self.kernel_size[i] + (self.kernel_size[i] - 1) * (self.dilation_rate[i] - 1)) for i in range(self.rank) ] pad_total = [kernel_size_effective[0] - 1] for i in range(1, self.rank): overlap = (input_shape[i] - 1) % self.strides[i] + 1 pad_total.append(tf.maximum(kernel_size_effective[i] - overlap, 0)) pad_beg = [pad_total[i] // 2 for i in range(self.rank)] pad_end = [pad_total[i] - pad_beg[i] for i in range(self.rank)] padding = [[pad_beg[i], pad_end[i]] for i in range(self.rank)] padding = [[0, 0]] + padding + [[0, 0]] if use_buffered_input: padding[time_axis] = [0, 0] else: padding[time_axis] = [padding[time_axis][0] + padding[time_axis][1], 0] return padding def _causal_validate_init(self): """Validates the Conv layer initial configuration.""" # Overriding this method is meant to circumvent unnecessary errors when # using causal padding. if (self.filters is not None and self.filters % self.groups != 0): raise ValueError( 'The number of filters must be evenly divisible by the number of ' 'groups. Received: groups={}, filters={}'.format( self.groups, self.filters)) if not all(self.kernel_size): raise ValueError('The argument `kernel_size` cannot contain 0(s). ' 'Received: %s' % (self.kernel_size,)) def _buffered_spatial_output_shape(self, spatial_output_shape: List[int]): """Computes the spatial output shape from the input shape.""" # When buffer padding, use 'valid' padding across time. The output shape # across time should be the input shape minus any padding, assuming # the stride across time is 1. 
if self._use_buffered_input and spatial_output_shape[0] is not None: padding = self._compute_buffered_causal_padding( tf.zeros([1] + spatial_output_shape + [1]), use_buffered_input=False) spatial_output_shape[0] -= sum(padding[1]) return spatial_output_shape @tf.keras.utils.register_keras_serializable(package='Vision') class Conv2D(tf.keras.layers.Conv2D, CausalConvMixin): """Conv2D layer supporting CausalConv. Supports `padding='causal'` option (like in `tf.keras.layers.Conv1D`), which applies causal padding to the temporal dimension, and same padding in the spatial dimensions. """ def __init__(self, *args, use_buffered_input=False, **kwargs): """Initializes conv2d. Args: *args: Arguments to be passed. use_buffered_input: A `bool`. If True, the input is expected to be padded beforehand. In effect, calling this layer will use 'valid' padding on the temporal dimension to simulate 'causal' padding. **kwargs: Additional keyword arguments to be passed. Returns: An output `tf.Tensor` of the Conv2D operation. """ super(Conv2D, self).__init__(*args, **kwargs) self._use_buffered_input = use_buffered_input def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'use_buffered_input': self._use_buffered_input, } base_config = super(Conv2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) def _compute_causal_padding(self, inputs): """Computes causal padding dimensions for the given inputs.""" return self._compute_buffered_causal_padding( inputs, use_buffered_input=self._use_buffered_input) def _validate_init(self): """Validates the Conv layer initial configuration.""" self._causal_validate_init() def _spatial_output_shape(self, spatial_input_shape: List[int]): """Computes the spatial output shape from the input shape.""" shape = super(Conv2D, self)._spatial_output_shape(spatial_input_shape) return self._buffered_spatial_output_shape(shape) @tf.keras.utils.register_keras_serializable(package='Vision') class DepthwiseConv2D(tf.keras.layers.DepthwiseConv2D, CausalConvMixin): """DepthwiseConv2D layer supporting CausalConv. Supports `padding='causal'` option (like in `tf.keras.layers.Conv1D`), which applies causal padding to the temporal dimension, and same padding in the spatial dimensions. """ def __init__(self, *args, use_buffered_input=False, **kwargs): """Initializes depthwise conv2d. Args: *args: Arguments to be passed. use_buffered_input: A `bool`. If True, the input is expected to be padded beforehand. In effect, calling this layer will use 'valid' padding on the temporal dimension to simulate 'causal' padding. **kwargs: Additional keyword arguments to be passed. Returns: An output `tf.Tensor` of the DepthwiseConv2D operation. """ super(DepthwiseConv2D, self).__init__(*args, **kwargs) self._use_buffered_input = use_buffered_input # Causal padding is unsupported by default for DepthwiseConv2D, # so we resort to valid padding internally. However, we handle # causal padding as a special case with `self._is_causal`, which is # defined by the super class. 
if self.padding == 'causal': self.padding = 'valid' def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'use_buffered_input': self._use_buffered_input, } base_config = super(DepthwiseConv2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): """Calls the layer with the given inputs.""" if self._is_causal: inputs = tf.pad(inputs, self._compute_causal_padding(inputs)) return super(DepthwiseConv2D, self).call(inputs) def _compute_causal_padding(self, inputs): """Computes causal padding dimensions for the given inputs.""" return self._compute_buffered_causal_padding( inputs, use_buffered_input=self._use_buffered_input) def _validate_init(self): """Validates the Conv layer initial configuration.""" self._causal_validate_init() def _spatial_output_shape(self, spatial_input_shape: List[int]): """Computes the spatial output shape from the input shape.""" shape = super(DepthwiseConv2D, self)._spatial_output_shape( spatial_input_shape) return self._buffered_spatial_output_shape(shape) @tf.keras.utils.register_keras_serializable(package='Vision') class Conv3D(tf.keras.layers.Conv3D, CausalConvMixin): """Conv3D layer supporting CausalConv. Supports `padding='causal'` option (like in `tf.keras.layers.Conv1D`), which applies causal padding to the temporal dimension, and same padding in the spatial dimensions. """ def __init__(self, *args, use_buffered_input=False, **kwargs): """Initializes conv3d. Args: *args: Arguments to be passed. use_buffered_input: A `bool`. If True, the input is expected to be padded beforehand. In effect, calling this layer will use 'valid' padding on the temporal dimension to simulate 'causal' padding. **kwargs: Additional keyword arguments to be passed. Returns: An output `tf.Tensor` of the Conv3D operation. """ super(Conv3D, self).__init__(*args, **kwargs) self._use_buffered_input = use_buffered_input def get_config(self): """Returns a dictionary containing the config used for initialization.""" config = { 'use_buffered_input': self._use_buffered_input, } base_config = super(Conv3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): """Call the layer with the given inputs.""" # Note: tf.nn.conv3d with depthwise kernels on CPU is currently only # supported when compiling with TF graph (XLA) using tf.function, so it # is compiled by default here (b/186463870). conv_fn = tf.function(super(Conv3D, self).call, jit_compile=True) return conv_fn(inputs) def _compute_causal_padding(self, inputs): """Computes causal padding dimensions for the given inputs.""" return self._compute_buffered_causal_padding( inputs, use_buffered_input=self._use_buffered_input) def _validate_init(self): """Validates the Conv layer initial configuration.""" self._causal_validate_init() def _spatial_output_shape(self, spatial_input_shape: List[int]): """Computes the spatial output shape from the input shape.""" shape = super(Conv3D, self)._spatial_output_shape(spatial_input_shape) return self._buffered_spatial_output_shape(shape) @tf.keras.utils.register_keras_serializable(package='Vision') class SpatialPyramidPooling(tf.keras.layers.Layer): """Implements the Atrous Spatial Pyramid Pooling. 
References: [Rethinking Atrous Convolution for Semantic Image Segmentation]( https://arxiv.org/pdf/1706.05587.pdf) [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/pdf/1802.02611.pdf) """ def __init__( self, output_channels: int, dilation_rates: List[int], pool_kernel_size: Optional[List[int]] = None, use_sync_bn: bool = False, batchnorm_momentum: float = 0.99, batchnorm_epsilon: float = 0.001, activation: str = 'relu', dropout: float = 0.5, kernel_initializer: str = 'GlorotUniform', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, interpolation: str = 'bilinear', use_depthwise_convolution: bool = False, **kwargs): """Initializes `SpatialPyramidPooling`. Args: output_channels: Number of channels produced by SpatialPyramidPooling. dilation_rates: A list of integers for parallel dilated conv. pool_kernel_size: A list of integers or None. If None, global average pooling is applied, otherwise an average pooling of pool_kernel_size is applied. use_sync_bn: A bool, whether or not to use sync batch normalization. batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to 0.99. batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to 0.001. activation: A `str` for type of activation to be used. Defaults to 'relu'. dropout: A float for the dropout rate before output. Defaults to 0.5. kernel_initializer: Kernel initializer for conv layers. Defaults to `glorot_uniform`. kernel_regularizer: Kernel regularizer for conv layers. Defaults to None. interpolation: The interpolation method for upsampling. Defaults to `bilinear`. use_depthwise_convolution: Allows spatial pooling to be performed with separable depthwise convolutions. [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation]( https://arxiv.org/pdf/1802.02611.pdf) **kwargs: Other keyword arguments for the layer.
""" super().__init__(**kwargs) self._output_channels = output_channels self._dilation_rates = dilation_rates self._use_sync_bn = use_sync_bn self._batchnorm_momentum = batchnorm_momentum self._batchnorm_epsilon = batchnorm_epsilon self._activation = activation self._dropout = dropout self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._interpolation = interpolation self._pool_kernel_size = pool_kernel_size self._use_depthwise_convolution = use_depthwise_convolution self._activation_fn = tf_utils.get_activation(activation) self._bn_op = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 def build(self, input_shape): height = input_shape[1] width = input_shape[2] channels = input_shape[3] self.aspp_layers = [] conv1 = tf.keras.layers.Conv2D( filters=self._output_channels, kernel_size=(1, 1), kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, use_bias=False) norm1 = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon, synchronized=self._use_sync_bn) self.aspp_layers.append([conv1, norm1]) for dilation_rate in self._dilation_rates: leading_layers = [] kernel_size = (3, 3) if self._use_depthwise_convolution: leading_layers += [ tf.keras.layers.DepthwiseConv2D( depth_multiplier=1, kernel_size=kernel_size, padding='same', depthwise_regularizer=self._kernel_regularizer, depthwise_initializer=tf_utils.clone_initializer( self._kernel_initializer), dilation_rate=dilation_rate, use_bias=False) ] kernel_size = (1, 1) conv_dilation = leading_layers + [ tf.keras.layers.Conv2D( filters=self._output_channels, kernel_size=kernel_size, padding='same', kernel_regularizer=self._kernel_regularizer, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), dilation_rate=dilation_rate, use_bias=False) ] norm_dilation = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon, synchronized=self._use_sync_bn) self.aspp_layers.append(conv_dilation + [norm_dilation]) if self._pool_kernel_size is None: pooling = [ tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Reshape((1, 1, channels)) ] else: pooling = [tf.keras.layers.AveragePooling2D(self._pool_kernel_size)] conv2 = tf.keras.layers.Conv2D( filters=self._output_channels, kernel_size=(1, 1), kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, use_bias=False) norm2 = self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon, synchronized=self._use_sync_bn) self.aspp_layers.append(pooling + [conv2, norm2]) self._resizing_layer = tf.keras.layers.Resizing( height, width, interpolation=self._interpolation, dtype=tf.float32) self._projection = [ tf.keras.layers.Conv2D( filters=self._output_channels, kernel_size=(1, 1), kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, use_bias=False), self._bn_op( axis=self._bn_axis, momentum=self._batchnorm_momentum, epsilon=self._batchnorm_epsilon, synchronized=self._use_sync_bn) ] self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout) self._concat_layer = tf.keras.layers.Concatenate(axis=-1) def call(self, inputs: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor: if training is None: training = tf.keras.backend.learning_phase() 
result = [] for i, layers in enumerate(self.aspp_layers): x = inputs for layer in layers: # Apply layers sequentially. x = layer(x, training=training) x = self._activation_fn(x) # Apply resize layer to the end of the last set of layers. if i == len(self.aspp_layers) - 1: x = self._resizing_layer(x) result.append(tf.cast(x, inputs.dtype)) x = self._concat_layer(result) for layer in self._projection: x = layer(x, training=training) x = self._activation_fn(x) return self._dropout_layer(x) def get_config(self): config = { 'output_channels': self._output_channels, 'dilation_rates': self._dilation_rates, 'pool_kernel_size': self._pool_kernel_size, 'use_sync_bn': self._use_sync_bn, 'batchnorm_momentum': self._batchnorm_momentum, 'batchnorm_epsilon': self._batchnorm_epsilon, 'activation': self._activation, 'dropout': self._dropout, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'interpolation': self._interpolation, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) @tf.keras.utils.register_keras_serializable(package='Vision') class MultiHeadAttention(tf.keras.layers.MultiHeadAttention): """MultiHeadAttention layer. This is an implementation of multi-headed attention as described in the paper "Attention is all you Need" (Vaswani et al., 2017). """ def __init__( self, *args, partition_dims: Optional[Tuple[int, int, int, int]] = None, max_inference_parallelism: Optional[int] = None, **kwargs, ): """Initializes MultiHeadAttention. Args: *args: Positional arguments passed to super().__init__. partition_dims: Spatial partition dimensions. max_inference_parallelism: The number of examples to run in parallel during inference. Set this limit to reduce the peak memory usage. If None, use vectorized operations to run the whole batch in parallel. **kwargs: Keyword arguments passed to super().__init__. """ super().__init__(*args, **kwargs) self._partition_dims = partition_dims self._max_inference_parallelism = max_inference_parallelism def get_config(self): config = super().get_config() config.update({ 'partition_dims': self._partition_dims, 'max_inference_parallelism': self._max_inference_parallelism, }) return config def _compute_attention( self, query: tf.Tensor, key: tf.Tensor, value: tf.Tensor, attention_mask: Optional[tf.Tensor] = None, training: Optional[bool] = None, ): """Applies dot-product attention with query, key, value tensors. Args: query: Projected query `Tensor` of shape `(B, T, N, key_dim)`. key: Projected key `Tensor` of shape `(B, S, N, key_dim)`. value: Projected value `Tensor` of shape `(B, S, N, value_dim)`. attention_mask: a boolean mask of shape `(B, T, S)`, that prevents attention to certain positions. It is generally not needed if the `query` and `value` (and/or `key`) are masked. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Returns: attention_output: Multi-headed outputs of attention computation. attention_scores: Multi-headed attention weights. """ if self._partition_dims is not None: strategy = tf.distribute.get_strategy() # `query` = [B, T, N ,H] query = strategy.experimental_split_to_logical_devices( query, self._partition_dims) key = strategy.experimental_split_to_logical_devices( key, self._partition_dims) value = strategy.experimental_split_to_logical_devices( value, self._partition_dims) batch_size = query.get_shape().as_list()[0] # None if dynamic. 
if ( training or self._max_inference_parallelism is None or self._max_inference_parallelism <= 0 or ( # If the whole batch is allowed to be run in parallel, use fully # vectorized computation instead of tf.map_fn to make things more # efficient. batch_size is not None and batch_size <= self._max_inference_parallelism ) ): return self._compute_attention_delegate( query, key, value, attention_mask, training ) else: # Sequentialize the inference execution with limited parallelism. def _compute_fn(x): attention_output, attention_scores = self._compute_attention_delegate( query=x[0][tf.newaxis, ...], key=x[1][tf.newaxis, ...], value=x[2][tf.newaxis, ...], attention_mask=x[3][tf.newaxis, ...] if len(x) >= 4 else None, training=training, ) attention_output = tf.squeeze(attention_output, axis=0) attention_scores = tf.squeeze(attention_scores, axis=0) return attention_output, attention_scores if attention_mask is not None: elems = [query, key, value, attention_mask] else: elems = [query, key, value] return tf.map_fn( fn=_compute_fn, elems=elems, fn_output_signature=(value.dtype, value.dtype), parallel_iterations=self._max_inference_parallelism, ) def _compute_attention_delegate( self, query: tf.Tensor, key: tf.Tensor, value: tf.Tensor, attention_mask: Optional[tf.Tensor] = None, training: Optional[bool] = None, ): """Implements dot-product attention with query, key, value tensors.""" # Simply calls the implementation of the super class here, while the users # can override this function for customizing attention computation. return super()._compute_attention( query, key, value, attention_mask, training )
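To make the helper utilities above more concrete, here is a small hedged usage sketch covering `round_filters`/`make_divisible`, `SqueezeExcitation`, `StochasticDepth`, and the causal `GlobalAveragePool3D`, assuming those definitions are in scope (e.g., imported from this nn_layers module); the shapes are arbitrary toy values.

import tensorflow as tf

# Width-multiplier arithmetic: scale the channel count, then snap it to the
# nearest multiple of the divisor (8 by default).
assert make_divisible(value=37.5, divisor=8) == 40
assert round_filters(filters=32, multiplier=1.25) == 40

# Squeeze-and-excitation keeps the input shape when in/out filters match.
se = SqueezeExcitation(in_filters=64, out_filters=64, se_ratio=0.25)
x = tf.random.normal([2, 16, 16, 64])
print(se(x).shape)  # (2, 16, 16, 64)

# Stochastic depth randomly drops the residual branch per example in training
# and is the identity at inference time.
drop = StochasticDepth(stochastic_depth_drop_rate=0.2)
y_train = drop(x, training=True)
y_eval = drop(x, training=False)  # identical to x

# Causal global average pooling returns one cumulative average per frame and
# carries a buffer plus frame count in `states` for streaming inference.
pool = GlobalAveragePool3D(keepdims=True, causal=True)
video = tf.random.normal([2, 8, 7, 7, 32])
pooled, states = pool(video, states={}, output_states=True)
print(pooled.shape)  # (2, 8, 1, 1, 32)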
52,972
36.489738
81
py
models
models-master/official/vision/modeling/layers/nn_blocks_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for nn_blocks.""" from typing import Any, Iterable, Tuple # Import libraries from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.vision.modeling.layers import nn_blocks from official.vision.modeling.layers import nn_layers def distribution_strategy_combinations() -> Iterable[Tuple[Any, ...]]: """Returns the combinations of end-to-end tests to run.""" return combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy_gpu, ],) class NNBlocksTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (nn_blocks.ResidualBlock, 1, False, 0.0, None), (nn_blocks.ResidualBlock, 2, True, 0.2, 0.25), ) def test_residual_block_creation(self, block_fn, strides, use_projection, stochastic_depth_drop_rate, se_ratio): input_size = 128 filter_size = 256 inputs = tf.keras.Input( shape=(input_size, input_size, filter_size), batch_size=1) block = block_fn( filter_size, strides, use_projection=use_projection, se_ratio=se_ratio, stochastic_depth_drop_rate=stochastic_depth_drop_rate, ) features = block(inputs) self.assertAllEqual( [1, input_size // strides, input_size // strides, filter_size], features.shape.as_list()) def test_layerscale_call(self): # Set up test inputs input_shape = (2, 3, 4) init_values = 1e-4 inputs = tf.ones(input_shape, dtype=tf.float32) # Instantiate LayerScale object layer_scale = nn_blocks.LayerScale(init_values) # Call LayerScale object on test inputs output = layer_scale(inputs) # Check output shape expected_output_shape = input_shape self.assertAllEqual(output.shape, expected_output_shape) # Check that output values are correct expected_output_values = init_values * np.ones(input_shape) self.assertAllClose( output.numpy(), expected_output_values, rtol=1e-5, atol=1e-5) def test_layerscale_training(self): # Verify that gamma values have changed from their initial values in one # step forward pass. 
# Set up test inputs input_shape = (1, 3, 4) init_values = 1e-4 inputs = tf.ones(input_shape, dtype=tf.float32) targets = tf.ones(input_shape, dtype=tf.float32) # Instantiate LayerScale object layer_scale = nn_blocks.LayerScale(init_values) # Define optimizer and loss function optimizer = tf.keras.optimizers.Adam() loss_fn = tf.keras.losses.MeanSquaredError() # Train the model for one step with tf.GradientTape() as tape: predictions = layer_scale(inputs) loss = loss_fn(targets, predictions) grads = tape.gradient(loss, layer_scale.trainable_variables) optimizer.apply_gradients(zip(grads, layer_scale.trainable_variables)) # Check that gamma values have changed updated_gamma = layer_scale.gamma.numpy()[0, 0, 0] self.assertNotEqual(updated_gamma, init_values) @parameterized.parameters( (nn_blocks.BottleneckBlock, 1, False, 0.0, None), (nn_blocks.BottleneckBlock, 2, True, 0.2, 0.25), ) def test_bottleneck_block_creation(self, block_fn, strides, use_projection, stochastic_depth_drop_rate, se_ratio): input_size = 128 filter_size = 256 inputs = tf.keras.Input( shape=(input_size, input_size, filter_size * 4), batch_size=1) block = block_fn( filter_size, strides, use_projection=use_projection, se_ratio=se_ratio, stochastic_depth_drop_rate=stochastic_depth_drop_rate) features = block(inputs) self.assertAllEqual( [1, input_size // strides, input_size // strides, filter_size * 4], features.shape.as_list()) @parameterized.parameters( (nn_blocks.InvertedBottleneckBlock, 1, 1, None, None), (nn_blocks.InvertedBottleneckBlock, 6, 1, None, None), (nn_blocks.InvertedBottleneckBlock, 1, 2, None, None), (nn_blocks.InvertedBottleneckBlock, 1, 1, 0.2, None), (nn_blocks.InvertedBottleneckBlock, 1, 1, None, 0.2), ) def test_invertedbottleneck_block_creation(self, block_fn, expand_ratio, strides, se_ratio, stochastic_depth_drop_rate): input_size = 128 in_filters = 24 out_filters = 40 inputs = tf.keras.Input( shape=(input_size, input_size, in_filters), batch_size=1) block = block_fn( in_filters=in_filters, out_filters=out_filters, expand_ratio=expand_ratio, strides=strides, se_ratio=se_ratio, stochastic_depth_drop_rate=stochastic_depth_drop_rate) features = block(inputs) self.assertAllEqual( [1, input_size // strides, input_size // strides, out_filters], features.shape.as_list()) @parameterized.parameters( (nn_blocks.TuckerConvBlock, 1, 0.25, 0.25), (nn_blocks.TuckerConvBlock, 2, 0.25, 0.25), ) def test_tucker_conv_block(self, block_fn, strides, input_compression_ratio, output_compression_ratio): input_size = 128 in_filters = 24 out_filters = 24 inputs = tf.keras.Input( shape=(input_size, input_size, in_filters), batch_size=1) block = block_fn( in_filters=in_filters, out_filters=out_filters, input_compression_ratio=input_compression_ratio, output_compression_ratio=output_compression_ratio, strides=strides) features = block(inputs) self.assertAllEqual( [1, input_size // strides, input_size // strides, out_filters], features.shape.as_list()) class ResidualInnerTest(parameterized.TestCase, tf.test.TestCase): @combinations.generate(distribution_strategy_combinations()) def test_shape(self, distribution): bsz, h, w, c = 8, 32, 32, 32 filters = 64 strides = 2 input_tensor = tf.random.uniform(shape=[bsz, h, w, c]) with distribution.scope(): test_layer = nn_blocks.ResidualInner(filters, strides) output = test_layer(input_tensor) expected_output_shape = [bsz, h // strides, w // strides, filters] self.assertEqual(expected_output_shape, output.shape.as_list()) class BottleneckResidualInnerTest(parameterized.TestCase, 
tf.test.TestCase): @combinations.generate(distribution_strategy_combinations()) def test_shape(self, distribution): bsz, h, w, c = 8, 32, 32, 32 filters = 64 strides = 2 input_tensor = tf.random.uniform(shape=[bsz, h, w, c]) with distribution.scope(): test_layer = nn_blocks.BottleneckResidualInner(filters, strides) output = test_layer(input_tensor) expected_output_shape = [bsz, h // strides, w // strides, filters * 4] self.assertEqual(expected_output_shape, output.shape.as_list()) class DepthwiseSeparableConvBlockTest(parameterized.TestCase, tf.test.TestCase): @combinations.generate(distribution_strategy_combinations()) def test_shape(self, distribution): batch_size, height, width, num_channels = 8, 32, 32, 32 num_filters = 64 strides = 2 input_tensor = tf.random.normal( shape=[batch_size, height, width, num_channels]) with distribution.scope(): block = nn_blocks.DepthwiseSeparableConvBlock( num_filters, strides=strides) config_dict = block.get_config() recreate_block = nn_blocks.DepthwiseSeparableConvBlock(**config_dict) output_tensor = block(input_tensor) expected_output_shape = [ batch_size, height // strides, width // strides, num_filters ] self.assertEqual(output_tensor.shape.as_list(), expected_output_shape) output_tensor = recreate_block(input_tensor) self.assertEqual(output_tensor.shape.as_list(), expected_output_shape) class ReversibleLayerTest(parameterized.TestCase, tf.test.TestCase): @combinations.generate(distribution_strategy_combinations()) def test_downsampling_non_reversible_step(self, distribution): bsz, h, w, c = 8, 32, 32, 32 filters = 64 strides = 2 input_tensor = tf.random.uniform(shape=[bsz, h, w, c]) with distribution.scope(): f = nn_blocks.ResidualInner( filters=filters // 2, strides=strides, batch_norm_first=True) g = nn_blocks.ResidualInner( filters=filters // 2, strides=1, batch_norm_first=True) test_layer = nn_blocks.ReversibleLayer(f, g) test_layer.build(input_tensor.shape) optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) @tf.function def step_fn(): with tf.GradientTape() as tape: output = test_layer(input_tensor, training=True) grads = tape.gradient(output, test_layer.trainable_variables) # Test applying gradients with optimizer works optimizer.apply_gradients(zip(grads, test_layer.trainable_variables)) return output replica_output = distribution.run(step_fn) outputs = distribution.experimental_local_results(replica_output) # Assert forward pass shape expected_output_shape = [bsz, h // strides, w // strides, filters] for output in outputs: self.assertEqual(expected_output_shape, output.shape.as_list()) @combinations.generate(distribution_strategy_combinations()) def test_reversible_step(self, distribution): # Reversible layers satisfy: (a) strides = 1 (b) in_filter = out_filter bsz, h, w, c = 8, 32, 32, 32 filters = c strides = 1 input_tensor = tf.random.uniform(shape=[bsz, h, w, c]) with distribution.scope(): f = nn_blocks.ResidualInner( filters=filters // 2, strides=strides, batch_norm_first=False) g = nn_blocks.ResidualInner( filters=filters // 2, strides=1, batch_norm_first=False) test_layer = nn_blocks.ReversibleLayer(f, g) test_layer(input_tensor, training=False) # init weights optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) @tf.function def step_fn(): with tf.GradientTape() as tape: output = test_layer(input_tensor, training=True) grads = tape.gradient(output, test_layer.trainable_variables) # Test applying gradients with optimizer works optimizer.apply_gradients(zip(grads, test_layer.trainable_variables)) return output @tf.function def 
fwd(): test_layer(input_tensor) distribution.run(fwd) # Initialize variables prev_variables = tf.identity_n(test_layer.trainable_variables) replica_output = distribution.run(step_fn) outputs = distribution.experimental_local_results(replica_output) # Assert variables values have changed values for v0, v1 in zip(prev_variables, test_layer.trainable_variables): self.assertNotAllEqual(v0, v1) # Assert forward pass shape expected_output_shape = [bsz, h // strides, w // strides, filters] for output in outputs: self.assertEqual(expected_output_shape, output.shape.as_list()) @combinations.generate(distribution_strategy_combinations()) def test_manual_gradients_correctness(self, distribution): bsz, h, w, c = 8, 32, 32, 32 filters = c strides = 1 input_tensor = tf.random.uniform(shape=[bsz, h, w, c * 4]) # bottleneck with distribution.scope(): f_manual = nn_blocks.BottleneckResidualInner( filters=filters // 2, strides=strides, batch_norm_first=False) g_manual = nn_blocks.BottleneckResidualInner( filters=filters // 2, strides=1, batch_norm_first=False) manual_grad_layer = nn_blocks.ReversibleLayer(f_manual, g_manual) manual_grad_layer(input_tensor, training=False) # init weights f_auto = nn_blocks.BottleneckResidualInner( filters=filters // 2, strides=strides, batch_norm_first=False) g_auto = nn_blocks.BottleneckResidualInner( filters=filters // 2, strides=1, batch_norm_first=False) auto_grad_layer = nn_blocks.ReversibleLayer( f_auto, g_auto, manual_grads=False) auto_grad_layer(input_tensor) # init weights # Clone all weights (tf.keras.layers.Layer has no .clone()) auto_grad_layer._f.set_weights(manual_grad_layer._f.get_weights()) auto_grad_layer._g.set_weights(manual_grad_layer._g.get_weights()) @tf.function def manual_fn(): with tf.GradientTape() as tape: output = manual_grad_layer(input_tensor, training=True) grads = tape.gradient(output, manual_grad_layer.trainable_variables) return grads @tf.function def auto_fn(): with tf.GradientTape() as tape: output = auto_grad_layer(input_tensor, training=True) grads = tape.gradient(output, auto_grad_layer.trainable_variables) return grads manual_grads = distribution.run(manual_fn) auto_grads = distribution.run(auto_fn) # Assert gradients calculated manually are close to that from autograd for manual_grad, auto_grad in zip(manual_grads, auto_grads): self.assertAllClose( distribution.experimental_local_results(manual_grad), distribution.experimental_local_results(auto_grad), atol=5e-3, rtol=5e-3) # Verify that BN moving mean and variance is correct. for manual_var, auto_var in zip(manual_grad_layer.non_trainable_variables, auto_grad_layer.non_trainable_variables): self.assertAllClose(manual_var, auto_var) # Test class that wraps a standard attention layer. If this layer is called # at any point, the list passed to the config object will be filled with a # boolean 'True'. We register this class as a Keras serializable so we can # test serialization below. 
@tf.keras.utils.register_keras_serializable(package='TestOnlyAttention') class ValidatedAttentionLayer(nn_layers.MultiHeadAttention): def __init__(self, call_list, **kwargs): super(ValidatedAttentionLayer, self).__init__(**kwargs) self.list = call_list def call( self, query, value, attention_mask=None, return_attention_scores=False, ): self.list.append(True) return super(ValidatedAttentionLayer, self).call( query, value, attention_mask=attention_mask, return_attention_scores=return_attention_scores) def get_config(self): config = super(ValidatedAttentionLayer, self).get_config() config['call_list'] = self.list return config # Test class implements a simple feedforward layer. If this layer is called # at any point, the list passed to the config object will be filled with a # boolean 'True'. We register this class as a Keras serializable so we can # test serialization below. @tf.keras.utils.register_keras_serializable(package='TestOnlyFeedforward') class ValidatedFeedforwardLayer(tf.keras.layers.Layer): def __init__(self, call_list, activation, **kwargs): super(ValidatedFeedforwardLayer, self).__init__(**kwargs) self.list = call_list self.activation = activation def build(self, input_shape): hidden_size = input_shape[-1] self._feedforward_dense = tf.keras.layers.EinsumDense( '...x,xy->...y', output_shape=hidden_size, bias_axes='y', activation=self.activation, name='feedforward') def call(self, inputs): self.list.append(True) return self._feedforward_dense(inputs) def get_config(self): config = super(ValidatedFeedforwardLayer, self).get_config() config['call_list'] = [] config['activation'] = self.activation return config class TransformerLayerTest(tf.test.TestCase, parameterized.TestCase): def tearDown(self): super(TransformerLayerTest, self).tearDown() tf.keras.mixed_precision.set_global_policy('float32') @parameterized.parameters(None, 2) def test_layer_creation(self, max_attention_inference_parallelism): sequence_length = 21 width = 80 attention_layer_cfg = { 'num_heads': 10, 'key_dim': 8, 'call_list': [] } test_layer = nn_blocks.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, inner_dim=2048, inner_activation='relu', max_attention_inference_parallelism=max_attention_inference_parallelism, ) # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor = test_layer(data_tensor) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) call_list = test_layer._attention_layer.get_config()['call_list'] # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_layer_creation_with_feedforward_cls(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'key_dim': 8, 'call_list': call_list, } feedforward_call_list = [] feedforward_layer_cfg = { 'activation': 'relu', 'call_list': feedforward_call_list, } test_layer = nn_blocks.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, feedforward_cls=ValidatedFeedforwardLayer, feedforward_cfg=feedforward_layer_cfg, num_attention_heads=10, inner_dim=None, inner_activation=None) # Create a 3-dimensional input (the first dimension is implicit). 
data_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor = test_layer(data_tensor) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") self.assertNotEmpty(feedforward_call_list) self.assertTrue(feedforward_call_list[0], "The passed layer class wasn't instantiated.") def test_layer_creation_with_mask(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'key_dim': 8, 'call_list': call_list, } test_layer = nn_blocks.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, inner_dim=2048, inner_activation='relu') # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") @parameterized.parameters(None, 2) def test_layer_invocation(self, max_attention_inference_parallelism): sequence_length = 21 width = 80 attention_layer_cfg = { 'num_heads': 10, 'key_dim': 8, 'call_list': [], } test_layer = nn_blocks.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, inner_dim=2048, inner_activation='relu', max_attention_inference_parallelism=max_attention_inference_parallelism) # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor = test_layer(data_tensor) # Create a model from the test layer. model = tf.keras.Model(data_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, width)) _ = model.predict(input_data) call_list = test_layer._attention_layer.get_config()['call_list'] # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. 
self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_layer_invocation_with_feedforward_cls(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'key_dim': 8, 'call_list': call_list, } feedforward_call_list = [] feedforward_layer_cfg = { 'activation': 'relu', 'call_list': feedforward_call_list, } feedforward_layer = ValidatedFeedforwardLayer(**feedforward_layer_cfg) test_layer = nn_blocks.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, feedforward_cls=feedforward_layer, num_attention_heads=10, inner_dim=None, inner_activation=None) # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, width)) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) _ = model.predict([input_data, mask_data]) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") self.assertNotEmpty(feedforward_call_list) self.assertTrue(feedforward_call_list[0], "The passed layer class wasn't instantiated.") def test_layer_invocation_with_mask(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'key_dim': 8, 'call_list': call_list, } test_layer = nn_blocks.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, inner_dim=2048, inner_activation='relu') # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, width)) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) _ = model.predict([input_data, mask_data]) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. 
self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_layer_invocation_with_float16_dtype(self): tf.keras.mixed_precision.set_global_policy('mixed_float16') sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'key_dim': 8, 'call_list': call_list, } test_layer = nn_blocks.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, inner_dim=2048, inner_activation='relu') # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = (10 * np.random.random_sample( (batch_size, sequence_length, width))) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) _ = model.predict([input_data, mask_data]) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_transform_with_initializer(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'key_dim': 8, 'call_list': call_list, } test_layer = nn_blocks.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, inner_dim=2048, inner_activation='relu', kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output = test_layer(data_tensor) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list()) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0]) def test_layer_restoration_from_config(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'key_dim': 8, 'call_list': call_list, 'name': 'test_layer', } test_layer = nn_blocks.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, inner_dim=2048, inner_activation='relu') # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. 
batch_size = 6 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, width)) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) pre_serialization_output = model.predict([input_data, mask_data]) # Serialize the model config. Pass the serialized data through json to # ensure that we can serialize this layer to disk. serialized_data = model.get_config() # Create a new model from the old config, and copy the weights. These models # should have identical outputs. new_model = tf.keras.Model.from_config(serialized_data) new_model.set_weights(model.get_weights()) output = new_model.predict([input_data, mask_data]) self.assertAllClose(pre_serialization_output, output) # If the layer was configured correctly, it should have a list attribute # (since it should have the custom class and config passed to it). new_model.summary() new_call_list = new_model.get_layer( name='transformer_scaffold')._attention_layer.list self.assertNotEmpty(new_call_list) self.assertTrue(new_call_list[0], "The passed layer class wasn't instantiated.") def test_layer_with_feedforward_cls_restoration_from_config(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'key_dim': 8, 'call_list': call_list, 'name': 'test_layer', } feedforward_call_list = [] feedforward_layer_cfg = { 'activation': 'relu', 'call_list': feedforward_call_list, } test_layer = nn_blocks.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, feedforward_cls=ValidatedFeedforwardLayer, feedforward_cfg=feedforward_layer_cfg, num_attention_heads=10, inner_dim=None, inner_activation=None) # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, width)) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) pre_serialization_output = model.predict([input_data, mask_data]) serialized_data = model.get_config() # Create a new model from the old config, and copy the weights. These models # should have identical outputs. new_model = tf.keras.Model.from_config(serialized_data) new_model.set_weights(model.get_weights()) output = new_model.predict([input_data, mask_data]) self.assertAllClose(pre_serialization_output, output) # If the layer was configured correctly, it should have a list attribute # (since it should have the custom class and config passed to it). 
new_model.summary() new_call_list = new_model.get_layer( name='transformer_scaffold')._attention_layer.list self.assertNotEmpty(new_call_list) self.assertTrue(new_call_list[0], "The passed layer class wasn't instantiated.") new_feedforward_call_list = new_model.get_layer( name='transformer_scaffold')._feedforward_block.list self.assertNotEmpty(new_feedforward_call_list) self.assertTrue(new_feedforward_call_list[0], "The passed layer class wasn't instantiated.") if __name__ == '__main__': tf.test.main()
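The scaffold tests above build the transformer block from an attention class plus a config dictionary, optionally capping attention parallelism at inference. Below is a condensed sketch of that pattern using the MultiHeadAttention from nn_layers as the attention class; the shapes mirror the tests, and the particular configuration values are illustrative rather than required.

import numpy as np
import tensorflow as tf
from official.vision.modeling.layers import nn_blocks
from official.vision.modeling.layers import nn_layers

# The attention block is instantiated from attention_cls(**attention_cfg).
scaffold = nn_blocks.TransformerScaffold(
    attention_cls=nn_layers.MultiHeadAttention,
    attention_cfg={'num_heads': 10, 'key_dim': 8},
    num_attention_heads=10,
    inner_dim=2048,
    inner_activation='relu',
    max_attention_inference_parallelism=2,
)

sequence_length, width = 21, 80
data = tf.keras.Input(shape=(sequence_length, width))
mask = tf.keras.Input(shape=(sequence_length, sequence_length))
output = scaffold([data, mask])
model = tf.keras.Model([data, mask], output)

inputs = 10 * np.random.random_sample((6, sequence_length, width))
mask_data = np.random.randint(2, size=(6, sequence_length, sequence_length))
# Transformer output keeps the input shape: (6, 21, 80).
print(model.predict([inputs, mask_data]).shape)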
33,825
37.135287
80
py
models
models-master/official/vision/modeling/layers/nn_layers_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for nn_layers.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.layers import nn_layers class NNLayersTest(parameterized.TestCase, tf.test.TestCase): def test_scale(self): scale = nn_layers.Scale(initializer=tf.keras.initializers.constant(10.)) output = scale(3.) self.assertAllEqual(output, 30.) def test_temporal_softmax_pool(self): inputs = tf.range(4, dtype=tf.float32) + 1. inputs = tf.reshape(inputs, [1, 4, 1, 1, 1]) layer = nn_layers.TemporalSoftmaxPool() output = layer(inputs) self.assertAllClose( output, [[[[[0.10153633]]], [[[0.33481020]]], [[[0.82801306]]], [[[1.82021690]]]]]) def test_positional_encoding(self): pos_encoding = nn_layers.PositionalEncoding( initializer='ones', cache_encoding=False) pos_encoding_cached = nn_layers.PositionalEncoding( initializer='ones', cache_encoding=True) inputs = tf.ones([1, 4, 1, 1, 3]) outputs, _ = pos_encoding(inputs) outputs_cached, _ = pos_encoding_cached(inputs) expected = tf.constant( [[[[[1.0000000, 1.0000000, 2.0000000]]], [[[1.8414710, 1.0021545, 1.5403023]]], [[[1.9092975, 1.0043088, 0.5838531]]], [[[1.1411200, 1.0064633, 0.0100075]]]]]) self.assertEqual(outputs.shape, expected.shape) self.assertAllClose(outputs, expected) self.assertEqual(outputs.shape, outputs_cached.shape) self.assertAllClose(outputs, outputs_cached) inputs = tf.ones([1, 5, 1, 1, 3]) _ = pos_encoding(inputs) def test_positional_encoding_bfloat16(self): pos_encoding = nn_layers.PositionalEncoding(initializer='ones') inputs = tf.ones([1, 4, 1, 1, 3], dtype=tf.bfloat16) outputs, _ = pos_encoding(inputs) expected = tf.constant( [[[[[1.0000000, 1.0000000, 2.0000000]]], [[[1.8414710, 1.0021545, 1.5403023]]], [[[1.9092975, 1.0043088, 0.5838531]]], [[[1.1411200, 1.0064633, 0.0100075]]]]]) self.assertEqual(outputs.shape, expected.shape) self.assertAllClose(outputs, expected) def test_global_average_pool_basic(self): pool = nn_layers.GlobalAveragePool3D(keepdims=True) inputs = tf.ones([1, 2, 3, 4, 1]) outputs = pool(inputs, output_states=False) expected = tf.ones([1, 1, 1, 1, 1]) self.assertEqual(outputs.shape, expected.shape) self.assertAllEqual(outputs, expected) def test_positional_encoding_stream(self): pos_encoding = nn_layers.PositionalEncoding( initializer='ones', cache_encoding=False) inputs = tf.range(4, dtype=tf.float32) + 1. 
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1]) inputs = tf.tile(inputs, [1, 1, 1, 1, 3]) expected, _ = pos_encoding(inputs) for num_splits in [1, 2, 4]: frames = tf.split(inputs, num_splits, axis=1) states = {} predicted = [] for frame in frames: output, states = pos_encoding(frame, states=states) predicted.append(output) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllClose(predicted, [[[[[1.0000000, 1.0000000, 2.0000000]]], [[[2.8414710, 2.0021544, 2.5403023]]], [[[3.9092975, 3.0043090, 2.5838532]]], [[[4.1411200, 4.0064630, 3.0100074]]]]]) def test_global_average_pool_keras(self): pool = nn_layers.GlobalAveragePool3D(keepdims=False) keras_pool = tf.keras.layers.GlobalAveragePooling3D() inputs = 10 * tf.random.normal([1, 2, 3, 4, 1]) outputs = pool(inputs, output_states=False) keras_output = keras_pool(inputs) self.assertAllEqual(outputs.shape, keras_output.shape) self.assertAllClose(outputs, keras_output) def test_stream_global_average_pool(self): gap = nn_layers.GlobalAveragePool3D(keepdims=True, causal=False) inputs = tf.range(4, dtype=tf.float32) + 1. inputs = tf.reshape(inputs, [1, 4, 1, 1, 1]) inputs = tf.tile(inputs, [1, 1, 2, 2, 3]) expected, _ = gap(inputs, output_states=True) for num_splits in [1, 2, 4]: frames = tf.split(inputs, num_splits, axis=1) states = {} predicted = None for frame in frames: predicted, states = gap(frame, states=states, output_states=True) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllClose( predicted, [[[[[2.5, 2.5, 2.5]]]]]) def test_causal_stream_global_average_pool(self): gap = nn_layers.GlobalAveragePool3D(keepdims=True, causal=True) inputs = tf.range(4, dtype=tf.float32) + 1. inputs = tf.reshape(inputs, [1, 4, 1, 1, 1]) inputs = tf.tile(inputs, [1, 1, 2, 2, 3]) expected, _ = gap(inputs, output_states=True) for num_splits in [1, 2, 4]: frames = tf.split(inputs, num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = gap(frame, states=states, output_states=True) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllClose( predicted, [[[[[1.0, 1.0, 1.0]]], [[[1.5, 1.5, 1.5]]], [[[2.0, 2.0, 2.0]]], [[[2.5, 2.5, 2.5]]]]]) def test_spatial_average_pool(self): pool = nn_layers.SpatialAveragePool3D(keepdims=True) inputs = tf.range(64, dtype=tf.float32) + 1. 
inputs = tf.reshape(inputs, [1, 4, 4, 4, 1]) output = pool(inputs) self.assertEqual(output.shape, [1, 4, 1, 1, 1]) self.assertAllClose( output, [[[[[8.50]]], [[[24.5]]], [[[40.5]]], [[[56.5]]]]]) def test_conv2d_causal(self): conv2d = nn_layers.Conv2D( filters=3, kernel_size=(3, 3), strides=(1, 2), padding='causal', use_buffered_input=True, kernel_initializer='ones', use_bias=False, ) inputs = tf.ones([1, 4, 2, 3]) paddings = [[0, 0], [2, 0], [0, 0], [0, 0]] padded_inputs = tf.pad(inputs, paddings) predicted = conv2d(padded_inputs) expected = tf.constant( [[[[6.0, 6.0, 6.0]], [[12., 12., 12.]], [[18., 18., 18.]], [[18., 18., 18.]]]]) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) conv2d.use_buffered_input = False predicted = conv2d(inputs) self.assertFalse(conv2d.use_buffered_input) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) def test_depthwise_conv2d_causal(self): conv2d = nn_layers.DepthwiseConv2D( kernel_size=(3, 3), strides=(1, 1), padding='causal', use_buffered_input=True, depthwise_initializer='ones', use_bias=False, ) inputs = tf.ones([1, 2, 2, 3]) paddings = [[0, 0], [2, 0], [0, 0], [0, 0]] padded_inputs = tf.pad(inputs, paddings) predicted = conv2d(padded_inputs) expected = tf.constant( [[[[2., 2., 2.], [2., 2., 2.]], [[4., 4., 4.], [4., 4., 4.]]]]) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) conv2d.use_buffered_input = False predicted = conv2d(inputs) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) def test_conv3d_causal(self): conv3d = nn_layers.Conv3D( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), padding='causal', use_buffered_input=True, kernel_initializer='ones', use_bias=False, ) inputs = tf.ones([1, 2, 4, 4, 3]) paddings = [[0, 0], [2, 0], [0, 0], [0, 0], [0, 0]] padded_inputs = tf.pad(inputs, paddings) predicted = conv3d(padded_inputs) expected = tf.constant( [[[[[27., 27., 27.], [18., 18., 18.]], [[18., 18., 18.], [12., 12., 12.]]], [[[54., 54., 54.], [36., 36., 36.]], [[36., 36., 36.], [24., 24., 24.]]]]]) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) conv3d.use_buffered_input = False predicted = conv3d(inputs) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) def test_depthwise_conv3d_causal(self): conv3d = nn_layers.Conv3D( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), padding='causal', use_buffered_input=True, kernel_initializer='ones', use_bias=False, groups=3, ) inputs = tf.ones([1, 2, 4, 4, 3]) paddings = [[0, 0], [2, 0], [0, 0], [0, 0], [0, 0]] padded_inputs = tf.pad(inputs, paddings) predicted = conv3d(padded_inputs) expected = tf.constant( [[[[[9.0, 9.0, 9.0], [6.0, 6.0, 6.0]], [[6.0, 6.0, 6.0], [4.0, 4.0, 4.0]]], [[[18.0, 18.0, 18.0], [12., 12., 12.]], [[12., 12., 12.], [8., 8., 8.]]]]]) output_shape = conv3d._spatial_output_shape([4, 4, 4]) self.assertAllClose(output_shape, [2, 2, 2]) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) conv3d.use_buffered_input = False predicted = conv3d(inputs) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) def test_conv3d_causal_padding_2d(self): """Test to ensure causal padding works like standard padding.""" conv3d = nn_layers.Conv3D( filters=1, kernel_size=(1, 3, 3), strides=(1, 2, 2), padding='causal', use_buffered_input=False, kernel_initializer='ones', 
use_bias=False, ) keras_conv3d = tf.keras.layers.Conv3D( filters=1, kernel_size=(1, 3, 3), strides=(1, 2, 2), padding='same', kernel_initializer='ones', use_bias=False, ) inputs = tf.ones([1, 1, 4, 4, 1]) predicted = conv3d(inputs) expected = keras_conv3d(inputs) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllClose(predicted, [[[[[9.], [6.]], [[6.], [4.]]]]]) def test_conv3d_causal_padding_1d(self): """Test to ensure causal padding works like standard padding.""" conv3d = nn_layers.Conv3D( filters=1, kernel_size=(3, 1, 1), strides=(2, 1, 1), padding='causal', use_buffered_input=False, kernel_initializer='ones', use_bias=False, ) keras_conv1d = tf.keras.layers.Conv1D( filters=1, kernel_size=3, strides=2, padding='causal', kernel_initializer='ones', use_bias=False, ) inputs = tf.ones([1, 4, 1, 1, 1]) predicted = conv3d(inputs) expected = keras_conv1d(tf.squeeze(inputs, axis=[2, 3])) expected = tf.reshape(expected, [1, 2, 1, 1, 1]) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllClose(predicted, [[[[[1.]]], [[[3.]]]]]) @parameterized.parameters( (None, []), (None, [6, 12, 18]), ([32, 32], [6, 12, 18]), ) def test_aspp(self, pool_kernel_size, dilation_rates): inputs = tf.keras.Input(shape=(64, 64, 128), dtype=tf.float32) layer = nn_layers.SpatialPyramidPooling( output_channels=256, dilation_rates=dilation_rates, pool_kernel_size=pool_kernel_size) output = layer(inputs) self.assertAllEqual([None, 64, 64, 256], output.shape) @parameterized.parameters(None, 2) def test_multi_head_attention(self, max_inference_parallelism): layer = nn_layers.MultiHeadAttention( num_heads=12, key_dim=64, max_inference_parallelism=max_inference_parallelism, ) # Create a 3-dimensional input (the first dimension is implicit). query = tf.keras.Input(shape=(40, 80)) value = tf.keras.Input(shape=(20, 80)) output = layer(query=query, value=value) self.assertEqual(output.shape.as_list(), [None, 40, 80]) if __name__ == '__main__': tf.test.main()
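The causal-padding tests above check that nn_layers.Conv3D(padding='causal') matches the ordinary Keras convolutions once the temporal padding is accounted for. A condensed sketch of the buffered-input pattern they exercise; the constructor values are taken from test_conv3d_causal and are illustrative.

import tensorflow as tf
from official.vision.modeling.layers import nn_layers

conv3d = nn_layers.Conv3D(
    filters=3,
    kernel_size=(3, 3, 3),
    strides=(1, 2, 2),
    padding='causal',
    use_buffered_input=True,   # the caller supplies the temporal padding
    kernel_initializer='ones',
    use_bias=False,
)

inputs = tf.ones([1, 2, 4, 4, 3])
# With buffered input, pad the time axis by (kernel_t - 1) frames up front.
padded_inputs = tf.pad(inputs, [[0, 0], [2, 0], [0, 0], [0, 0], [0, 0]])
buffered_out = conv3d(padded_inputs)

# With buffering disabled, the layer applies the causal padding itself.
conv3d.use_buffered_input = False
unbuffered_out = conv3d(inputs)

tf.debugging.assert_near(buffered_out, unbuffered_out)
print(buffered_out.shape)  # (1, 2, 2, 2, 3)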
13,309
29.73903
79
py
models
models-master/official/vision/modeling/layers/edgetpu_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests EdgeTPU oriented layers and tools.""" from typing import Optional from absl.testing import parameterized import numpy as np import tensorflow as tf from official.vision.modeling.layers import edgetpu def random_boxes(shape): a = tf.random.uniform(shape=shape+[2]) b = tf.random.uniform(shape=shape+[2]) l = tf.minimum(a, b) u = tf.maximum(a, b) return tf.concat([l, u], axis=-1) def _maximum_activation_size(model): max_size = 0 for layer in model.layers: outputs = layer.output if not isinstance(outputs, list): outputs = [outputs] for output in outputs: if hasattr(output, 'shape'): size = np.prod(output.shape) max_size = max(max_size, size) return max_size def _deviation_and_margin(reference, valid, optimized): """Returns deviation and margin between two batched sets of indices.""" deviation_rate = 0 min_union = reference.shape[1] + optimized.shape[1] runs = reference.shape[0] for run in range(runs): reference_slice = {*reference[run, :valid[run]].numpy().tolist()} optimized_slice = {*optimized[run].numpy().astype(int).tolist()} - {-1} union_size = len(optimized_slice | reference_slice) symdiff_size = len(optimized_slice ^ reference_slice) deviation_rate += symdiff_size / union_size min_union = min(min_union, union_size) deviation_rate = deviation_rate / runs # six sigma estimate via LLN theorem margin = 6 * (deviation_rate / np.sqrt(runs) + 1 / (runs * min_union)) return deviation_rate, margin class NonMaxSuppressionTest(parameterized.TestCase, tf.test.TestCase): def setUp(self): super().setUp() tf.random.set_seed(42) def test_refinement_sample(self): """Tests difference in NMS behaviours. Runs on four boxes with following IOU table (only neighbours will qualify as similar boxes) box | 0 | 1 | 2 | 3 --- | ---- | ---- | ---- | ---- 0 | 1 | 7/13 | 1/4 | 1/19 1 | 7/13 | 1 | 7/13 | 1/4 2 | 1/4 | 7/13 | 1 | 7/13 3 | 1/19 | 1/4 | 7/13 | 1 So 0 is best box, it eliminates 1, next is box 2 which is eleminated by 1 if it is allowed (depending on number of refinements). """ boxes: tf.Tensor = tf.constant( [ # y1, x1, y2, x2 [0.0, 0.0, 1.0, 1.0], [0.0, 0.3, 1.0, 1.3], [0.0, 0.6, 1.0, 1.6], [0.0, 0.9, 1.0, 1.9], ], dtype=tf.float32) scores: tf.Tensor = tf.constant([ 1.0, 0.9, 0.8, 0.7, ], dtype=tf.float32) self.assertAllEqual( edgetpu.non_max_suppression_padded(boxes, scores, 4, refinements=0), tf.constant([0.0, -1.0, -1.0, -1.0], dtype=tf.float32)) self.assertAllEqual( edgetpu.non_max_suppression_padded(boxes, scores, 4, refinements=1), tf.constant([0.0, 2.0, -1.0, -1.0], dtype=tf.float32)) @parameterized.parameters((16, 8, 200, [0.009, 0.004, 0.004]), (31, 17, 100, [0.013, 0.004, 0.004]), (71, 41, 100, [0.045, 0.003, 0.002]), (150, 100, 100, [0.129, 0.010, 0.001]), (300, 300, 100, [0.116, 0.016, 0.002]), (600, 600, 50, [0.176, 0.032, 0.003])) def test_reference_match(self, n, top, runs, max_devs): """Compares that new optimized method is close to reference method. 
Runs two algorithms with same sets of input boxes and scores, and measures deviation between returned sets of prunned boxes. Read more about test results at ./g3doc/non_max_suppression.md (*) Avoid flakiness with safe boundary (go/python-tips/048): deviation between two sets is a positive number, which may vary from test to test. Doing multiple runs expected to reduce average deviation variation following LLN theorem. Therefore by having first test run we know upper deviation bound which algorithm would not exceed until broken (in any feasible amount of time in the future). Use of this safe boundary makes test non-flaky. Args: n: number of boxes and scores on input of the algorithm. top: limit of output boxes count. runs: for the statistical testing number of runs to performs to avoid tests flakiness. max_devs: series of mean limits on deviation between optimized and reference algorithms with different number of refinements. (Indexes of elements correspond to number of refinements) Please use margin based values proposed by failed test to avoid flaky testing. """ boxes = random_boxes([runs, n]) scores = tf.random.uniform(shape=[runs, n]) reference, valid = tf.image.non_max_suppression_padded( boxes, scores, top, pad_to_max_output_size=True) for refinements, max_deviation in enumerate(max_devs): optimized = edgetpu.non_max_suppression_padded( boxes, scores, top, refinements=refinements) deviation, margin = _deviation_and_margin(reference, valid, optimized) self.assertLess( deviation, max_deviation, msg='Deviation rate between optimized and reference implementations is ' 'higher than expected. If you are tuning the test, recommended safe ' 'deviation rate is ' f'{deviation} + {margin} = {deviation + margin}') @parameterized.parameters(([16], 8), ([91, 150], 100), ([20, 20, 200], 10)) def test_sharded_match(self, shape: list[int], top: int): boxes = random_boxes(shape) scores = tf.random.uniform(shape=shape) optimized = edgetpu.non_max_suppression_padded(boxes, scores, top) reference = edgetpu._non_max_suppression_as_is(boxes, scores, top) self.assertAllEqual(optimized, reference) _sharded_nms = edgetpu.non_max_suppression_padded _stright_nms = edgetpu._non_max_suppression_as_is @parameterized.parameters(([16], 8, _sharded_nms, True), ([16], 8, _stright_nms, True), ([91, 150], 100, _sharded_nms, True), ([91, 150], 100, _stright_nms, False), ([20, 20, 200], 10, _sharded_nms, True), ([20, 20, 200], 10, _stright_nms, False)) def test_sharded_size(self, shape: list[int], top: int, algorithm, fits_as_is: bool): scores = tf.keras.Input(shape=shape, batch_size=1) boxes = tf.keras.Input(shape=shape + [4], batch_size=1) optimized = algorithm(boxes, scores, top) model = tf.keras.Model(inputs=[boxes, scores], outputs=optimized) max_size = _maximum_activation_size(model) if fits_as_is: # Sharding done or not needed. self.assertLessEqual(max_size, edgetpu._RECOMMENDED_NMS_MEMORY) else: # Sharding needed. self.assertGreater(max_size, edgetpu._RECOMMENDED_NMS_MEMORY) def test_shard_tensors(self): a: tf.Tensor = tf.constant([[0, 1, 2, 3, 4]]) b: tf.Tensor = tf.constant([[ [0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24], ]]) for i, (a_i, b_i) in enumerate(edgetpu.shard_tensors(1, 3, (a, b))): self.assertAllEqual(a_i, a[:, i * 3:i * 3 + 3]) self.assertAllEqual(b_i, b[:, i * 3:i * 3 + 3, :]) def test_top_k_sharded_fusion_arguments_validation(self): # Input scores is not pair of aggregation and shard. 
self.assertRaises(ValueError, edgetpu.concat_and_top_k, 100, tf.zeros(shape=[1000])) # Input other values is not pairs of aggregation and shard. self.assertRaises(TypeError, edgetpu.concat_and_top_k, 100, (None, tf.zeros(shape=[1000])), None, tf.zeros(shape=[1000])) # Insufficient rank to do top_k self.assertRaises(IndexError, edgetpu.concat_and_top_k, 100, (None, tf.constant(1.))) @parameterized.parameters(0, 1, 2) def test_top_k_sharded_fusion_vs_top_k_unsharded(self, axis: int): r"""Tests `horizontal` sharding using shard_tensors and concat_and_top_k. Will generate and test graph (on diagram 4 shards, in test 6 shards): Input ----- | +-------+-------------------------------------------- | Split |----------------------- \ +-------+--- \ | | \ | | +-------+ +--------+ +-------+ +--------+ +-------+ +--------+ +-------+ | top k |-| concat |-| top k |-| concat |-| top k |-| concat |-| top k | +-------+ +--------+ +-------+ +--------+ +-------+ +--------+ +-------+ | Output ------ Args: axis: test top_k axis (tensor rank will be axis + 1) """ sample: tf.Tensor = tf.random.uniform( shape=axis * [1] + [10000], dtype=tf.float32) top_1000_direct: tf.Tensor = tf.math.top_k(sample, 1000).values top_1000_sharded: Optional[tf.Tensor] = None for (piece,) in edgetpu.shard_tensors(axis, 1500, (sample,)): (top_1000_sharded,) = edgetpu.concat_and_top_k( 1000, (top_1000_sharded, piece)) self.assertAllEqual(top_1000_direct, top_1000_sharded) if __name__ == '__main__': tf.test.main()
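The refinement behaviour described in the docstring of test_refinement_sample can be reproduced directly from its values; the snippet below is a short restatement of that example rather than additional API surface.

import tensorflow as tf
from official.vision.modeling.layers import edgetpu

# Four unit-height boxes shifted by 0.3 along x; only neighbouring boxes
# overlap enough to suppress each other.
boxes = tf.constant([
    [0.0, 0.0, 1.0, 1.0],
    [0.0, 0.3, 1.0, 1.3],
    [0.0, 0.6, 1.0, 1.6],
    [0.0, 0.9, 1.0, 1.9],
], dtype=tf.float32)
scores = tf.constant([1.0, 0.9, 0.8, 0.7], dtype=tf.float32)

# Without refinement only the best box survives; one refinement pass also
# recovers box 2, whose suppressor (box 1) is itself suppressed by box 0.
print(edgetpu.non_max_suppression_padded(boxes, scores, 4, refinements=0))
# [ 0. -1. -1. -1.]
print(edgetpu.non_max_suppression_padded(boxes, scores, 4, refinements=1))
# [ 0.  2. -1. -1.]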
10,110
40.780992
82
py
models
models-master/official/vision/modeling/layers/deeplab.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Layers for DeepLabV3.""" import tensorflow as tf from official.modeling import tf_utils class SpatialPyramidPooling(tf.keras.layers.Layer): """Implements the Atrous Spatial Pyramid Pooling. References: [Rethinking Atrous Convolution for Semantic Image Segmentation]( https://arxiv.org/pdf/1706.05587.pdf) [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/pdf/1802.02611.pdf) """ def __init__( self, output_channels, dilation_rates, pool_kernel_size=None, use_sync_bn=False, batchnorm_momentum=0.99, batchnorm_epsilon=0.001, activation='relu', dropout=0.5, kernel_initializer='glorot_uniform', kernel_regularizer=None, interpolation='bilinear', use_depthwise_convolution=False, **kwargs): """Initializes `SpatialPyramidPooling`. Args: output_channels: Number of channels produced by SpatialPyramidPooling. dilation_rates: A list of integers for parallel dilated conv. pool_kernel_size: A list of integers or None. If None, global average pooling is applied, otherwise an average pooling of pool_kernel_size is applied. use_sync_bn: A bool, whether or not to use sync batch normalization. batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to 0.99. batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to 0.001. activation: A `str` for type of activation to be used. Defaults to 'relu'. dropout: A float for the dropout rate before output. Defaults to 0.5. kernel_initializer: Kernel initializer for conv layers. Defaults to `glorot_uniform`. kernel_regularizer: Kernel regularizer for conv layers. Defaults to None. interpolation: The interpolation method for upsampling. Defaults to `bilinear`. use_depthwise_convolution: Allows spatial pooling to be separable depthwise convolusions. [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation]( https://arxiv.org/pdf/1802.02611.pdf) **kwargs: Other keyword arguments for the layer. 
""" super(SpatialPyramidPooling, self).__init__(**kwargs) self.output_channels = output_channels self.dilation_rates = dilation_rates self.use_sync_bn = use_sync_bn self.batchnorm_momentum = batchnorm_momentum self.batchnorm_epsilon = batchnorm_epsilon self.activation = activation self.dropout = dropout self.kernel_initializer = tf.keras.initializers.get(kernel_initializer) self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self.interpolation = interpolation self.input_spec = tf.keras.layers.InputSpec(ndim=4) self.pool_kernel_size = pool_kernel_size self.use_depthwise_convolution = use_depthwise_convolution def build(self, input_shape): channels = input_shape[3] self.aspp_layers = [] bn_op = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': bn_axis = -1 else: bn_axis = 1 conv_sequential = tf.keras.Sequential([ tf.keras.layers.Conv2D( filters=self.output_channels, kernel_size=(1, 1), kernel_initializer=tf_utils.clone_initializer( self.kernel_initializer), kernel_regularizer=self.kernel_regularizer, use_bias=False), bn_op( axis=bn_axis, momentum=self.batchnorm_momentum, epsilon=self.batchnorm_epsilon, synchronized=self.use_sync_bn), tf.keras.layers.Activation(self.activation) ]) self.aspp_layers.append(conv_sequential) for dilation_rate in self.dilation_rates: leading_layers = [] kernel_size = (3, 3) if self.use_depthwise_convolution: leading_layers += [ tf.keras.layers.DepthwiseConv2D( depth_multiplier=1, kernel_size=kernel_size, padding='same', dilation_rate=dilation_rate, use_bias=False) ] kernel_size = (1, 1) conv_sequential = tf.keras.Sequential(leading_layers + [ tf.keras.layers.Conv2D( filters=self.output_channels, kernel_size=kernel_size, padding='same', kernel_regularizer=self.kernel_regularizer, kernel_initializer=tf_utils.clone_initializer( self.kernel_initializer), dilation_rate=dilation_rate, use_bias=False), bn_op( axis=bn_axis, momentum=self.batchnorm_momentum, epsilon=self.batchnorm_epsilon, synchronized=self.use_sync_bn), tf.keras.layers.Activation(self.activation) ]) self.aspp_layers.append(conv_sequential) if self.pool_kernel_size is None: pool_sequential = tf.keras.Sequential([ tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Reshape((1, 1, channels))]) else: pool_sequential = tf.keras.Sequential([ tf.keras.layers.AveragePooling2D(self.pool_kernel_size)]) pool_sequential.add( tf.keras.Sequential([ tf.keras.layers.Conv2D( filters=self.output_channels, kernel_size=(1, 1), kernel_initializer=tf_utils.clone_initializer( self.kernel_initializer), kernel_regularizer=self.kernel_regularizer, use_bias=False), bn_op( axis=bn_axis, momentum=self.batchnorm_momentum, epsilon=self.batchnorm_epsilon, synchronized=self.use_sync_bn), tf.keras.layers.Activation(self.activation) ])) self.aspp_layers.append(pool_sequential) self.projection = tf.keras.Sequential([ tf.keras.layers.Conv2D( filters=self.output_channels, kernel_size=(1, 1), kernel_initializer=tf_utils.clone_initializer( self.kernel_initializer), kernel_regularizer=self.kernel_regularizer, use_bias=False), bn_op( axis=bn_axis, momentum=self.batchnorm_momentum, epsilon=self.batchnorm_epsilon, synchronized=self.use_sync_bn), tf.keras.layers.Activation(self.activation), tf.keras.layers.Dropout(rate=self.dropout) ]) def call(self, inputs, training=None): if training is None: training = tf.keras.backend.learning_phase() result = [] for i, layer in enumerate(self.aspp_layers): x = layer(inputs, training=training) # Apply resize layer to the end of the last set 
of layers. if i == len(self.aspp_layers) - 1: x = tf.image.resize(tf.cast(x, tf.float32), tf.shape(inputs)[1:3]) result.append(tf.cast(x, inputs.dtype)) result = tf.concat(result, axis=-1) result = self.projection(result, training=training) return result def get_config(self): config = { 'output_channels': self.output_channels, 'dilation_rates': self.dilation_rates, 'pool_kernel_size': self.pool_kernel_size, 'use_sync_bn': self.use_sync_bn, 'batchnorm_momentum': self.batchnorm_momentum, 'batchnorm_epsilon': self.batchnorm_epsilon, 'activation': self.activation, 'dropout': self.dropout, 'kernel_initializer': tf.keras.initializers.serialize( self.kernel_initializer), 'kernel_regularizer': tf.keras.regularizers.serialize( self.kernel_regularizer), 'interpolation': self.interpolation, } base_config = super(SpatialPyramidPooling, self).get_config() return dict(list(base_config.items()) + list(config.items()))
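A minimal usage sketch of this ASPP layer on a 4-D backbone feature map; the dilation rates and channel counts mirror values used in this repo's tests, and the input shape is illustrative.

import tensorflow as tf
from official.vision.modeling.layers import deeplab

aspp = deeplab.SpatialPyramidPooling(
    output_channels=256,
    dilation_rates=[6, 12, 18],   # parallel atrous branches
    pool_kernel_size=None,        # None -> global average pooling branch
)

# For example, a backbone output at stride 16 of a 1024x1024 image.
features = tf.keras.Input(shape=(64, 64, 128))
output = aspp(features)
# A 256-channel map at the input's spatial resolution.
print(output.shape)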
8,561
37.053333
80
py
models
models-master/official/vision/modeling/layers/box_sampler.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains definitions of box sampler."""

# Import libraries
import tensorflow as tf

from official.vision.ops import sampling_ops


@tf.keras.utils.register_keras_serializable(package='Vision')
class BoxSampler(tf.keras.layers.Layer):
  """Creates a BoxSampler to sample positive and negative boxes."""

  def __init__(self,
               num_samples: int = 512,
               foreground_fraction: float = 0.25,
               **kwargs):
    """Initializes a box sampler.

    Args:
      num_samples: An `int` of the number of sampled boxes per image.
      foreground_fraction: A `float` in [0, 1], what percentage of boxes should
        be sampled from the positive examples.
      **kwargs: Additional keyword arguments passed to Layer.
    """
    self._config_dict = {
        'num_samples': num_samples,
        'foreground_fraction': foreground_fraction,
    }
    super(BoxSampler, self).__init__(**kwargs)

  def call(self,
           positive_matches: tf.Tensor,
           negative_matches: tf.Tensor,
           ignored_matches: tf.Tensor):
    """Samples and selects positive and negative instances.

    Args:
      positive_matches: A `bool` tensor of shape [batch, N] where N is the
        number of instances. For each element, `True` means the instance
        corresponds to a positive example.
      negative_matches: A `bool` tensor of shape [batch, N] where N is the
        number of instances. For each element, `True` means the instance
        corresponds to a negative example.
      ignored_matches: A `bool` tensor of shape [batch, N] where N is the
        number of instances. For each element, `True` means the instance
        should be ignored.

    Returns:
      A `tf.Tensor` of shape [batch_size, K], storing the indices of the
        sampled examples, where K is `num_samples`.
    """
    sample_candidates = tf.logical_and(
        tf.logical_or(positive_matches, negative_matches),
        tf.logical_not(ignored_matches))

    sampler = sampling_ops.BalancedPositiveNegativeSampler(
        positive_fraction=self._config_dict['foreground_fraction'],
        is_static=True)

    batch_size = sample_candidates.shape[0]
    sampled_indicators = []
    for i in range(batch_size):
      sampled_indicator = sampler.subsample(
          sample_candidates[i],
          self._config_dict['num_samples'],
          positive_matches[i])
      sampled_indicators.append(sampled_indicator)
    sampled_indicators = tf.stack(sampled_indicators)

    _, selected_indices = tf.nn.top_k(
        tf.cast(sampled_indicators, dtype=tf.int32),
        k=self._config_dict['num_samples'],
        sorted=True)

    return selected_indices

  def get_config(self):
    return self._config_dict

  @classmethod
  def from_config(cls, config):
    return cls(**config)
3,401
35.191489
79
py
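BoxSampler takes three boolean match tensors of shape [batch, N] and returns the indices of `num_samples` boxes, balancing positives against negatives via `foreground_fraction`. The sketch below uses made-up toy match tensors chosen only to show the expected shapes and dtypes; it is not a prescribed configuration.

# Sketch of sampling with the BoxSampler above; the tiny match tensors are
# illustrative assumptions, not real matcher output.
import tensorflow as tf
from official.vision.modeling.layers import box_sampler

sampler = box_sampler.BoxSampler(num_samples=4, foreground_fraction=0.5)

positive_matches = tf.constant([[True, False, True, False, False, False]])
negative_matches = tf.constant([[False, True, False, True, True, False]])
ignored_matches = tf.constant([[False, False, False, False, False, True]])

# Indices of the sampled boxes, shape [batch, num_samples] = [1, 4].
selected = sampler(positive_matches, negative_matches, ignored_matches)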
models
models-master/official/vision/modeling/layers/roi_aligner.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains definitions of ROI aligner."""

from typing import Mapping
import tensorflow as tf

from official.vision.ops import spatial_transform_ops


@tf.keras.utils.register_keras_serializable(package='Vision')
class MultilevelROIAligner(tf.keras.layers.Layer):
  """Performs ROIAlign for the second stage processing."""

  def __init__(self, crop_size: int = 7, sample_offset: float = 0.5, **kwargs):
    """Initializes a ROI aligner.

    Args:
      crop_size: An `int` of the output size of the cropped features.
      sample_offset: A `float` in [0, 1] of the subpixel sample offset.
      **kwargs: Additional keyword arguments passed to Layer.
    """
    self._config_dict = {
        'crop_size': crop_size,
        'sample_offset': sample_offset,
    }
    super(MultilevelROIAligner, self).__init__(**kwargs)

  def call(self,
           features: Mapping[str, tf.Tensor],
           boxes: tf.Tensor,
           training: bool = None):
    """Generates ROIs.

    Args:
      features: A dictionary with key as pyramid level and value as features.
        The features are in shape of [batch_size, height_l, width_l,
        num_filters].
      boxes: A 3-D `tf.Tensor` of shape [batch_size, num_boxes, 4]. Each row
        represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
      training: A `bool` of whether it is in training mode.

    Returns:
      A 5-D `tf.Tensor` representing feature crop of shape
      [batch_size, num_boxes, crop_size, crop_size, num_filters].
    """
    roi_features = spatial_transform_ops.multilevel_crop_and_resize(
        features,
        boxes,
        output_size=self._config_dict['crop_size'],
        sample_offset=self._config_dict['sample_offset'])
    return roi_features

  def get_config(self):
    return self._config_dict

  @classmethod
  def from_config(cls, config, custom_objects=None):
    return cls(**config)
2,533
33.712329
79
py
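MultilevelROIAligner crops each box from the appropriate pyramid level and resizes it to a `crop_size` x `crop_size` patch. A short usage sketch follows; the pyramid levels, channel width, and box coordinates are illustrative assumptions rather than values taken from this row.

# Sketch of running the ROI aligner above on a fake FPN-style feature pyramid;
# the levels, channel count, and boxes are illustrative assumptions.
import tensorflow as tf
from official.vision.modeling.layers import roi_aligner

aligner = roi_aligner.MultilevelROIAligner(crop_size=7, sample_offset=0.5)

features = {
    '2': tf.random.normal([1, 64, 64, 256]),
    '3': tf.random.normal([1, 32, 32, 256]),
    '4': tf.random.normal([1, 16, 16, 256]),
    '5': tf.random.normal([1, 8, 8, 256]),
}
# Un-normalized [y1, x1, y2, x2] boxes in image coordinates.
boxes = tf.constant([[[10., 10., 50., 50.], [0., 0., 120., 120.]]])

rois = aligner(features, boxes)  # [1, 2, 7, 7, 256]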
models
models-master/official/vision/modeling/layers/nn_blocks.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains common building blocks for neural networks.""" from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Text # Import libraries from absl import logging import tensorflow as tf from official.modeling import tf_utils from official.nlp import modeling as nlp_modeling from official.vision.modeling.layers import nn_layers def _pad_strides(strides: int, axis: int) -> Tuple[int, int, int, int]: """Converts int to len 4 strides (`tf.nn.avg_pool` uses length 4).""" if axis == 1: return (1, 1, strides, strides) else: return (1, strides, strides, 1) def _maybe_downsample(x: tf.Tensor, out_filter: int, strides: int, axis: int) -> tf.Tensor: """Downsamples feature map and 0-pads tensor if in_filter != out_filter.""" data_format = 'NCHW' if axis == 1 else 'NHWC' strides = _pad_strides(strides, axis=axis) x = tf.nn.avg_pool(x, strides, strides, 'VALID', data_format=data_format) in_filter = x.shape[axis] if in_filter < out_filter: # Pad on channel dimension with 0s: half on top half on bottom. pad_size = [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2] if axis == 1: x = tf.pad(x, [[0, 0], pad_size, [0, 0], [0, 0]]) else: x = tf.pad(x, [[0, 0], [0, 0], [0, 0], pad_size]) return x + 0. @tf.keras.utils.register_keras_serializable(package='Vision') class ResidualBlock(tf.keras.layers.Layer): """A residual block.""" def __init__(self, filters, strides, use_projection=False, se_ratio=None, resnetd_shortcut=False, stochastic_depth_drop_rate=None, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, activation='relu', use_explicit_padding: bool = False, use_sync_bn=False, norm_momentum=0.99, norm_epsilon=0.001, bn_trainable=True, **kwargs): """Initializes a residual block with BN after convolutions. Args: filters: An `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. strides: An `int` block stride. If greater than 1, this block will ultimately downsample the input. use_projection: A `bool` for whether this block should use a projection shortcut (versus the default identity shortcut). This is usually `True` for the first block of a block group, which may change the number of filters and the resolution. se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer. resnetd_shortcut: A `bool` if True, apply the resnetd style modification to the shortcut connection. Not implemented in residual blocks. stochastic_depth_drop_rate: A `float` or None. if not None, drop rate for the stochastic depth layer. kernel_initializer: A `str` of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d. Default to None. activation: A `str` name of the activation function. 
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_sync_bn: A `bool`. If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. bn_trainable: A `bool` that indicates whether batch norm layers should be trainable. Default to True. **kwargs: Additional keyword arguments to be passed. """ super(ResidualBlock, self).__init__(**kwargs) self._filters = filters self._strides = strides self._use_projection = use_projection self._se_ratio = se_ratio self._resnetd_shortcut = resnetd_shortcut self._use_explicit_padding = use_explicit_padding self._use_sync_bn = use_sync_bn self._activation = activation self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._kernel_initializer = kernel_initializer self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._norm = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation_fn = tf_utils.get_activation(activation) self._bn_trainable = bn_trainable def build(self, input_shape): if self._use_projection: self._shortcut = tf.keras.layers.Conv2D( filters=self._filters, kernel_size=1, strides=self._strides, use_bias=False, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm0 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable, synchronized=self._use_sync_bn, ) conv1_padding = 'same' # explicit padding here is added for centernet if self._use_explicit_padding: self._pad = tf.keras.layers.ZeroPadding2D(padding=(1, 1)) conv1_padding = 'valid' self._conv1 = tf.keras.layers.Conv2D( filters=self._filters, kernel_size=3, strides=self._strides, padding=conv1_padding, use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm1 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable, synchronized=self._use_sync_bn, ) self._conv2 = tf.keras.layers.Conv2D( filters=self._filters, kernel_size=3, strides=1, padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm2 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable, synchronized=self._use_sync_bn, ) if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1: self._squeeze_excitation = nn_layers.SqueezeExcitation( in_filters=self._filters, out_filters=self._filters, se_ratio=self._se_ratio, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) else: self._squeeze_excitation = None if self._stochastic_depth_drop_rate: self._stochastic_depth = nn_layers.StochasticDepth( self._stochastic_depth_drop_rate) else: self._stochastic_depth = None super(ResidualBlock, self).build(input_shape) def 
get_config(self): config = { 'filters': self._filters, 'strides': self._strides, 'use_projection': self._use_projection, 'se_ratio': self._se_ratio, 'resnetd_shortcut': self._resnetd_shortcut, 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'use_explicit_padding': self._use_explicit_padding, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'bn_trainable': self._bn_trainable } base_config = super(ResidualBlock, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs, training=None): shortcut = inputs if self._use_projection: shortcut = self._shortcut(shortcut) shortcut = self._norm0(shortcut) if self._use_explicit_padding: inputs = self._pad(inputs) x = self._conv1(inputs) x = self._norm1(x) x = self._activation_fn(x) x = self._conv2(x) x = self._norm2(x) if self._squeeze_excitation: x = self._squeeze_excitation(x) if self._stochastic_depth: x = self._stochastic_depth(x, training=training) return self._activation_fn(x + shortcut) @tf.keras.utils.register_keras_serializable(package='Vision') class BottleneckBlock(tf.keras.layers.Layer): """A standard bottleneck block.""" def __init__(self, filters, strides, dilation_rate=1, use_projection=False, se_ratio=None, resnetd_shortcut=False, stochastic_depth_drop_rate=None, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, activation='relu', use_sync_bn=False, norm_momentum=0.99, norm_epsilon=0.001, bn_trainable=True, **kwargs): """Initializes a standard bottleneck block with BN after convolutions. Args: filters: An `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. strides: An `int` block stride. If greater than 1, this block will ultimately downsample the input. dilation_rate: An `int` dilation_rate of convolutions. Default to 1. use_projection: A `bool` for whether this block should use a projection shortcut (versus the default identity shortcut). This is usually `True` for the first block of a block group, which may change the number of filters and the resolution. se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer. resnetd_shortcut: A `bool`. If True, apply the resnetd style modification to the shortcut connection. stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for the stochastic depth layer. kernel_initializer: A `str` of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d. Default to None. activation: A `str` name of the activation function. use_sync_bn: A `bool`. If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. bn_trainable: A `bool` that indicates whether batch norm layers should be trainable. Default to True. **kwargs: Additional keyword arguments to be passed. 
""" super(BottleneckBlock, self).__init__(**kwargs) self._filters = filters self._strides = strides self._dilation_rate = dilation_rate self._use_projection = use_projection self._se_ratio = se_ratio self._resnetd_shortcut = resnetd_shortcut self._use_sync_bn = use_sync_bn self._activation = activation self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._kernel_initializer = kernel_initializer self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._norm = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._bn_trainable = bn_trainable def build(self, input_shape): if self._use_projection: if self._resnetd_shortcut: self._shortcut0 = tf.keras.layers.AveragePooling2D( pool_size=2, strides=self._strides, padding='same') self._shortcut1 = tf.keras.layers.Conv2D( filters=self._filters * 4, kernel_size=1, strides=1, use_bias=False, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) else: self._shortcut = tf.keras.layers.Conv2D( filters=self._filters * 4, kernel_size=1, strides=self._strides, use_bias=False, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm0 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable, synchronized=self._use_sync_bn, ) self._conv1 = tf.keras.layers.Conv2D( filters=self._filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm1 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable, synchronized=self._use_sync_bn, ) self._activation1 = tf_utils.get_activation( self._activation, use_keras_layer=True) self._conv2 = tf.keras.layers.Conv2D( filters=self._filters, kernel_size=3, strides=self._strides, dilation_rate=self._dilation_rate, padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm2 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable, synchronized=self._use_sync_bn, ) self._activation2 = tf_utils.get_activation( self._activation, use_keras_layer=True) self._conv3 = tf.keras.layers.Conv2D( filters=self._filters * 4, kernel_size=1, strides=1, use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm3 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable, synchronized=self._use_sync_bn, ) self._activation3 = tf_utils.get_activation( self._activation, use_keras_layer=True) if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1: self._squeeze_excitation = nn_layers.SqueezeExcitation( in_filters=self._filters * 4, out_filters=self._filters * 4, se_ratio=self._se_ratio, kernel_initializer=tf_utils.clone_initializer( 
self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) else: self._squeeze_excitation = None if self._stochastic_depth_drop_rate: self._stochastic_depth = nn_layers.StochasticDepth( self._stochastic_depth_drop_rate) else: self._stochastic_depth = None self._add = tf.keras.layers.Add() super(BottleneckBlock, self).build(input_shape) def get_config(self): config = { 'filters': self._filters, 'strides': self._strides, 'dilation_rate': self._dilation_rate, 'use_projection': self._use_projection, 'se_ratio': self._se_ratio, 'resnetd_shortcut': self._resnetd_shortcut, 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'bn_trainable': self._bn_trainable } base_config = super(BottleneckBlock, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs, training=None): shortcut = inputs if self._use_projection: if self._resnetd_shortcut: shortcut = self._shortcut0(shortcut) shortcut = self._shortcut1(shortcut) else: shortcut = self._shortcut(shortcut) shortcut = self._norm0(shortcut) x = self._conv1(inputs) x = self._norm1(x) x = self._activation1(x) x = self._conv2(x) x = self._norm2(x) x = self._activation2(x) x = self._conv3(x) x = self._norm3(x) if self._squeeze_excitation: x = self._squeeze_excitation(x) if self._stochastic_depth: x = self._stochastic_depth(x, training=training) x = self._add([x, shortcut]) return self._activation3(x) @tf.keras.utils.register_keras_serializable(package='Vision') class InvertedBottleneckBlock(tf.keras.layers.Layer): """An inverted bottleneck block.""" def __init__(self, in_filters, out_filters, expand_ratio, strides, kernel_size=3, se_ratio=None, stochastic_depth_drop_rate=None, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, activation='relu', se_inner_activation='relu', se_gating_activation='sigmoid', se_round_down_protect=True, expand_se_in_filters=False, depthwise_activation=None, use_sync_bn=False, dilation_rate=1, divisible_by=1, regularize_depthwise=False, use_depthwise=True, use_residual=True, norm_momentum=0.99, norm_epsilon=0.001, output_intermediate_endpoints=False, **kwargs): """Initializes an inverted bottleneck block with BN after convolutions. Args: in_filters: An `int` number of filters of the input tensor. out_filters: An `int` number of filters of the output tensor. expand_ratio: An `int` of expand_ratio for an inverted bottleneck block. strides: An `int` block stride. If greater than 1, this block will ultimately downsample the input. kernel_size: An `int` kernel_size of the depthwise conv layer. se_ratio: A `float` or None. If not None, se ratio for the squeeze and excitation layer. stochastic_depth_drop_rate: A `float` or None. if not None, drop rate for the stochastic depth layer. kernel_initializer: A `str` of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d. Default to None. activation: A `str` name of the activation function. se_inner_activation: A `str` name of squeeze-excitation inner activation. 
se_gating_activation: A `str` name of squeeze-excitation gating activation. se_round_down_protect: A `bool` of whether round down more than 10% will be allowed in SE layer. expand_se_in_filters: A `bool` of whether or not to expand in_filter in squeeze and excitation layer. depthwise_activation: A `str` name of the activation function for depthwise only. use_sync_bn: A `bool`. If True, use synchronized batch normalization. dilation_rate: An `int` that specifies the dilation rate to use for. divisible_by: An `int` that ensures all inner dimensions are divisible by this number. dilated convolution: An `int` to specify the same value for all spatial dimensions. regularize_depthwise: A `bool` of whether or not apply regularization on depthwise. use_depthwise: A `bool` of whether to uses fused convolutions instead of depthwise. use_residual: A `bool` of whether to include residual connection between input and output. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. output_intermediate_endpoints: A `bool` of whether or not output the intermediate endpoints. **kwargs: Additional keyword arguments to be passed. """ super(InvertedBottleneckBlock, self).__init__(**kwargs) self._in_filters = in_filters self._out_filters = out_filters self._expand_ratio = expand_ratio self._strides = strides self._kernel_size = kernel_size self._se_ratio = se_ratio self._divisible_by = divisible_by self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._dilation_rate = dilation_rate self._use_sync_bn = use_sync_bn self._regularize_depthwise = regularize_depthwise self._use_depthwise = use_depthwise self._use_residual = use_residual self._activation = activation self._se_inner_activation = se_inner_activation self._se_gating_activation = se_gating_activation self._depthwise_activation = depthwise_activation self._se_round_down_protect = se_round_down_protect self._kernel_initializer = kernel_initializer self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._expand_se_in_filters = expand_se_in_filters self._output_intermediate_endpoints = output_intermediate_endpoints self._norm = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 if not depthwise_activation: self._depthwise_activation = activation if regularize_depthwise: self._depthsize_regularizer = kernel_regularizer else: self._depthsize_regularizer = None def build(self, input_shape): expand_filters = self._in_filters if self._expand_ratio > 1: # First 1x1 conv for channel expansion. 
expand_filters = nn_layers.make_divisible( self._in_filters * self._expand_ratio, self._divisible_by) expand_kernel = 1 if self._use_depthwise else self._kernel_size expand_stride = 1 if self._use_depthwise else self._strides self._conv0 = tf.keras.layers.Conv2D( filters=expand_filters, kernel_size=expand_kernel, strides=expand_stride, padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm0 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) self._activation_layer = tf_utils.get_activation( self._activation, use_keras_layer=True) if self._use_depthwise: # Depthwise conv. self._conv1 = tf.keras.layers.DepthwiseConv2D( kernel_size=(self._kernel_size, self._kernel_size), strides=self._strides, padding='same', depth_multiplier=1, dilation_rate=self._dilation_rate, use_bias=False, depthwise_initializer=tf_utils.clone_initializer( self._kernel_initializer), depthwise_regularizer=self._depthsize_regularizer, bias_regularizer=self._bias_regularizer) self._norm1 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) self._depthwise_activation_layer = tf_utils.get_activation( self._depthwise_activation, use_keras_layer=True) # Squeeze and excitation. if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1: logging.info('Use Squeeze and excitation.') in_filters = self._in_filters if self._expand_se_in_filters: in_filters = expand_filters self._squeeze_excitation = nn_layers.SqueezeExcitation( in_filters=in_filters, out_filters=expand_filters, se_ratio=self._se_ratio, divisible_by=self._divisible_by, round_down_protect=self._se_round_down_protect, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._se_inner_activation, gating_activation=self._se_gating_activation) else: self._squeeze_excitation = None # Last 1x1 conv. 
self._conv2 = tf.keras.layers.Conv2D( filters=self._out_filters, kernel_size=1, strides=1, padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm2 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) if self._stochastic_depth_drop_rate: self._stochastic_depth = nn_layers.StochasticDepth( self._stochastic_depth_drop_rate) else: self._stochastic_depth = None self._add = tf.keras.layers.Add() super(InvertedBottleneckBlock, self).build(input_shape) def get_config(self): config = { 'in_filters': self._in_filters, 'out_filters': self._out_filters, 'expand_ratio': self._expand_ratio, 'strides': self._strides, 'kernel_size': self._kernel_size, 'se_ratio': self._se_ratio, 'divisible_by': self._divisible_by, 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'se_inner_activation': self._se_inner_activation, 'se_gating_activation': self._se_gating_activation, 'se_round_down_protect': self._se_round_down_protect, 'expand_se_in_filters': self._expand_se_in_filters, 'depthwise_activation': self._depthwise_activation, 'dilation_rate': self._dilation_rate, 'use_sync_bn': self._use_sync_bn, 'regularize_depthwise': self._regularize_depthwise, 'use_depthwise': self._use_depthwise, 'use_residual': self._use_residual, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'output_intermediate_endpoints': self._output_intermediate_endpoints } base_config = super(InvertedBottleneckBlock, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs, training=None): endpoints = {} shortcut = inputs if self._expand_ratio > 1: x = self._conv0(inputs) x = self._norm0(x) x = self._activation_layer(x) else: x = inputs if self._use_depthwise: x = self._conv1(x) x = self._norm1(x) x = self._depthwise_activation_layer(x) if self._output_intermediate_endpoints: endpoints['depthwise'] = x if self._squeeze_excitation: x = self._squeeze_excitation(x) x = self._conv2(x) x = self._norm2(x) if (self._use_residual and self._in_filters == self._out_filters and self._strides == 1): if self._stochastic_depth: x = self._stochastic_depth(x, training=training) x = self._add([x, shortcut]) if self._output_intermediate_endpoints: return x, endpoints return x @tf.keras.utils.register_keras_serializable(package='Vision') class ResidualInner(tf.keras.layers.Layer): """Creates a single inner block of a residual. This corresponds to `F`/`G` functions in the RevNet paper: Aidan N. Gomez, Mengye Ren, Raquel Urtasun, Roger B. Grosse. The Reversible Residual Network: Backpropagation Without Storing Activations. (https://arxiv.org/pdf/1707.04585.pdf) """ def __init__( self, filters: int, strides: int, kernel_initializer: Union[str, Callable[ ..., tf.keras.initializers.Initializer]] = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, activation: Union[str, Callable[..., tf.Tensor]] = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, batch_norm_first: bool = True, **kwargs): """Initializes a ResidualInner. Args: filters: An `int` of output filter size. 
strides: An `int` of stride size for convolution for the residual block. kernel_initializer: A `str` or `tf.keras.initializers.Initializer` instance for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` for Conv2D. activation: A `str` or `callable` instance of the activation function. use_sync_bn: A `bool`. If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. batch_norm_first: A `bool` of whether to apply activation and batch norm before conv. **kwargs: Additional keyword arguments to be passed. """ super(ResidualInner, self).__init__(**kwargs) self.strides = strides self.filters = filters self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._kernel_regularizer = kernel_regularizer self._activation = tf.keras.activations.get(activation) self._use_sync_bn = use_sync_bn self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._batch_norm_first = batch_norm_first self._norm = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation_fn = tf_utils.get_activation(activation) def build(self, input_shape: tf.TensorShape): if self._batch_norm_first: self._batch_norm_0 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) self._conv2d_1 = tf.keras.layers.Conv2D( filters=self.filters, kernel_size=3, strides=self.strides, use_bias=False, padding='same', kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer) self._batch_norm_1 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) self._conv2d_2 = tf.keras.layers.Conv2D( filters=self.filters, kernel_size=3, strides=1, use_bias=False, padding='same', kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer) super(ResidualInner, self).build(input_shape) def get_config(self) -> Dict[str, Any]: config = { 'filters': self.filters, 'strides': self.strides, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'batch_norm_first': self._batch_norm_first, } base_config = super(ResidualInner, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor: x = inputs if self._batch_norm_first: x = self._batch_norm_0(x, training=training) x = self._activation_fn(x) x = self._conv2d_1(x) x = self._batch_norm_1(x, training=training) x = self._activation_fn(x) x = self._conv2d_2(x) return x @tf.keras.utils.register_keras_serializable(package='Vision') class BottleneckResidualInner(tf.keras.layers.Layer): """Creates a single inner block of a bottleneck. This corresponds to `F`/`G` functions in the RevNet paper: Aidan N. Gomez, Mengye Ren, Raquel Urtasun, Roger B. Grosse. The Reversible Residual Network: Backpropagation Without Storing Activations. 
(https://arxiv.org/pdf/1707.04585.pdf) """ def __init__( self, filters: int, strides: int, kernel_initializer: Union[str, Callable[ ..., tf.keras.initializers.Initializer]] = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, activation: Union[str, Callable[..., tf.Tensor]] = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, batch_norm_first: bool = True, **kwargs): """Initializes a BottleneckResidualInner. Args: filters: An `int` number of filters for first 2 convolutions. Last Last, and thus the number of output channels from the bottlneck block is `4*filters` strides: An `int` of stride size for convolution for the residual block. kernel_initializer: A `str` or `tf.keras.initializers.Initializer` instance for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` for Conv2D. activation: A `str` or `callable` instance of the activation function. use_sync_bn: A `bool`. If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. batch_norm_first: A `bool` of whether to apply activation and batch norm before conv. **kwargs: Additional keyword arguments to be passed. """ super(BottleneckResidualInner, self).__init__(**kwargs) self.strides = strides self.filters = filters self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._kernel_regularizer = kernel_regularizer self._activation = tf.keras.activations.get(activation) self._use_sync_bn = use_sync_bn self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._batch_norm_first = batch_norm_first self._norm = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation_fn = tf_utils.get_activation(activation) def build(self, input_shape: tf.TensorShape): if self._batch_norm_first: self._batch_norm_0 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) self._conv2d_1 = tf.keras.layers.Conv2D( filters=self.filters, kernel_size=1, strides=self.strides, use_bias=False, padding='same', kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer) self._batch_norm_1 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) self._conv2d_2 = tf.keras.layers.Conv2D( filters=self.filters, kernel_size=3, strides=1, use_bias=False, padding='same', kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer) self._batch_norm_2 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) self._conv2d_3 = tf.keras.layers.Conv2D( filters=self.filters * 4, kernel_size=1, strides=1, use_bias=False, padding='same', kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer) super(BottleneckResidualInner, self).build(input_shape) def get_config(self) -> Dict[str, Any]: config = { 'filters': self.filters, 'strides': self.strides, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 
'norm_epsilon': self._norm_epsilon, 'batch_norm_first': self._batch_norm_first, } base_config = super(BottleneckResidualInner, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor: x = inputs if self._batch_norm_first: x = self._batch_norm_0(x, training=training) x = self._activation_fn(x) x = self._conv2d_1(x) x = self._batch_norm_1(x, training=training) x = self._activation_fn(x) x = self._conv2d_2(x) x = self._batch_norm_2(x, training=training) x = self._activation_fn(x) x = self._conv2d_3(x) return x @tf.keras.utils.register_keras_serializable(package='Vision') class ReversibleLayer(tf.keras.layers.Layer): """Creates a reversible layer. Computes y1 = x1 + f(x2), y2 = x2 + g(y1), where f and g can be arbitrary layers that are stateless, which in this case are `ResidualInner` layers. """ def __init__(self, f: tf.keras.layers.Layer, g: tf.keras.layers.Layer, manual_grads: bool = True, **kwargs): """Initializes a ReversibleLayer. Args: f: A `tf.keras.layers.Layer` instance of `f` inner block referred to in paper. Each reversible layer consists of two inner functions. For example, in RevNet the reversible residual consists of two f/g inner (bottleneck) residual functions. Where the input to the reversible layer is x, the input gets partitioned in the channel dimension and the forward pass follows (eq8): x = [x1; x2], z1 = x1 + f(x2), y2 = x2 + g(z1), y1 = stop_gradient(z1). g: A `tf.keras.layers.Layer` instance of `g` inner block referred to in paper. Detailed explanation same as above as `f` arg. manual_grads: A `bool` [Testing Only] of whether to manually take gradients as in Algorithm 1 or defer to autograd. **kwargs: Additional keyword arguments to be passed. """ super(ReversibleLayer, self).__init__(**kwargs) self._f = f self._g = g self._manual_grads = manual_grads if tf.keras.backend.image_data_format() == 'channels_last': self._axis = -1 else: self._axis = 1 def get_config(self) -> Dict[str, Any]: config = { 'f': self._f, 'g': self._g, 'manual_grads': self._manual_grads, } base_config = super(ReversibleLayer, self).get_config() return dict(list(base_config.items()) + list(config.items())) def _ckpt_non_trainable_vars(self): self._f_non_trainable_vars = [ v.read_value() for v in self._f.non_trainable_variables ] self._g_non_trainable_vars = [ v.read_value() for v in self._g.non_trainable_variables ] def _load_ckpt_non_trainable_vars(self): for v, v_chkpt in zip(self._f.non_trainable_variables, self._f_non_trainable_vars): v.assign(v_chkpt) for v, v_chkpt in zip(self._g.non_trainable_variables, self._g_non_trainable_vars): v.assign(v_chkpt) def call(self, inputs: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor: @tf.custom_gradient def reversible( x: tf.Tensor ) -> Tuple[tf.Tensor, Callable[[Any], Tuple[List[tf.Tensor], List[tf.Tensor]]]]: """Implements Algorithm 1 in the RevNet paper. Aidan N. Gomez, Mengye Ren, Raquel Urtasun, Roger B. Grosse. The Reversible Residual Network: Backpropagation Without Storing Activations. (https://arxiv.org/pdf/1707.04585.pdf) Args: x: An input `tf.Tensor. Returns: y: The output [y1; y2] in Algorithm 1. grad_fn: A callable function that computes the gradients. 
""" with tf.GradientTape() as fwdtape: fwdtape.watch(x) x1, x2 = tf.split(x, num_or_size_splits=2, axis=self._axis) f_x2 = self._f(x2, training=training) x1_down = _maybe_downsample(x1, f_x2.shape[self._axis], self._f.strides, self._axis) z1 = f_x2 + x1_down g_z1 = self._g(z1, training=training) x2_down = _maybe_downsample(x2, g_z1.shape[self._axis], self._f.strides, self._axis) y2 = x2_down + g_z1 # Equation 8: https://arxiv.org/pdf/1707.04585.pdf # Decouple y1 and z1 so that their derivatives are different. y1 = tf.identity(z1) y = tf.concat([y1, y2], axis=self._axis) irreversible = ((self._f.strides != 1 or self._g.strides != 1) or (y.shape[self._axis] != inputs.shape[self._axis])) # Checkpointing moving mean/variance for batch normalization layers # as they shouldn't be updated during the custom gradient pass of f/g. self._ckpt_non_trainable_vars() def grad_fn( dy: tf.Tensor, variables: Optional[List[tf.Variable]] = None, ) -> Tuple[List[tf.Tensor], List[tf.Tensor]]: """Given dy calculate (dy/dx)|_{x_{input}} using f/g.""" if irreversible or not self._manual_grads: grads_combined = fwdtape.gradient( y, [x] + variables, output_gradients=dy) dx = grads_combined[0] grad_vars = grads_combined[1:] else: y1_nograd = tf.stop_gradient(y1) y2_nograd = tf.stop_gradient(y2) dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=self._axis) # Index mapping from self.f/g.trainable_variables to grad_fn # input `variables` kwarg so that we can reorder dwf + dwg # variable gradient list to match `variables` order. f_var_refs = [v.ref() for v in self._f.trainable_variables] g_var_refs = [v.ref() for v in self._g.trainable_variables] fg_var_refs = f_var_refs + g_var_refs self_to_var_index = [fg_var_refs.index(v.ref()) for v in variables] # Algorithm 1 in paper (line # documented in-line) z1 = y1_nograd # line 2 with tf.GradientTape() as gtape: gtape.watch(z1) g_z1 = self._g(z1, training=training) x2 = y2_nograd - g_z1 # line 3 with tf.GradientTape() as ftape: ftape.watch(x2) f_x2 = self._f(x2, training=training) x1 = z1 - f_x2 # pylint: disable=unused-variable # line 4 # Compute gradients g_grads_combined = gtape.gradient( g_z1, [z1] + self._g.trainable_variables, output_gradients=dy2) dz1 = dy1 + g_grads_combined[0] # line 5 dwg = g_grads_combined[1:] # line 9 f_grads_combined = ftape.gradient( f_x2, [x2] + self._f.trainable_variables, output_gradients=dz1) dx2 = dy2 + f_grads_combined[0] # line 6 dwf = f_grads_combined[1:] # line 8 dx1 = dz1 # line 7 # Pack the input and variable gradients. dx = tf.concat([dx1, dx2], axis=self._axis) grad_vars = dwf + dwg # Reorder gradients (trainable_variables to variables kwarg order) grad_vars = [grad_vars[i] for i in self_to_var_index] # Restore batch normalization moving mean/variance for correctness. self._load_ckpt_non_trainable_vars() return dx, grad_vars # grad_fn end return y, grad_fn # reversible end activations = reversible(inputs) return activations @tf.keras.utils.register_keras_serializable(package='Vision') class DepthwiseSeparableConvBlock(tf.keras.layers.Layer): """Creates a depthwise separable convolution block with batch normalization. 
""" def __init__( self, filters: int, kernel_size: int = 3, strides: int = 1, regularize_depthwise=False, activation: Text = 'relu6', kernel_initializer: Text = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, dilation_rate: int = 1, use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, **kwargs): """Initializes a convolution block with batch normalization. Args: filters: An `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. kernel_size: An `int` that specifies the height and width of the 2D convolution window. strides: An `int` of block stride. If greater than 1, this block will ultimately downsample the input. regularize_depthwise: A `bool`. If Ture, apply regularization on depthwise. activation: A `str` name of the activation function. kernel_initializer: A `str` of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. dilation_rate: An `int` or tuple/list of 2 `int`, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. use_sync_bn: A `bool`. If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. **kwargs: Additional keyword arguments to be passed. """ super(DepthwiseSeparableConvBlock, self).__init__(**kwargs) self._filters = filters self._kernel_size = kernel_size self._strides = strides self._activation = activation self._regularize_depthwise = regularize_depthwise self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._dilation_rate = dilation_rate self._use_sync_bn = use_sync_bn self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._norm = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation_fn = tf_utils.get_activation(activation) if regularize_depthwise: self._depthsize_regularizer = kernel_regularizer else: self._depthsize_regularizer = None def get_config(self): config = { 'filters': self._filters, 'strides': self._strides, 'regularize_depthwise': self._regularize_depthwise, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon } base_config = super(DepthwiseSeparableConvBlock, self).get_config() return dict(list(base_config.items()) + list(config.items())) def build(self, input_shape): self._dwconv0 = tf.keras.layers.DepthwiseConv2D( kernel_size=self._kernel_size, strides=self._strides, padding='same', depth_multiplier=1, dilation_rate=self._dilation_rate, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._depthsize_regularizer, use_bias=False) self._norm0 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) self._conv1 = tf.keras.layers.Conv2D( filters=self._filters, kernel_size=1, strides=1, padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer) self._norm1 = self._norm( 
axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) super(DepthwiseSeparableConvBlock, self).build(input_shape) def call(self, inputs, training=None): x = self._dwconv0(inputs) x = self._norm0(x) x = self._activation_fn(x) x = self._conv1(x) x = self._norm1(x) return self._activation_fn(x) @tf.keras.utils.register_keras_serializable(package='Vision') class TuckerConvBlock(tf.keras.layers.Layer): """An Tucker block (generalized bottleneck).""" def __init__(self, in_filters, out_filters, input_compression_ratio, output_compression_ratio, strides, kernel_size=3, stochastic_depth_drop_rate=None, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, activation='relu', use_sync_bn=False, divisible_by=1, use_residual=True, norm_momentum=0.99, norm_epsilon=0.001, **kwargs): """Initializes an inverted bottleneck block with BN after convolutions. Args: in_filters: An `int` number of filters of the input tensor. out_filters: An `int` number of filters of the output tensor. input_compression_ratio: An `float` of compression ratio for input filters. output_compression_ratio: An `float` of compression ratio for output filters. strides: An `int` block stride. If greater than 1, this block will ultimately downsample the input. kernel_size: An `int` kernel_size of the depthwise conv layer. stochastic_depth_drop_rate: A `float` or None. if not None, drop rate for the stochastic depth layer. kernel_initializer: A `str` of kernel_initializer for convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d. Default to None. activation: A `str` name of the activation function. use_sync_bn: A `bool`. If True, use synchronized batch normalization. divisible_by: An `int` that ensures all inner dimensions are divisible by this number. use_residual: A `bool` of whether to include residual connection between input and output. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. **kwargs: Additional keyword arguments to be passed. 
""" super(TuckerConvBlock, self).__init__(**kwargs) self._in_filters = in_filters self._out_filters = out_filters self._input_compression_ratio = input_compression_ratio self._output_compression_ratio = output_compression_ratio self._strides = strides self._kernel_size = kernel_size self._divisible_by = divisible_by self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._use_sync_bn = use_sync_bn self._use_residual = use_residual self._activation = activation self._kernel_initializer = kernel_initializer self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._norm = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 def build(self, input_shape): input_compressed_filters = nn_layers.make_divisible( value=self._in_filters * self._input_compression_ratio, divisor=self._divisible_by, round_down_protect=False) self._conv0 = tf.keras.layers.Conv2D( filters=input_compressed_filters, kernel_size=1, strides=1, padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm0 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) self._activation_layer0 = tf_utils.get_activation( self._activation, use_keras_layer=True) output_compressed_filters = nn_layers.make_divisible( value=self._out_filters * self._output_compression_ratio, divisor=self._divisible_by, round_down_protect=False) self._conv1 = tf.keras.layers.Conv2D( filters=output_compressed_filters, kernel_size=self._kernel_size, strides=self._strides, padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm1 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) self._activation_layer1 = tf_utils.get_activation( self._activation, use_keras_layer=True) # Last 1x1 conv. 
self._conv2 = tf.keras.layers.Conv2D( filters=self._out_filters, kernel_size=1, strides=1, padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) self._norm2 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn, ) if self._stochastic_depth_drop_rate: self._stochastic_depth = nn_layers.StochasticDepth( self._stochastic_depth_drop_rate) else: self._stochastic_depth = None self._add = tf.keras.layers.Add() super(TuckerConvBlock, self).build(input_shape) def get_config(self): config = { 'in_filters': self._in_filters, 'out_filters': self._out_filters, 'input_compression_ratio': self._input_compression_ratio, 'output_compression_ratio': self._output_compression_ratio, 'strides': self._strides, 'kernel_size': self._kernel_size, 'divisible_by': self._divisible_by, 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'use_residual': self._use_residual, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon } base_config = super(TuckerConvBlock, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs, training=None): shortcut = inputs x = self._conv0(inputs) x = self._norm0(x) x = self._activation_layer0(x) x = self._conv1(x) x = self._norm1(x) x = self._activation_layer1(x) x = self._conv2(x) x = self._norm2(x) if (self._use_residual and self._in_filters == self._out_filters and self._strides == 1): if self._stochastic_depth: x = self._stochastic_depth(x, training=training) x = self._add([x, shortcut]) return x @tf.keras.utils.register_keras_serializable(package='Vision') class LayerScale(tf.keras.layers.Layer): """LayerScale as introduced in CaiT: https://arxiv.org/abs/2103.17239. Attributes: init_values (float): value to initialize the diagonal matrix of LayerScale. """ def __init__(self, init_values: float, **kwargs): """Initializes LayerScale.""" super().__init__(**kwargs) self.gamma_init_value = init_values def build(self, inputs_shape): gamma_shape = (1, 1, inputs_shape[2]) self.gamma = self.add_weight( name='layerscale_gamma', shape=gamma_shape, initializer=tf.keras.initializers.Constant(self.gamma_init_value), trainable=True, dtype=tf.float32, ) def call(self, inputs, inputs_positions=None): del inputs_positions return tf.cast(self.gamma, inputs.dtype) * inputs @tf.keras.utils.register_keras_serializable(package='Vision') class TransformerEncoderBlock(nlp_modeling.layers.TransformerEncoderBlock): """TransformerEncoderBlock layer with stochastic depth and layerscale.""" def __init__( self, *args, stochastic_depth_drop_rate=0.0, layer_scale_init_value=0.0, transformer_partition_dims=None, max_attention_inference_parallelism=None, **kwargs ): """Initializes TransformerEncoderBlock. Args: *args: positional arguments passed to super().__init__. stochastic_depth_drop_rate: the drop rate for the stochastic depth layer. layer_scale_init_value: transformer_partition_dims: transformer spatial partition dimenstions. max_attention_inference_parallelism: the number of examples to run in parallel in the attention blocks during inference. Set this limit to reduce the peak memory usage. 
If None, use vectorized operations to run the whole batch in parallel. **kwargs: keyword arguments passed to super().__init__. """ super().__init__(*args, **kwargs) self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._layer_scale_init_value = layer_scale_init_value self._transformer_partition_dims = transformer_partition_dims self._max_attention_inference_parallelism = ( max_attention_inference_parallelism ) def build(self, input_shape): super().build(input_shape) if self._stochastic_depth_drop_rate: self._stochastic_depth = nn_layers.StochasticDepth( self._stochastic_depth_drop_rate) else: self._stochastic_depth = lambda x, *args, **kwargs: tf.identity(x) if self._layer_scale_init_value: self._layer_scale_attn = LayerScale( init_values=self._layer_scale_init_value, name='layer_scale_attn') self._layer_scale_mlp = LayerScale( init_values=self._layer_scale_init_value, name='layer_scale_mlp') else: self._layer_scale_attn = lambda x, *args, **kwargs: tf.identity(x) self._layer_scale_mlp = lambda x, *args, **kwargs: tf.identity(x) self._attention_layer = nn_layers.MultiHeadAttention( num_heads=self._num_heads, key_dim=self._key_dim, value_dim=self._value_dim, dropout=self._attention_dropout_rate, use_bias=self._use_bias, kernel_initializer=self._attention_initializer, bias_initializer=tf_utils.clone_initializer(self._bias_initializer), attention_axes=self._attention_axes, output_shape=self._output_last_dim, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, max_inference_parallelism=self._max_attention_inference_parallelism, partition_dims=self._transformer_partition_dims, name='self_attention', ) def get_config(self): config = super().get_config() config.update({ 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'layer_scale_init_value': self._layer_scale_init_value, 'transformer_partition_dims': self._transformer_partition_dims, 'max_attention_inference_parallelism': ( self._max_attention_inference_parallelism ), }) return config def call(self, inputs, output_range=None, training=None): """Transformer self-attention encoder block call.""" if isinstance(inputs, (list, tuple)): if len(inputs) == 2: input_tensor, attention_mask = inputs key_value = None elif len(inputs) == 3: input_tensor, key_value, attention_mask = inputs else: raise ValueError('Unexpected inputs to %s with length at %d' % (self.__class__, len(inputs))) else: input_tensor, key_value, attention_mask = (inputs, None, None) if output_range is None: output_range = self._output_range if output_range: if self._norm_first: source_tensor = input_tensor[:, 0:output_range, :] input_tensor = self._attention_layer_norm(input_tensor) if key_value is not None: key_value = self._attention_layer_norm(key_value) target_tensor = input_tensor[:, 0:output_range, :] if attention_mask is not None: attention_mask = attention_mask[:, 0:output_range, :] else: if self._norm_first: source_tensor = input_tensor input_tensor = self._attention_layer_norm(input_tensor) if key_value is not None: key_value = self._attention_layer_norm(key_value) target_tensor = input_tensor if key_value is None: key_value = input_tensor attention_output, attention_scores = self._attention_layer( query=target_tensor, value=key_value, attention_mask=attention_mask, return_attention_scores=True) attention_output = self._attention_dropout(attention_output) attention_output = self._layer_scale_attn(attention_output) if 
self._norm_first: # Important to not combine `self._norm_first` and # `self._use_query_residual` into one if clause because else is only for # `_norm_first == False`. if self._use_query_residual: attention_output = source_tensor + self._stochastic_depth( attention_output, training=training) source_attention_output = attention_output attention_output = self._output_layer_norm(attention_output) else: if self._use_query_residual: attention_output = target_tensor + self._stochastic_depth( attention_output, training=training) attention_output = self._attention_layer_norm(attention_output) inner_output = self._intermediate_dense(attention_output) inner_output = self._intermediate_activation_layer(inner_output) inner_output = self._inner_dropout_layer(inner_output) layer_output = self._output_dense(inner_output) layer_output = self._output_dropout(layer_output) # Layerscale after MLP. layer_output = self._layer_scale_mlp(layer_output) if self._norm_first: layer_output = source_attention_output + self._stochastic_depth( layer_output, training=training) else: # During mixed precision training, layer norm output is always fp32 for # now. Casts fp32 for the subsequent add. layer_output = tf.cast(layer_output, tf.float32) layer_output = self._output_layer_norm( layer_output + self._stochastic_depth(attention_output, training=training)) if self._return_attention_scores: return layer_output, attention_scores else: return layer_output @tf.keras.utils.register_keras_serializable(package='Vision') class TransformerScaffold(nlp_modeling.layers.TransformerScaffold): """TransformerScaffold layer for vision applications.""" def __init__( self, *args, stochastic_depth_drop_rate: float = 0.0, return_attention_scores: bool = False, ffn_has_residual_connection: bool = False, max_attention_inference_parallelism: Optional[int] = None, **kwargs ): """Initializes TransformerScaffold. Args: *args: positional arguments passed to super().__init__. stochastic_depth_drop_rate: the drop rate for the stochastic depth layer. return_attention_scores: whether to also return the attention scores. ffn_has_residual_connection: whether the feedforward network has internal residual connection and layer norm. If False, the residual connection and the layer norm op are called inside TransformerScaffold. max_attention_inference_parallelism: the number of examples to run in parallel in the attention blocks during inference. Set this limit to reduce the peak memory usage. If None, use vectorized operations to run the whole batch in parallel. **kwargs: keyword arguments passed to super().__init__.
""" super().__init__(*args, **kwargs) self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._return_attention_scores = return_attention_scores self._ffn_has_residual_connection = ffn_has_residual_connection self._max_attention_inference_parallelism = ( max_attention_inference_parallelism ) def build(self, input_shape: Union[tf.TensorShape, List[int]]): if self._stochastic_depth_drop_rate: self._stochastic_depth = nn_layers.StochasticDepth( self._stochastic_depth_drop_rate) else: self._stochastic_depth = lambda x, *args, **kwargs: tf.identity(x) super().build(input_shape) if self._max_attention_inference_parallelism is not None: attention_layer_config = self._attention_layer.get_config() self._attention_layer = self._attention_cls.from_config({ **attention_layer_config, 'max_inference_parallelism': ( self._max_attention_inference_parallelism ), }) def get_config(self): config = super().get_config() config.update({ 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'return_attention_scores': self._return_attention_scores, 'ffn_has_residual_connection': self._ffn_has_residual_connection, 'max_attention_inference_parallelism': ( self._max_attention_inference_parallelism ), }) return config def call( self, inputs: tf.Tensor, training: Optional[bool] = None ) -> Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]: """Transformer self-attention encoder block call.""" if isinstance(inputs, (list, tuple)): if len(inputs) == 2: input_tensor, attention_mask = inputs key_value = None elif len(inputs) == 3: input_tensor, key_value, attention_mask = inputs else: raise ValueError('Unexpected inputs to %s with length at %d' % (self.__class__, len(inputs))) else: input_tensor, key_value, attention_mask = (inputs, None, None) if self._norm_first: source_tensor = input_tensor input_tensor = self._attention_layer_norm(input_tensor) if key_value is None: key_value = input_tensor attention_output, attention_scores = self._attention_layer( query=input_tensor, value=key_value, attention_mask=attention_mask, training=training, return_attention_scores=True) attention_output = self._attention_dropout( attention_output, training=training) if self._norm_first: source_attention_output = source_tensor + self._stochastic_depth( attention_output, training=training) attention_output = self._output_layer_norm( source_attention_output) else: attention_output = self._attention_layer_norm( input_tensor + self._stochastic_depth(attention_output, training=training)) if self._feedforward_block is None: intermediate_output = self._intermediate_dense(attention_output) intermediate_output = self._intermediate_activation_layer( intermediate_output) layer_output = self._output_dense(intermediate_output) layer_output = self._output_dropout(layer_output, training=training) else: layer_output = self._feedforward_block( attention_output, training=training) if self._norm_first: if self._ffn_has_residual_connection: raise ValueError( 'In the case of `norm_first`, the residual connection should be' "done in the TransformerScaffold call function, not FFN's" 'call function.') output = source_attention_output + self._stochastic_depth( layer_output, training=training) else: # During mixed precision training, layer norm output is always fp32 for # now. Casts fp32 for the subsequent add. 
layer_output = tf.cast(layer_output, tf.float32) if self._ffn_has_residual_connection: output = self._stochastic_depth(layer_output, training=training) else: output = self._output_layer_norm( attention_output + self._stochastic_depth(layer_output, training=training)) if self._return_attention_scores: return output, attention_scores else: return output
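# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of the vision TransformerEncoderBlock defined above with
# stochastic depth and LayerScale both enabled. It assumes the base
# nlp_modeling.layers.TransformerEncoderBlock arguments (num_attention_heads,
# inner_dim, inner_activation); the hidden size, head count, and token shape
# below are arbitrary example values, not values mandated by this module.
if __name__ == '__main__':
  example_block = TransformerEncoderBlock(
      num_attention_heads=12,
      inner_dim=3072,
      inner_activation='gelu',
      stochastic_depth_drop_rate=0.1,  # Randomly drops the residual branches.
      layer_scale_init_value=1e-5)     # Enables LayerScale on both branches.
  example_tokens = tf.ones([2, 197, 768])  # [batch, tokens, hidden]
  example_output = example_block(example_tokens, training=True)
  print(example_output.shape)  # Same shape as the input tokens: (2, 197, 768).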
72,010
37.263018
80
py
models
models-master/official/vision/modeling/backbones/spinenet_mobile.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of Mobile SpineNet Networks.""" import math from typing import Any, List, Optional, Tuple # Import libraries from absl import logging import tensorflow as tf from official.modeling import hyperparams from official.modeling import tf_utils from official.vision.modeling.backbones import factory from official.vision.modeling.layers import nn_blocks from official.vision.modeling.layers import nn_layers from official.vision.ops import spatial_transform_ops layers = tf.keras.layers FILTER_SIZE_MAP = { 0: 8, 1: 16, 2: 24, 3: 40, 4: 80, 5: 112, 6: 112, 7: 112, } # The fixed SpineNet architecture discovered by NAS. # Each element represents a specification of a building block: # (block_level, block_fn, (input_offset0, input_offset1), is_output).
SPINENET_BLOCK_SPECS = [ (2, 'mbconv', (0, 1), False), (2, 'mbconv', (1, 2), False), (4, 'mbconv', (1, 2), False), (3, 'mbconv', (3, 4), False), (4, 'mbconv', (3, 5), False), (6, 'mbconv', (4, 6), False), (4, 'mbconv', (4, 6), False), (5, 'mbconv', (7, 8), False), (7, 'mbconv', (7, 9), False), (5, 'mbconv', (9, 10), False), (5, 'mbconv', (9, 11), False), (4, 'mbconv', (6, 11), True), (3, 'mbconv', (5, 11), True), (5, 'mbconv', (8, 13), True), (7, 'mbconv', (6, 15), True), (6, 'mbconv', (13, 15), True), ] SCALING_MAP = { '49': { 'endpoints_num_filters': 48, 'filter_size_scale': 1.0, 'block_repeats': 1, }, '49S': { 'endpoints_num_filters': 40, 'filter_size_scale': 0.65, 'block_repeats': 1, }, '49XS': { 'endpoints_num_filters': 24, 'filter_size_scale': 0.6, 'block_repeats': 1, }, } class BlockSpec(object): """A container class that specifies the block configuration for SpineNet.""" def __init__(self, level: int, block_fn: str, input_offsets: Tuple[int, int], is_output: bool): self.level = level self.block_fn = block_fn self.input_offsets = input_offsets self.is_output = is_output def build_block_specs( block_specs: Optional[List[Tuple[Any, ...]]] = None) -> List[BlockSpec]: """Builds the list of BlockSpec objects for SpineNet.""" if not block_specs: block_specs = SPINENET_BLOCK_SPECS logging.info('Building SpineNet block specs: %s', block_specs) return [BlockSpec(*b) for b in block_specs] @tf.keras.utils.register_keras_serializable(package='Vision') class SpineNetMobile(tf.keras.Model): """Creates a Mobile SpineNet family model. This implements: [1] Xianzhi Du, Tsung-Yi Lin, Pengchong Jin, Golnaz Ghiasi, Mingxing Tan, Yin Cui, Quoc V. Le, Xiaodan Song. SpineNet: Learning Scale-Permuted Backbone for Recognition and Localization. (https://arxiv.org/abs/1912.05027). [2] Xianzhi Du, Tsung-Yi Lin, Pengchong Jin, Yin Cui, Mingxing Tan, Quoc Le, Xiaodan Song. Efficient Scale-Permuted Backbone with Learned Resource Distribution. (https://arxiv.org/abs/2010.11426). """ def __init__( self, input_specs: tf.keras.layers.InputSpec = tf.keras.layers.InputSpec( shape=[None, None, None, 3]), min_level: int = 3, max_level: int = 7, block_specs: Optional[List[BlockSpec]] = None, endpoints_num_filters: int = 256, se_ratio: float = 0.2, block_repeats: int = 1, filter_size_scale: float = 1.0, expand_ratio: int = 6, init_stochastic_depth_rate=0.0, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, use_keras_upsampling_2d: bool = False, **kwargs): """Initializes a Mobile SpineNet model. Args: input_specs: A `tf.keras.layers.InputSpec` of the input tensor. min_level: An `int` of min level for output mutiscale features. max_level: An `int` of max level for output mutiscale features. block_specs: The block specifications for the SpineNet model discovered by NAS. endpoints_num_filters: An `int` of feature dimension for the output endpoints. se_ratio: A `float` of Squeeze-and-Excitation ratio. block_repeats: An `int` of number of blocks contained in the layer. filter_size_scale: A `float` of multiplier for the filters (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. 
expand_ratio: An `integer` of expansion ratios for inverted bottleneck blocks. init_stochastic_depth_rate: A `float` of initial stochastic depth rate. kernel_initializer: A str for kernel initializer of convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. activation: A `str` name of the activation function. use_sync_bn: If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A small `float` added to variance to avoid dividing by zero. use_keras_upsampling_2d: If True, use keras UpSampling2D layer. **kwargs: Additional keyword arguments to be passed. """ self._input_specs = input_specs self._min_level = min_level self._max_level = max_level self._block_specs = ( build_block_specs() if block_specs is None else block_specs ) self._endpoints_num_filters = endpoints_num_filters self._se_ratio = se_ratio self._block_repeats = block_repeats self._filter_size_scale = filter_size_scale self._expand_ratio = expand_ratio self._init_stochastic_depth_rate = init_stochastic_depth_rate self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._activation = activation self._use_sync_bn = use_sync_bn self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._use_keras_upsampling_2d = use_keras_upsampling_2d self._num_init_blocks = 2 self._norm = layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 # Build SpineNet. inputs = tf.keras.Input(shape=input_specs.shape[1:]) net = self._build_stem(inputs=inputs) input_width = input_specs.shape[2] if input_width is None: max_stride = max(map(lambda b: b.level, self._block_specs)) input_width = 2 ** max_stride net = self._build_scale_permuted_network(net=net, input_width=input_width) endpoints = self._build_endpoints(net=net) self._output_specs = {l: endpoints[l].get_shape() for l in endpoints} super().__init__(inputs=inputs, outputs=endpoints) def _block_group(self, inputs: tf.Tensor, in_filters: int, out_filters: int, strides: int, expand_ratio: int = 6, block_repeats: int = 1, se_ratio: float = 0.2, stochastic_depth_drop_rate: Optional[float] = None, name: str = 'block_group'): """Creates one group of blocks for the SpineNet model.""" x = nn_blocks.InvertedBottleneckBlock( in_filters=in_filters, out_filters=out_filters, strides=strides, se_gating_activation='hard_sigmoid', se_ratio=se_ratio, expand_ratio=expand_ratio, stochastic_depth_drop_rate=stochastic_depth_drop_rate, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)( inputs) for _ in range(1, block_repeats): x = nn_blocks.InvertedBottleneckBlock( in_filters=in_filters, out_filters=out_filters, strides=1, se_ratio=se_ratio, expand_ratio=expand_ratio, stochastic_depth_drop_rate=stochastic_depth_drop_rate, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)( inputs) return 
tf.keras.layers.Activation('linear', name=name)(x) def _build_stem(self, inputs): """Builds SpineNet stem.""" x = layers.Conv2D( filters=int(FILTER_SIZE_MAP[0] * self._filter_size_scale), kernel_size=3, strides=2, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( inputs) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)( x) x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x) net = [] stem_strides = [1, 2] # Build the initial level 2 blocks. for i in range(self._num_init_blocks): x = self._block_group( inputs=x, in_filters=int(FILTER_SIZE_MAP[i] * self._filter_size_scale), out_filters=int(FILTER_SIZE_MAP[i + 1] * self._filter_size_scale), expand_ratio=self._expand_ratio, strides=stem_strides[i], se_ratio=self._se_ratio, block_repeats=self._block_repeats, name='stem_block_{}'.format(i + 1)) net.append(x) return net def _build_scale_permuted_network(self, net, input_width, weighted_fusion=False): """Builds scale-permuted network.""" net_sizes = [ int(math.ceil(input_width / 2)), int(math.ceil(input_width / 2**2)) ] num_outgoing_connections = [0] * len(net) endpoints = {} for i, block_spec in enumerate(self._block_specs): # Update block level if it is larger than max_level to avoid building # blocks smaller than requested. block_spec.level = min(block_spec.level, self._max_level) # Find out specs for the target block. target_width = int(math.ceil(input_width / 2**block_spec.level)) target_num_filters = int(FILTER_SIZE_MAP[block_spec.level] * self._filter_size_scale) # Resample then merge input0 and input1. parents = [] input0 = block_spec.input_offsets[0] input1 = block_spec.input_offsets[1] x0 = self._resample_with_sepconv( inputs=net[input0], input_width=net_sizes[input0], target_width=target_width, target_num_filters=target_num_filters) parents.append(x0) num_outgoing_connections[input0] += 1 x1 = self._resample_with_sepconv( inputs=net[input1], input_width=net_sizes[input1], target_width=target_width, target_num_filters=target_num_filters) parents.append(x1) num_outgoing_connections[input1] += 1 # Merge 0 outdegree blocks to the output block. if block_spec.is_output: for j, (j_feat, j_connections) in enumerate(zip(net, num_outgoing_connections)): if j_connections == 0 and (j_feat.shape[2] == target_width and j_feat.shape[3] == x0.shape[3]): parents.append(j_feat) num_outgoing_connections[j] += 1 # pylint: disable=g-direct-tensorflow-import if weighted_fusion: dtype = parents[0].dtype parent_weights = [ tf.nn.relu(tf.cast(tf.Variable(1.0, name='block{}_fusion{}'.format( i, j)), dtype=dtype)) for j in range(len(parents))] weights_sum = parent_weights[0] for adder in parent_weights[1:]: weights_sum = layers.Add()([weights_sum, adder]) parents = [ parents[i] * parent_weights[i] / (weights_sum + 0.0001) for i in range(len(parents)) ] # Fuse all parent nodes then build a new block. 
x = parents[0] for adder in parents[1:]: x = layers.Add()([x, adder]) x = tf_utils.get_activation( self._activation, use_keras_layer=True)(x) x = self._block_group( inputs=x, in_filters=target_num_filters, out_filters=target_num_filters, strides=1, se_ratio=self._se_ratio, expand_ratio=self._expand_ratio, block_repeats=self._block_repeats, stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate( self._init_stochastic_depth_rate, i + 1, len(self._block_specs)), name='scale_permuted_block_{}'.format(i + 1)) net.append(x) net_sizes.append(target_width) num_outgoing_connections.append(0) # Save output feats. if block_spec.is_output: if block_spec.level in endpoints: raise ValueError('Duplicate feats found for output level {}.'.format( block_spec.level)) if (block_spec.level < self._min_level or block_spec.level > self._max_level): logging.warning( 'SpineNet output level out of range [min_level, max_levle] = [%s, %s] will not be used for further processing.', self._min_level, self._max_level) endpoints[str(block_spec.level)] = x return endpoints def _build_endpoints(self, net): """Matches filter size for endpoints before sharing conv layers.""" endpoints = {} for level in range(self._min_level, self._max_level + 1): x = layers.Conv2D( filters=self._endpoints_num_filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( net[str(level)]) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)( x) x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x) endpoints[str(level)] = x return endpoints def _resample_with_sepconv(self, inputs, input_width, target_width, target_num_filters): """Matches resolution and feature dimension.""" x = inputs # Spatial resampling. if input_width > target_width: while input_width > target_width: x = layers.DepthwiseConv2D( kernel_size=3, strides=2, padding='SAME', use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( x) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)( x) x = tf_utils.get_activation( self._activation, use_keras_layer=True)(x) input_width /= 2 elif input_width < target_width: scale = target_width // input_width x = spatial_transform_ops.nearest_upsampling( x, scale=scale, use_keras_layer=self._use_keras_upsampling_2d) # Last 1x1 conv to match filter size. 
x = layers.Conv2D( filters=target_num_filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( x) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)( x) return x def get_config(self): config_dict = { 'min_level': self._min_level, 'max_level': self._max_level, 'endpoints_num_filters': self._endpoints_num_filters, 'se_ratio': self._se_ratio, 'expand_ratio': self._expand_ratio, 'block_repeats': self._block_repeats, 'filter_size_scale': self._filter_size_scale, 'init_stochastic_depth_rate': self._init_stochastic_depth_rate, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'use_keras_upsampling_2d': self._use_keras_upsampling_2d, } return config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def output_specs(self): """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs @factory.register_backbone_builder('spinenet_mobile') def build_spinenet_mobile( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: """Builds Mobile SpineNet backbone from a config.""" backbone_type = backbone_config.type backbone_cfg = backbone_config.get() assert backbone_type == 'spinenet_mobile', (f'Inconsistent backbone type ' f'{backbone_type}') model_id = backbone_cfg.model_id if model_id not in SCALING_MAP: raise ValueError( 'Mobile SpineNet-{} is not a valid architecture.'.format(model_id)) scaling_params = SCALING_MAP[model_id] return SpineNetMobile( input_specs=input_specs, min_level=backbone_cfg.min_level, max_level=backbone_cfg.max_level, endpoints_num_filters=scaling_params['endpoints_num_filters'], block_repeats=scaling_params['block_repeats'], filter_size_scale=scaling_params['filter_size_scale'], se_ratio=backbone_cfg.se_ratio, expand_ratio=backbone_cfg.expand_ratio, init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate, kernel_regularizer=l2_regularizer, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, use_keras_upsampling_2d=backbone_cfg.use_keras_upsampling_2d)
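# --- Illustrative usage sketch (not part of the original module) -------------
# Builds the Mobile SpineNet-49XS variant directly from SCALING_MAP and prints
# its multiscale endpoints. The 384x384 input size and the batch size are
# example assumptions only.
if __name__ == '__main__':
  example_scaling = SCALING_MAP['49XS']
  example_backbone = SpineNetMobile(
      input_specs=tf.keras.layers.InputSpec(shape=[None, 384, 384, 3]),
      min_level=3,
      max_level=7,
      endpoints_num_filters=example_scaling['endpoints_num_filters'],
      filter_size_scale=example_scaling['filter_size_scale'],
      block_repeats=example_scaling['block_repeats'])
  example_endpoints = example_backbone(tf.ones([1, 384, 384, 3]))
  for example_level in sorted(example_endpoints):
    # Levels '3'..'7' correspond to strides 8..128; every endpoint has
    # `endpoints_num_filters` channels (24 for the 49XS scaling).
    print(example_level, example_endpoints[example_level].shape)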
20,774
36.910584
126
py
models
models-master/official/vision/modeling/backbones/spinenet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of SpineNet Networks.""" import math from typing import Any, List, Optional, Tuple # Import libraries from absl import logging import tensorflow as tf from official.modeling import hyperparams from official.modeling import tf_utils from official.vision.modeling.backbones import factory from official.vision.modeling.layers import nn_blocks from official.vision.modeling.layers import nn_layers from official.vision.ops import spatial_transform_ops layers = tf.keras.layers FILTER_SIZE_MAP = { 1: 32, 2: 64, 3: 128, 4: 256, 5: 256, 6: 256, 7: 256, } # The fixed SpineNet architecture discovered by NAS. # Each element represents a specification of a building block: # (block_level, block_fn, (input_offset0, input_offset1), is_output). SPINENET_BLOCK_SPECS = [ (2, 'bottleneck', (0, 1), False), (4, 'residual', (0, 1), False), (3, 'bottleneck', (2, 3), False), (4, 'bottleneck', (2, 4), False), (6, 'residual', (3, 5), False), (4, 'bottleneck', (3, 5), False), (5, 'residual', (6, 7), False), (7, 'residual', (6, 8), False), (5, 'bottleneck', (8, 9), False), (5, 'bottleneck', (8, 10), False), (4, 'bottleneck', (5, 10), True), (3, 'bottleneck', (4, 10), True), (5, 'bottleneck', (7, 12), True), (7, 'bottleneck', (5, 14), True), (6, 'bottleneck', (12, 14), True), (2, 'bottleneck', (2, 13), True), ] SCALING_MAP = { '49S': { 'endpoints_num_filters': 128, 'filter_size_scale': 0.65, 'resample_alpha': 0.5, 'block_repeats': 1, }, '49': { 'endpoints_num_filters': 256, 'filter_size_scale': 1.0, 'resample_alpha': 0.5, 'block_repeats': 1, }, '96': { 'endpoints_num_filters': 256, 'filter_size_scale': 1.0, 'resample_alpha': 0.5, 'block_repeats': 2, }, '143': { 'endpoints_num_filters': 256, 'filter_size_scale': 1.0, 'resample_alpha': 1.0, 'block_repeats': 3, }, # SpineNet-143 with 1.3x filter_size_scale. '143L': { 'endpoints_num_filters': 256, 'filter_size_scale': 1.3, 'resample_alpha': 1.0, 'block_repeats': 3, }, '190': { 'endpoints_num_filters': 512, 'filter_size_scale': 1.3, 'resample_alpha': 1.0, 'block_repeats': 4, }, } class BlockSpec(object): """A container class that specifies the block configuration for SpineNet.""" def __init__(self, level: int, block_fn: str, input_offsets: Tuple[int, int], is_output: bool): self.level = level self.block_fn = block_fn self.input_offsets = input_offsets self.is_output = is_output def build_block_specs( block_specs: Optional[List[Tuple[Any, ...]]] = None) -> List[BlockSpec]: """Builds the list of BlockSpec objects for SpineNet.""" if not block_specs: block_specs = SPINENET_BLOCK_SPECS logging.info('Building SpineNet block specs: %s', block_specs) return [BlockSpec(*b) for b in block_specs] @tf.keras.utils.register_keras_serializable(package='Vision') class SpineNet(tf.keras.Model): """Creates a SpineNet family model. This implements: Xianzhi Du, Tsung-Yi Lin, Pengchong Jin, Golnaz Ghiasi, Mingxing Tan, Yin Cui, Quoc V. Le, Xiaodan Song. 
SpineNet: Learning Scale-Permuted Backbone for Recognition and Localization. (https://arxiv.org/abs/1912.05027) """ def __init__( self, input_specs: tf.keras.layers.InputSpec = tf.keras.layers.InputSpec( shape=[None, None, None, 3]), min_level: int = 3, max_level: int = 7, block_specs: Optional[List[BlockSpec]] = None, endpoints_num_filters: int = 256, resample_alpha: float = 0.5, block_repeats: int = 1, filter_size_scale: float = 1.0, init_stochastic_depth_rate: float = 0.0, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, **kwargs): """Initializes a SpineNet model. Args: input_specs: A `tf.keras.layers.InputSpec` of the input tensor. min_level: An `int` of min level for output mutiscale features. max_level: An `int` of max level for output mutiscale features. block_specs: A list of block specifications for the SpineNet model discovered by NAS. endpoints_num_filters: An `int` of feature dimension for the output endpoints. resample_alpha: A `float` of resampling factor in cross-scale connections. block_repeats: An `int` of number of blocks contained in the layer. filter_size_scale: A `float` of multiplier for the filters (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. init_stochastic_depth_rate: A `float` of initial stochastic depth rate. kernel_initializer: A str for kernel initializer of convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. activation: A `str` name of the activation function. use_sync_bn: If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A small `float` added to variance to avoid dividing by zero. **kwargs: Additional keyword arguments to be passed. """ self._input_specs = input_specs self._min_level = min_level self._max_level = max_level self._block_specs = ( build_block_specs() if block_specs is None else block_specs ) self._endpoints_num_filters = endpoints_num_filters self._resample_alpha = resample_alpha self._block_repeats = block_repeats self._filter_size_scale = filter_size_scale self._init_stochastic_depth_rate = init_stochastic_depth_rate self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._activation = activation self._use_sync_bn = use_sync_bn self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._init_block_fn = 'bottleneck' self._num_init_blocks = 2 self._set_activation_fn(activation) self._norm = layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 # Build SpineNet. 
inputs = tf.keras.Input(shape=input_specs.shape[1:]) net = self._build_stem(inputs=inputs) input_width = input_specs.shape[2] if input_width is None: max_stride = max(map(lambda b: b.level, self._block_specs)) input_width = 2 ** max_stride net = self._build_scale_permuted_network(net=net, input_width=input_width) endpoints = self._build_endpoints(net=net) self._output_specs = {l: endpoints[l].get_shape() for l in endpoints} super(SpineNet, self).__init__(inputs=inputs, outputs=endpoints) def _set_activation_fn(self, activation): if activation == 'relu': self._activation_fn = tf.nn.relu elif activation == 'swish': self._activation_fn = tf.nn.swish else: raise ValueError('Activation {} not implemented.'.format(activation)) def _block_group(self, inputs: tf.Tensor, filters: int, strides: int, block_fn_cand: str, block_repeats: int = 1, stochastic_depth_drop_rate: Optional[float] = None, name: str = 'block_group'): """Creates one group of blocks for the SpineNet model.""" block_fn_candidates = { 'bottleneck': nn_blocks.BottleneckBlock, 'residual': nn_blocks.ResidualBlock, } block_fn = block_fn_candidates[block_fn_cand] _, _, _, num_filters = inputs.get_shape().as_list() if block_fn_cand == 'bottleneck': use_projection = not (num_filters == (filters * 4) and strides == 1) else: use_projection = not (num_filters == filters and strides == 1) x = block_fn( filters=filters, strides=strides, use_projection=use_projection, stochastic_depth_drop_rate=stochastic_depth_drop_rate, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)( inputs) for _ in range(1, block_repeats): x = block_fn( filters=filters, strides=1, use_projection=False, stochastic_depth_drop_rate=stochastic_depth_drop_rate, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)( x) return tf.identity(x, name=name) def _build_stem(self, inputs): """Builds SpineNet stem.""" x = layers.Conv2D( filters=64, kernel_size=7, strides=2, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( inputs) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)( x) x = tf_utils.get_activation(self._activation_fn)(x) x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x) net = [] # Build the initial level 2 blocks. for i in range(self._num_init_blocks): x = self._block_group( inputs=x, filters=int(FILTER_SIZE_MAP[2] * self._filter_size_scale), strides=1, block_fn_cand=self._init_block_fn, block_repeats=self._block_repeats, name='stem_block_{}'.format(i + 1)) net.append(x) return net def _build_scale_permuted_network(self, net, input_width, weighted_fusion=False): """Builds scale-permuted network.""" net_sizes = [int(math.ceil(input_width / 2**2))] * len(net) net_block_fns = [self._init_block_fn] * len(net) num_outgoing_connections = [0] * len(net) endpoints = {} for i, block_spec in enumerate(self._block_specs): # Find out specs for the target block. 
target_width = int(math.ceil(input_width / 2**block_spec.level)) target_num_filters = int(FILTER_SIZE_MAP[block_spec.level] * self._filter_size_scale) target_block_fn = block_spec.block_fn # Resample then merge input0 and input1. parents = [] input0 = block_spec.input_offsets[0] input1 = block_spec.input_offsets[1] x0 = self._resample_with_alpha( inputs=net[input0], input_width=net_sizes[input0], input_block_fn=net_block_fns[input0], target_width=target_width, target_num_filters=target_num_filters, target_block_fn=target_block_fn, alpha=self._resample_alpha) parents.append(x0) num_outgoing_connections[input0] += 1 x1 = self._resample_with_alpha( inputs=net[input1], input_width=net_sizes[input1], input_block_fn=net_block_fns[input1], target_width=target_width, target_num_filters=target_num_filters, target_block_fn=target_block_fn, alpha=self._resample_alpha) parents.append(x1) num_outgoing_connections[input1] += 1 # Merge 0 outdegree blocks to the output block. if block_spec.is_output: for j, (j_feat, j_connections) in enumerate(zip(net, num_outgoing_connections)): if j_connections == 0 and (j_feat.shape[2] == target_width and j_feat.shape[3] == x0.shape[3]): parents.append(j_feat) num_outgoing_connections[j] += 1 # pylint: disable=g-direct-tensorflow-import if weighted_fusion: dtype = parents[0].dtype parent_weights = [ tf.nn.relu(tf.cast(tf.Variable(1.0, name='block{}_fusion{}'.format( i, j)), dtype=dtype)) for j in range(len(parents))] weights_sum = tf.add_n(parent_weights) parents = [ parents[i] * parent_weights[i] / (weights_sum + 0.0001) for i in range(len(parents)) ] # Fuse all parent nodes then build a new block. x = tf_utils.get_activation(self._activation_fn)(tf.add_n(parents)) x = self._block_group( inputs=x, filters=target_num_filters, strides=1, block_fn_cand=target_block_fn, block_repeats=self._block_repeats, stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate( self._init_stochastic_depth_rate, i + 1, len(self._block_specs)), name='scale_permuted_block_{}'.format(i + 1)) net.append(x) net_sizes.append(target_width) net_block_fns.append(target_block_fn) num_outgoing_connections.append(0) # Save output feats. 
if block_spec.is_output: if block_spec.level in endpoints: raise ValueError('Duplicate feats found for output level {}.'.format( block_spec.level)) if (block_spec.level < self._min_level or block_spec.level > self._max_level): logging.warning( 'SpineNet output level %s out of range [min_level, max_level] = ' '[%s, %s] will not be used for further processing.', block_spec.level, self._min_level, self._max_level) endpoints[str(block_spec.level)] = x return endpoints def _build_endpoints(self, net): """Matches filter size for endpoints before sharing conv layers.""" endpoints = {} for level in range(self._min_level, self._max_level + 1): x = layers.Conv2D( filters=self._endpoints_num_filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( net[str(level)]) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)( x) x = tf_utils.get_activation(self._activation_fn)(x) endpoints[str(level)] = x return endpoints def _resample_with_alpha(self, inputs, input_width, input_block_fn, target_width, target_num_filters, target_block_fn, alpha=0.5): """Matches resolution and feature dimension.""" _, _, _, input_num_filters = inputs.get_shape().as_list() if input_block_fn == 'bottleneck': input_num_filters /= 4 new_num_filters = int(input_num_filters * alpha) x = layers.Conv2D( filters=new_num_filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( inputs) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)( x) x = tf_utils.get_activation(self._activation_fn)(x) # Spatial resampling. if input_width > target_width: x = layers.Conv2D( filters=new_num_filters, kernel_size=3, strides=2, padding='SAME', use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( x) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)( x) x = tf_utils.get_activation(self._activation_fn)(x) input_width /= 2 while input_width > target_width: x = layers.MaxPool2D(pool_size=3, strides=2, padding='SAME')(x) input_width /= 2 elif input_width < target_width: scale = target_width // input_width x = spatial_transform_ops.nearest_upsampling(x, scale=scale) # Last 1x1 conv to match filter size. 
if target_block_fn == 'bottleneck': target_num_filters *= 4 x = layers.Conv2D( filters=target_num_filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( x) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)( x) return x def get_config(self): config_dict = { 'min_level': self._min_level, 'max_level': self._max_level, 'endpoints_num_filters': self._endpoints_num_filters, 'resample_alpha': self._resample_alpha, 'block_repeats': self._block_repeats, 'filter_size_scale': self._filter_size_scale, 'init_stochastic_depth_rate': self._init_stochastic_depth_rate, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon } return config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def output_specs(self): """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs @factory.register_backbone_builder('spinenet') def build_spinenet( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: """Builds SpineNet backbone from a config.""" backbone_type = backbone_config.type backbone_cfg = backbone_config.get() assert backbone_type == 'spinenet', (f'Inconsistent backbone type ' f'{backbone_type}') model_id = str(backbone_cfg.model_id) if model_id not in SCALING_MAP: raise ValueError( 'SpineNet-{} is not a valid architecture.'.format(model_id)) scaling_params = SCALING_MAP[model_id] return SpineNet( input_specs=input_specs, min_level=backbone_cfg.min_level, max_level=backbone_cfg.max_level, endpoints_num_filters=scaling_params['endpoints_num_filters'], resample_alpha=scaling_params['resample_alpha'], block_repeats=scaling_params['block_repeats'], filter_size_scale=scaling_params['filter_size_scale'], init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate, kernel_regularizer=l2_regularizer, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon)
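# --- Illustrative usage sketch (not part of the original module) -------------
# Builds a SpineNet-49 backbone and prints the per-level output shapes. The
# 640x640 input size and the batch size are example assumptions only.
if __name__ == '__main__':
  example_scaling = SCALING_MAP['49']
  example_backbone = SpineNet(
      input_specs=tf.keras.layers.InputSpec(shape=[None, 640, 640, 3]),
      min_level=3,
      max_level=7,
      endpoints_num_filters=example_scaling['endpoints_num_filters'],
      resample_alpha=example_scaling['resample_alpha'],
      block_repeats=example_scaling['block_repeats'],
      filter_size_scale=example_scaling['filter_size_scale'])
  example_features = example_backbone(tf.ones([1, 640, 640, 3]))
  for example_level in sorted(example_features):
    # Level '3' has stride 8, so its feature map is 80x80 with 256 channels.
    print(example_level, example_features[example_level].shape)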
21,144
35.456897
80
py
models
models-master/official/vision/modeling/backbones/resnet_deeplab.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of Residual Networks with Deeplab modifications.""" from typing import Callable, Optional, Tuple, List import numpy as np import tensorflow as tf from official.modeling import hyperparams from official.modeling import tf_utils from official.vision.modeling.backbones import factory from official.vision.modeling.layers import nn_blocks from official.vision.modeling.layers import nn_layers layers = tf.keras.layers # Specifications for different ResNet variants. # Each entry specifies block configurations of the particular ResNet variant. # Each element in the block configuration is in the following format: # (block_fn, num_filters, block_repeats) RESNET_SPECS = { 50: [ ('bottleneck', 64, 3), ('bottleneck', 128, 4), ('bottleneck', 256, 6), ('bottleneck', 512, 3), ], 101: [ ('bottleneck', 64, 3), ('bottleneck', 128, 4), ('bottleneck', 256, 23), ('bottleneck', 512, 3), ], 152: [ ('bottleneck', 64, 3), ('bottleneck', 128, 8), ('bottleneck', 256, 36), ('bottleneck', 512, 3), ], 200: [ ('bottleneck', 64, 3), ('bottleneck', 128, 24), ('bottleneck', 256, 36), ('bottleneck', 512, 3), ], } @tf.keras.utils.register_keras_serializable(package='Vision') class DilatedResNet(tf.keras.Model): """Creates a ResNet model with Deeplabv3 modifications. This backbone is suitable for semantic segmentation. This implements Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam. Rethinking Atrous Convolution for Semantic Image Segmentation. (https://arxiv.org/pdf/1706.05587) """ def __init__( self, model_id: int, output_stride: int, input_specs: tf.keras.layers.InputSpec = layers.InputSpec( shape=[None, None, None, 3]), stem_type: str = 'v0', resnetd_shortcut: bool = False, replace_stem_max_pool: bool = False, se_ratio: Optional[float] = None, init_stochastic_depth_rate: float = 0.0, multigrid: Optional[Tuple[int]] = None, last_stage_repeats: int = 1, activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a ResNet model with DeepLab modification. Args: model_id: An `int` specifies depth of ResNet backbone model. output_stride: An `int` of output stride, ratio of input to output resolution. input_specs: A `tf.keras.layers.InputSpec` of the input tensor. stem_type: A `str` of stem type. Can be `v0` or `v1`. `v1` replaces 7x7 conv by 3 3x3 convs. resnetd_shortcut: A `bool` of whether to use ResNet-D shortcut in downsampling blocks. replace_stem_max_pool: A `bool` of whether to replace the max pool in stem with a stride-2 conv, se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer. init_stochastic_depth_rate: A `float` of initial stochastic depth rate. 
multigrid: A tuple of the same length as the number of blocks in the last resnet stage. last_stage_repeats: An `int` that specifies how many times last stage is repeated. activation: A `str` name of the activation function. use_sync_bn: If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_initializer: A str for kernel initializer of convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. **kwargs: Additional keyword arguments to be passed. """ self._model_id = model_id self._output_stride = output_stride self._input_specs = input_specs self._use_sync_bn = use_sync_bn self._activation = activation self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._norm = layers.BatchNormalization self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._stem_type = stem_type self._resnetd_shortcut = resnetd_shortcut self._replace_stem_max_pool = replace_stem_max_pool self._se_ratio = se_ratio self._init_stochastic_depth_rate = init_stochastic_depth_rate if tf.keras.backend.image_data_format() == 'channels_last': bn_axis = -1 else: bn_axis = 1 # Build ResNet. inputs = tf.keras.Input(shape=input_specs.shape[1:]) if stem_type == 'v0': x = layers.Conv2D( filters=64, kernel_size=7, strides=2, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( inputs) x = self._norm( axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon, synchronized=use_sync_bn)( x) x = tf_utils.get_activation(activation)(x) elif stem_type == 'v1': x = layers.Conv2D( filters=64, kernel_size=3, strides=2, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( inputs) x = self._norm( axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon, synchronized=use_sync_bn)( x) x = tf_utils.get_activation(activation)(x) x = layers.Conv2D( filters=64, kernel_size=3, strides=1, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( x) x = self._norm( axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon, synchronized=use_sync_bn)( x) x = tf_utils.get_activation(activation)(x) x = layers.Conv2D( filters=128, kernel_size=3, strides=1, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( x) x = self._norm( axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon, synchronized=use_sync_bn)( x) x = tf_utils.get_activation(activation)(x) else: raise ValueError('Stem type {} not supported.'.format(stem_type)) if replace_stem_max_pool: x = layers.Conv2D( filters=64, kernel_size=3, strides=2, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( x) x = self._norm( axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon, synchronized=use_sync_bn)( x) x = tf_utils.get_activation(activation, 
use_keras_layer=True)(x) else: x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x) normal_resnet_stage = int(np.math.log2(self._output_stride)) - 2 endpoints = {} for i in range(normal_resnet_stage + 1): spec = RESNET_SPECS[model_id][i] if spec[0] == 'bottleneck': block_fn = nn_blocks.BottleneckBlock else: raise ValueError('Block fn `{}` is not supported.'.format(spec[0])) x = self._block_group( inputs=x, filters=spec[1], strides=(1 if i == 0 else 2), dilation_rate=1, block_fn=block_fn, block_repeats=spec[2], stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate( self._init_stochastic_depth_rate, i + 2, 4 + last_stage_repeats), name='block_group_l{}'.format(i + 2)) endpoints[str(i + 2)] = x dilation_rate = 2 for i in range(normal_resnet_stage + 1, 3 + last_stage_repeats): spec = RESNET_SPECS[model_id][i] if i < 3 else RESNET_SPECS[model_id][-1] if spec[0] == 'bottleneck': block_fn = nn_blocks.BottleneckBlock else: raise ValueError('Block fn `{}` is not supported.'.format(spec[0])) x = self._block_group( inputs=x, filters=spec[1], strides=1, dilation_rate=dilation_rate, block_fn=block_fn, block_repeats=spec[2], stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate( self._init_stochastic_depth_rate, i + 2, 4 + last_stage_repeats), multigrid=multigrid if i >= 3 else None, name='block_group_l{}'.format(i + 2)) dilation_rate *= 2 endpoints[str(normal_resnet_stage + 2)] = x self._output_specs = {l: endpoints[l].get_shape() for l in endpoints} super(DilatedResNet, self).__init__( inputs=inputs, outputs=endpoints, **kwargs) def _block_group(self, inputs: tf.Tensor, filters: int, strides: int, dilation_rate: int, block_fn: Callable[..., tf.keras.layers.Layer], block_repeats: int = 1, stochastic_depth_drop_rate: float = 0.0, multigrid: Optional[List[int]] = None, name: str = 'block_group'): """Creates one group of blocks for the ResNet model. Deeplab applies strides at the last block. Args: inputs: A `tf.Tensor` of size `[batch, channels, height, width]`. filters: An `int` off number of filters for the first convolution of the layer. strides: An `int` of stride to use for the first convolution of the layer. If greater than 1, this layer will downsample the input. dilation_rate: An `int` of diluted convolution rates. block_fn: Either `nn_blocks.ResidualBlock` or `nn_blocks.BottleneckBlock`. block_repeats: An `int` of number of blocks contained in the layer. stochastic_depth_drop_rate: A `float` of drop rate of the current block group. multigrid: A list of `int` or None. If specified, dilation rates for each block is scaled up by its corresponding factor in the multigrid. name: A `str` name for the block. Returns: The output `tf.Tensor` of the block layer. """ if multigrid is not None and len(multigrid) != block_repeats: raise ValueError('multigrid has to match number of block_repeats') if multigrid is None: multigrid = [1] * block_repeats # TODO(arashwan): move striding at the of the block. 
x = block_fn( filters=filters, strides=strides, dilation_rate=dilation_rate * multigrid[0], use_projection=True, stochastic_depth_drop_rate=stochastic_depth_drop_rate, se_ratio=self._se_ratio, resnetd_shortcut=self._resnetd_shortcut, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)( inputs) for i in range(1, block_repeats): x = block_fn( filters=filters, strides=1, dilation_rate=dilation_rate * multigrid[i], use_projection=False, stochastic_depth_drop_rate=stochastic_depth_drop_rate, resnetd_shortcut=self._resnetd_shortcut, se_ratio=self._se_ratio, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)( x) return tf.identity(x, name=name) def get_config(self): config_dict = { 'model_id': self._model_id, 'output_stride': self._output_stride, 'stem_type': self._stem_type, 'resnetd_shortcut': self._resnetd_shortcut, 'replace_stem_max_pool': self._replace_stem_max_pool, 'se_ratio': self._se_ratio, 'init_stochastic_depth_rate': self._init_stochastic_depth_rate, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, } return config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def output_specs(self): """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs @factory.register_backbone_builder('dilated_resnet') def build_dilated_resnet( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras """Builds ResNet backbone from a config.""" backbone_type = backbone_config.type backbone_cfg = backbone_config.get() assert backbone_type == 'dilated_resnet', (f'Inconsistent backbone type ' f'{backbone_type}') return DilatedResNet( model_id=backbone_cfg.model_id, output_stride=backbone_cfg.output_stride, input_specs=input_specs, stem_type=backbone_cfg.stem_type, resnetd_shortcut=backbone_cfg.resnetd_shortcut, replace_stem_max_pool=backbone_cfg.replace_stem_max_pool, se_ratio=backbone_cfg.se_ratio, init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate, multigrid=backbone_cfg.multigrid, last_stage_repeats=backbone_cfg.last_stage_repeats, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer)
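# --- Illustrative usage sketch (not part of the original module) -------------
# Builds a dilated ResNet-50 with output stride 16, a typical DeepLab setting.
# The multigrid rates, the v1 stem, and the 512x512 input size are example
# assumptions only; the multigrid length must match the block repeats of the
# last stage (3 for ResNet-50).
if __name__ == '__main__':
  example_backbone = DilatedResNet(
      model_id=50,
      output_stride=16,
      stem_type='v1',
      multigrid=(1, 2, 4),
      input_specs=tf.keras.layers.InputSpec(shape=[None, 512, 512, 3]))
  example_endpoints = example_backbone(tf.ones([1, 512, 512, 3]))
  # With output_stride=16, the deepest endpoint ('4') keeps 1/16 resolution
  # and 2048 channels: (1, 32, 32, 2048).
  for example_level in sorted(example_endpoints):
    print(example_level, example_endpoints[example_level].shape)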
15,830
36.603325
139
py
models
models-master/official/vision/modeling/backbones/efficientnet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of EfficientNet Networks.""" import math from typing import Any, List, Tuple # Import libraries import tensorflow as tf from official.modeling import hyperparams from official.modeling import tf_utils from official.vision.modeling.backbones import factory from official.vision.modeling.layers import nn_blocks from official.vision.modeling.layers import nn_layers layers = tf.keras.layers # The fixed EfficientNet-B0 architecture discovered by NAS. # Each element represents a specification of a building block: # (block_fn, block_repeats, kernel_size, strides, expand_ratio, in_filters, # out_filters, is_output) EN_B0_BLOCK_SPECS = [ ('mbconv', 1, 3, 1, 1, 32, 16, False), ('mbconv', 2, 3, 2, 6, 16, 24, True), ('mbconv', 2, 5, 2, 6, 24, 40, True), ('mbconv', 3, 3, 2, 6, 40, 80, False), ('mbconv', 3, 5, 1, 6, 80, 112, True), ('mbconv', 4, 5, 2, 6, 112, 192, False), ('mbconv', 1, 3, 1, 6, 192, 320, True), ] SCALING_MAP = { 'b0': dict(width_scale=1.0, depth_scale=1.0), 'b1': dict(width_scale=1.0, depth_scale=1.1), 'b2': dict(width_scale=1.1, depth_scale=1.2), 'b3': dict(width_scale=1.2, depth_scale=1.4), 'b4': dict(width_scale=1.4, depth_scale=1.8), 'b5': dict(width_scale=1.6, depth_scale=2.2), 'b6': dict(width_scale=1.8, depth_scale=2.6), 'b7': dict(width_scale=2.0, depth_scale=3.1), } class BlockSpec(): """A container class that specifies the block configuration for MnasNet.""" def __init__(self, block_fn: str, block_repeats: int, kernel_size: int, strides: int, expand_ratio: float, in_filters: int, out_filters: int, is_output: bool, width_scale: float, depth_scale: float): self.block_fn = block_fn self.block_repeats = round_repeats(block_repeats, depth_scale) self.kernel_size = kernel_size self.strides = strides self.expand_ratio = expand_ratio self.in_filters = nn_layers.round_filters(in_filters, width_scale) self.out_filters = nn_layers.round_filters(out_filters, width_scale) self.is_output = is_output def round_repeats(repeats: int, multiplier: float, skip: bool = False) -> int: """Returns rounded number of filters based on depth multiplier.""" if skip or not multiplier: return repeats return int(math.ceil(multiplier * repeats)) def block_spec_decoder(specs: List[Tuple[Any, ...]], width_scale: float, depth_scale: float) -> List[BlockSpec]: """Decodes and returns specs for a block.""" decoded_specs = [] for s in specs: s = s + ( width_scale, depth_scale, ) decoded_specs.append(BlockSpec(*s)) return decoded_specs @tf.keras.utils.register_keras_serializable(package='Vision') class EfficientNet(tf.keras.Model): """Creates an EfficientNet family model. This implements the EfficientNet model from: Mingxing Tan, Quoc V. Le. EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks. 
(https://arxiv.org/pdf/1905.11946) """ def __init__(self, model_id: str, input_specs: tf.keras.layers.InputSpec = layers.InputSpec( shape=[None, None, None, 3]), se_ratio: float = 0.0, stochastic_depth_drop_rate: float = 0.0, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: tf.keras.regularizers.Regularizer = None, bias_regularizer: tf.keras.regularizers.Regularizer = None, activation: str = 'relu', se_inner_activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, # pytype: disable=annotation-type-mismatch # typed-keras **kwargs): """Initializes an EfficientNet model. Args: model_id: A `str` of model ID of EfficientNet. input_specs: A `tf.keras.layers.InputSpec` of the input tensor. se_ratio: A `float` of squeeze and excitation ratio for inverted bottleneck blocks. stochastic_depth_drop_rate: A `float` of drop rate for drop connect layer. kernel_initializer: A `str` for kernel initializer of convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. activation: A `str` of name of the activation function. se_inner_activation: A `str` of name of the activation function used in Sequeeze and Excitation layer. use_sync_bn: If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. **kwargs: Additional keyword arguments to be passed. """ self._model_id = model_id self._input_specs = input_specs self._se_ratio = se_ratio self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._use_sync_bn = use_sync_bn self._activation = activation self._se_inner_activation = se_inner_activation self._kernel_initializer = kernel_initializer self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._norm = layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': bn_axis = -1 else: bn_axis = 1 # Build EfficientNet. inputs = tf.keras.Input(shape=input_specs.shape[1:]) width_scale = SCALING_MAP[model_id]['width_scale'] depth_scale = SCALING_MAP[model_id]['depth_scale'] # Build stem. x = layers.Conv2D( filters=nn_layers.round_filters(32, width_scale), kernel_size=3, strides=2, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( inputs) x = self._norm( axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon, synchronized=use_sync_bn)( x) x = tf_utils.get_activation(activation)(x) # Build intermediate blocks. endpoints = {} endpoint_level = 2 decoded_specs = block_spec_decoder(EN_B0_BLOCK_SPECS, width_scale, depth_scale) for i, specs in enumerate(decoded_specs): x = self._block_group( inputs=x, specs=specs, name='block_group_{}'.format(i)) if specs.is_output: endpoints[str(endpoint_level)] = x endpoint_level += 1 # Build output specs for downstream tasks. self._output_specs = {l: endpoints[l].get_shape() for l in endpoints} # Build the final conv for classification. 
x = layers.Conv2D( filters=nn_layers.round_filters(1280, width_scale), kernel_size=1, strides=1, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( x) x = self._norm( axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon, synchronized=use_sync_bn)( x) endpoints[str(endpoint_level)] = tf_utils.get_activation(activation)(x) super(EfficientNet, self).__init__( inputs=inputs, outputs=endpoints, **kwargs) def _block_group(self, inputs: tf.Tensor, specs: BlockSpec, name: str = 'block_group'): """Creates one group of blocks for the EfficientNet model. Args: inputs: A `tf.Tensor` of size `[batch, channels, height, width]`. specs: The specifications for one inverted bottleneck block group. name: A `str` name for the block. Returns: The output `tf.Tensor` of the block layer. """ if specs.block_fn == 'mbconv': block_fn = nn_blocks.InvertedBottleneckBlock else: raise ValueError('Block func {} not supported.'.format(specs.block_fn)) x = block_fn( in_filters=specs.in_filters, out_filters=specs.out_filters, expand_ratio=specs.expand_ratio, strides=specs.strides, kernel_size=specs.kernel_size, se_ratio=self._se_ratio, stochastic_depth_drop_rate=self._stochastic_depth_drop_rate, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, se_inner_activation=self._se_inner_activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)( inputs) for _ in range(1, specs.block_repeats): x = block_fn( in_filters=specs.out_filters, # Set 'in_filters' to 'out_filters'. out_filters=specs.out_filters, expand_ratio=specs.expand_ratio, strides=1, # Fix strides to 1. 
kernel_size=specs.kernel_size, se_ratio=self._se_ratio, stochastic_depth_drop_rate=self._stochastic_depth_drop_rate, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, se_inner_activation=self._se_inner_activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)( x) return tf.identity(x, name=name) def get_config(self): config_dict = { 'model_id': self._model_id, 'se_ratio': self._se_ratio, 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon } return config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def output_specs(self): """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs @factory.register_backbone_builder('efficientnet') def build_efficientnet( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, l2_regularizer: tf.keras.regularizers.Regularizer = None, se_inner_activation: str = 'relu') -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras """Builds EfficientNet backbone from a config.""" backbone_type = backbone_config.type backbone_cfg = backbone_config.get() assert backbone_type == 'efficientnet', (f'Inconsistent backbone type ' f'{backbone_type}') return EfficientNet( model_id=backbone_cfg.model_id, input_specs=input_specs, stochastic_depth_drop_rate=backbone_cfg.stochastic_depth_drop_rate, se_ratio=backbone_cfg.se_ratio, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer, se_inner_activation=se_inner_activation)
12,428
36.663636
116
py
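A short sketch of how the compound-scaling helpers in efficientnet.py above expand the fixed B0 block specs into a larger variant, assuming official.vision is importable; widths and repeats are scaled exactly as round_filters and round_repeats describe.

from official.vision.modeling.backbones import efficientnet

scale = efficientnet.SCALING_MAP['b3']  # width_scale=1.2, depth_scale=1.4
decoded = efficientnet.block_spec_decoder(
    efficientnet.EN_B0_BLOCK_SPECS,
    width_scale=scale['width_scale'],
    depth_scale=scale['depth_scale'])
for spec in decoded:
  # Widths are rounded to hardware-friendly multiples; depths use ceil().
  print(spec.block_fn, spec.in_filters, spec.out_filters, spec.block_repeats)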
models
models-master/official/vision/modeling/backbones/resnet_3d_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for resnet.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.backbones import resnet_3d class ResNet3DTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (128, 50, 4, 'v0', False, 0.0), (128, 50, 4, 'v1', False, 0.2), (256, 50, 4, 'v1', True, 0.2), ) def test_network_creation(self, input_size, model_id, endpoint_filter_scale, stem_type, se_ratio, init_stochastic_depth_rate): """Test creation of ResNet3D family models.""" tf.keras.backend.set_image_data_format('channels_last') temporal_strides = [1, 1, 1, 1] temporal_kernel_sizes = [(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1), (1, 3, 1)] use_self_gating = [True, False, True, False] network = resnet_3d.ResNet3D( model_id=model_id, temporal_strides=temporal_strides, temporal_kernel_sizes=temporal_kernel_sizes, use_self_gating=use_self_gating, stem_type=stem_type, se_ratio=se_ratio, init_stochastic_depth_rate=init_stochastic_depth_rate) inputs = tf.keras.Input(shape=(8, input_size, input_size, 3), batch_size=1) endpoints = network(inputs) self.assertAllEqual([ 1, 2, input_size / 2**2, input_size / 2**2, 64 * endpoint_filter_scale ], endpoints['2'].shape.as_list()) self.assertAllEqual([ 1, 2, input_size / 2**3, input_size / 2**3, 128 * endpoint_filter_scale ], endpoints['3'].shape.as_list()) self.assertAllEqual([ 1, 2, input_size / 2**4, input_size / 2**4, 256 * endpoint_filter_scale ], endpoints['4'].shape.as_list()) self.assertAllEqual([ 1, 2, input_size / 2**5, input_size / 2**5, 512 * endpoint_filter_scale ], endpoints['5'].shape.as_list()) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( model_id=50, temporal_strides=[1, 1, 1, 1], temporal_kernel_sizes=[(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1), (1, 3, 1)], stem_type='v0', stem_conv_temporal_kernel_size=5, stem_conv_temporal_stride=2, stem_pool_temporal_stride=2, se_ratio=0.0, use_self_gating=None, init_stochastic_depth_rate=0.0, use_sync_bn=False, activation='relu', norm_momentum=0.99, norm_epsilon=0.001, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, ) network = resnet_3d.ResNet3D(**kwargs) expected_config = dict(kwargs) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = resnet_3d.ResNet3D.from_config(network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == '__main__': tf.test.main()
3,789
35.796117
79
py
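The same ResNet3D-50 construction as in the test above, shown without the test harness and assuming official.vision is importable; the endpoint shape in the comment mirrors the test's assertions.

import tensorflow as tf
from official.vision.modeling.backbones import resnet_3d

network = resnet_3d.ResNet3D(
    model_id=50,
    temporal_strides=[1, 1, 1, 1],
    temporal_kernel_sizes=[(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1),
                           (1, 3, 1)],
    use_self_gating=[True, False, True, False])
videos = tf.keras.Input(shape=(8, 128, 128, 3), batch_size=1)  # (T, H, W, C)
endpoints = network(videos)
print(endpoints['5'].shape)  # (1, 2, 4, 4, 2048) with the default stem strides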
models
models-master/official/vision/modeling/backbones/mobiledet_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Mobiledet.""" import itertools from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.backbones import mobiledet class MobileDetTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( 'MobileDetCPU', 'MobileDetDSP', 'MobileDetEdgeTPU', 'MobileDetGPU', ) def test_serialize_deserialize(self, model_id): # Create a network object that sets all of its config options. kwargs = dict( model_id=model_id, filter_size_scale=1.0, use_sync_bn=False, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, norm_momentum=0.99, norm_epsilon=0.001, min_depth=8, divisible_by=8, regularize_depthwise=False, ) network = mobiledet.MobileDet(**kwargs) expected_config = dict(kwargs) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = mobiledet.MobileDet.from_config(network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) @parameterized.parameters( itertools.product( [1, 3], [ 'MobileDetCPU', 'MobileDetDSP', 'MobileDetEdgeTPU', 'MobileDetGPU', ], )) def test_input_specs(self, input_dim, model_id): """Test different input feature dimensions.""" tf.keras.backend.set_image_data_format('channels_last') input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim]) network = mobiledet.MobileDet(model_id=model_id, input_specs=input_specs) inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1) _ = network(inputs) @parameterized.parameters( itertools.product( [ 'MobileDetCPU', 'MobileDetDSP', 'MobileDetEdgeTPU', 'MobileDetGPU', ], [32, 224], )) def test_mobiledet_creation(self, model_id, input_size): """Test creation of MobileDet family models.""" tf.keras.backend.set_image_data_format('channels_last') mobiledet_layers = { # The number of filters of layers having outputs been collected # for filter_size_scale = 1.0 'MobileDetCPU': [8, 16, 32, 72, 144], 'MobileDetDSP': [24, 32, 64, 144, 240], 'MobileDetEdgeTPU': [16, 16, 40, 96, 384], 'MobileDetGPU': [16, 32, 64, 128, 384], } network = mobiledet.MobileDet(model_id=model_id, filter_size_scale=1.0) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) endpoints = network(inputs) for idx, num_filter in enumerate(mobiledet_layers[model_id]): self.assertAllEqual( [1, input_size / 2 ** (idx+1), input_size / 2 ** (idx+1), num_filter], endpoints[str(idx+1)].shape.as_list())
3,794
32
80
py
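A condensed version of the MobileDet creation pattern exercised above, assuming official.vision is importable; the per-level filter counts in the comment are the MobileDetGPU values from the test.

import tensorflow as tf
from official.vision.modeling.backbones import mobiledet

network = mobiledet.MobileDet(model_id='MobileDetGPU', filter_size_scale=1.0)
images = tf.keras.Input(shape=(224, 224, 3), batch_size=1)
endpoints = network(images)
# Levels '1'..'5' halve the spatial size each time; MobileDetGPU collects
# 16, 32, 64, 128 and 384 filters at those levels.
for level, feature in sorted(endpoints.items()):
  print(level, feature.shape)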
models
models-master/official/vision/modeling/backbones/efficientnet_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for EfficientNet.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.backbones import efficientnet class EfficientNetTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters(32, 224) def test_network_creation(self, input_size): """Test creation of EfficientNet family models.""" tf.keras.backend.set_image_data_format('channels_last') network = efficientnet.EfficientNet(model_id='b0') inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) endpoints = network(inputs) self.assertAllEqual([1, input_size / 2**2, input_size / 2**2, 24], endpoints['2'].shape.as_list()) self.assertAllEqual([1, input_size / 2**3, input_size / 2**3, 40], endpoints['3'].shape.as_list()) self.assertAllEqual([1, input_size / 2**4, input_size / 2**4, 112], endpoints['4'].shape.as_list()) self.assertAllEqual([1, input_size / 2**5, input_size / 2**5, 320], endpoints['5'].shape.as_list()) @parameterized.parameters('b0', 'b3', 'b6') def test_network_scaling(self, model_id): """Test compound scaling.""" efficientnet_params = { 'b0': 4049564, 'b3': 10783528, 'b6': 40960136, } tf.keras.backend.set_image_data_format('channels_last') input_size = 32 network = efficientnet.EfficientNet(model_id=model_id, se_ratio=0.25) self.assertEqual(network.count_params(), efficientnet_params[model_id]) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) _ = network(inputs) @parameterized.parameters(1, 3) def test_input_specs(self, input_dim): """Test different input feature dimensions.""" tf.keras.backend.set_image_data_format('channels_last') input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim]) network = efficientnet.EfficientNet(model_id='b0', input_specs=input_specs) inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1) _ = network(inputs) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( model_id='b0', se_ratio=0.25, stochastic_depth_drop_rate=None, use_sync_bn=False, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, activation='relu', norm_momentum=0.99, norm_epsilon=0.001, ) network = efficientnet.EfficientNet(**kwargs) expected_config = dict(kwargs) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = efficientnet.EfficientNet.from_config(network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == '__main__': tf.test.main()
3,752
35.086538
80
py
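The get_config/from_config round trip used by this and the other backbone tests, shown in isolation and assuming official.vision is importable.

from official.vision.modeling.backbones import efficientnet

network = efficientnet.EfficientNet(model_id='b0', se_ratio=0.25)
config = network.get_config()              # plain dict of constructor arguments
restored = efficientnet.EfficientNet.from_config(config)
assert restored.get_config() == config     # config survives the round trip
_ = restored.to_json()                     # and serializes to JSON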
models
models-master/official/vision/modeling/backbones/resnet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of ResNet and ResNet-RS models.""" from typing import Callable, Optional # Import libraries import tensorflow as tf from official.modeling import hyperparams from official.modeling import tf_utils from official.vision.modeling.backbones import factory from official.vision.modeling.layers import nn_blocks from official.vision.modeling.layers import nn_layers layers = tf.keras.layers # Specifications for different ResNet variants. # Each entry specifies block configurations of the particular ResNet variant. # Each element in the block configuration is in the following format: # (block_fn, num_filters, block_repeats) RESNET_SPECS = { 10: [ ('residual', 64, 1), ('residual', 128, 1), ('residual', 256, 1), ('residual', 512, 1), ], 18: [ ('residual', 64, 2), ('residual', 128, 2), ('residual', 256, 2), ('residual', 512, 2), ], 26: [ ('residual', 64, 3), ('residual', 128, 3), ('residual', 256, 3), ('residual', 512, 3), ], 34: [ ('residual', 64, 3), ('residual', 128, 4), ('residual', 256, 6), ('residual', 512, 3), ], 50: [ ('bottleneck', 64, 3), ('bottleneck', 128, 4), ('bottleneck', 256, 6), ('bottleneck', 512, 3), ], 101: [ ('bottleneck', 64, 3), ('bottleneck', 128, 4), ('bottleneck', 256, 23), ('bottleneck', 512, 3), ], 152: [ ('bottleneck', 64, 3), ('bottleneck', 128, 8), ('bottleneck', 256, 36), ('bottleneck', 512, 3), ], 200: [ ('bottleneck', 64, 3), ('bottleneck', 128, 24), ('bottleneck', 256, 36), ('bottleneck', 512, 3), ], 270: [ ('bottleneck', 64, 4), ('bottleneck', 128, 29), ('bottleneck', 256, 53), ('bottleneck', 512, 4), ], 350: [ ('bottleneck', 64, 4), ('bottleneck', 128, 36), ('bottleneck', 256, 72), ('bottleneck', 512, 4), ], 420: [ ('bottleneck', 64, 4), ('bottleneck', 128, 44), ('bottleneck', 256, 87), ('bottleneck', 512, 4), ], } @tf.keras.utils.register_keras_serializable(package='Vision') class ResNet(tf.keras.Model): """Creates ResNet and ResNet-RS family models. This implements the Deep Residual Network from: Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. Deep Residual Learning for Image Recognition. (https://arxiv.org/pdf/1512.03385) and Irwan Bello, William Fedus, Xianzhi Du, Ekin D. Cubuk, Aravind Srinivas, Tsung-Yi Lin, Jonathon Shlens, Barret Zoph. Revisiting ResNets: Improved Training and Scaling Strategies. (https://arxiv.org/abs/2103.07579). 
""" def __init__( self, model_id: int, input_specs: tf.keras.layers.InputSpec = layers.InputSpec( shape=[None, None, None, 3]), depth_multiplier: float = 1.0, stem_type: str = 'v0', resnetd_shortcut: bool = False, replace_stem_max_pool: bool = False, se_ratio: Optional[float] = None, init_stochastic_depth_rate: float = 0.0, scale_stem: bool = True, activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bn_trainable: bool = True, **kwargs): """Initializes a ResNet model. Args: model_id: An `int` of the depth of ResNet backbone model. input_specs: A `tf.keras.layers.InputSpec` of the input tensor. depth_multiplier: A `float` of the depth multiplier to uniformaly scale up all layers in channel size. This argument is also referred to as `width_multiplier` in (https://arxiv.org/abs/2103.07579). stem_type: A `str` of stem type of ResNet. Default to `v0`. If set to `v1`, use ResNet-D type stem (https://arxiv.org/abs/1812.01187). resnetd_shortcut: A `bool` of whether to use ResNet-D shortcut in downsampling blocks. replace_stem_max_pool: A `bool` of whether to replace the max pool in stem with a stride-2 conv, se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer. init_stochastic_depth_rate: A `float` of initial stochastic depth rate. scale_stem: A `bool` of whether to scale stem layers. activation: A `str` name of the activation function. use_sync_bn: If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A small `float` added to variance to avoid dividing by zero. kernel_initializer: A str for kernel initializer of convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bn_trainable: A `bool` that indicates whether batch norm layers should be trainable. Default to True. **kwargs: Additional keyword arguments to be passed. """ self._model_id = model_id self._input_specs = input_specs self._depth_multiplier = depth_multiplier self._stem_type = stem_type self._resnetd_shortcut = resnetd_shortcut self._replace_stem_max_pool = replace_stem_max_pool self._se_ratio = se_ratio self._init_stochastic_depth_rate = init_stochastic_depth_rate self._scale_stem = scale_stem self._use_sync_bn = use_sync_bn self._activation = activation self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._norm = layers.BatchNormalization self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._bn_trainable = bn_trainable if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 # Build ResNet. 
inputs = tf.keras.Input(shape=input_specs.shape[1:]) x = self._stem(inputs) endpoints = {} for i, spec in enumerate(RESNET_SPECS[model_id]): if spec[0] == 'residual': block_fn = nn_blocks.ResidualBlock elif spec[0] == 'bottleneck': block_fn = nn_blocks.BottleneckBlock else: raise ValueError('Block fn `{}` is not supported.'.format(spec[0])) x = self._block_group( inputs=x, filters=int(spec[1] * self._depth_multiplier), strides=(1 if i == 0 else 2), block_fn=block_fn, block_repeats=spec[2], stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate( self._init_stochastic_depth_rate, i + 2, 5), name='block_group_l{}'.format(i + 2)) endpoints[str(i + 2)] = x self._output_specs = {l: endpoints[l].get_shape() for l in endpoints} super(ResNet, self).__init__(inputs=inputs, outputs=endpoints, **kwargs) def _stem(self, inputs): stem_depth_multiplier = self._depth_multiplier if self._scale_stem else 1.0 if self._stem_type == 'v0': x = layers.Conv2D( filters=int(64 * stem_depth_multiplier), kernel_size=7, strides=2, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, )(inputs) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable, synchronized=self._use_sync_bn, )(x) x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x) elif self._stem_type == 'v1': x = layers.Conv2D( filters=int(32 * stem_depth_multiplier), kernel_size=3, strides=2, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, )(inputs) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable, synchronized=self._use_sync_bn, )(x) x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x) x = layers.Conv2D( filters=int(32 * stem_depth_multiplier), kernel_size=3, strides=1, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, )(x) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable, synchronized=self._use_sync_bn, )(x) x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x) x = layers.Conv2D( filters=int(64 * stem_depth_multiplier), kernel_size=3, strides=1, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, )(x) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable, synchronized=self._use_sync_bn, )(x) x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x) else: raise ValueError('Stem type {} not supported.'.format(self._stem_type)) if self._replace_stem_max_pool: x = layers.Conv2D( filters=int(64 * self._depth_multiplier), kernel_size=3, strides=2, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, )(x) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, trainable=self._bn_trainable, synchronized=self._use_sync_bn, )(x) x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x) else: x = 
layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x) return x def _block_group(self, inputs: tf.Tensor, filters: int, strides: int, block_fn: Callable[..., tf.keras.layers.Layer], block_repeats: int = 1, stochastic_depth_drop_rate: float = 0.0, name: str = 'block_group'): """Creates one group of blocks for the ResNet model. Args: inputs: A `tf.Tensor` of size `[batch, channels, height, width]`. filters: An `int` number of filters for the first convolution of the layer. strides: An `int` stride to use for the first convolution of the layer. If greater than 1, this layer will downsample the input. block_fn: The type of block group. Either `nn_blocks.ResidualBlock` or `nn_blocks.BottleneckBlock`. block_repeats: An `int` number of blocks contained in the layer. stochastic_depth_drop_rate: A `float` of drop rate of the current block group. name: A `str` name for the block. Returns: The output `tf.Tensor` of the block layer. """ x = block_fn( filters=filters, strides=strides, use_projection=True, stochastic_depth_drop_rate=stochastic_depth_drop_rate, se_ratio=self._se_ratio, resnetd_shortcut=self._resnetd_shortcut, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, bn_trainable=self._bn_trainable)( inputs) for _ in range(1, block_repeats): x = block_fn( filters=filters, strides=1, use_projection=False, stochastic_depth_drop_rate=stochastic_depth_drop_rate, se_ratio=self._se_ratio, resnetd_shortcut=self._resnetd_shortcut, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, bn_trainable=self._bn_trainable)( x) return tf.keras.layers.Activation('linear', name=name)(x) def get_config(self): config_dict = { 'model_id': self._model_id, 'depth_multiplier': self._depth_multiplier, 'stem_type': self._stem_type, 'resnetd_shortcut': self._resnetd_shortcut, 'replace_stem_max_pool': self._replace_stem_max_pool, 'activation': self._activation, 'se_ratio': self._se_ratio, 'init_stochastic_depth_rate': self._init_stochastic_depth_rate, 'scale_stem': self._scale_stem, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'bn_trainable': self._bn_trainable } return config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def output_specs(self): """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs @factory.register_backbone_builder('resnet') def build_resnet( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras """Builds ResNet backbone from a config.""" backbone_type = backbone_config.type backbone_cfg = backbone_config.get() assert backbone_type == 'resnet', (f'Inconsistent backbone type ' f'{backbone_type}') return ResNet( model_id=backbone_cfg.model_id, input_specs=input_specs, 
depth_multiplier=backbone_cfg.depth_multiplier, stem_type=backbone_cfg.stem_type, resnetd_shortcut=backbone_cfg.resnetd_shortcut, replace_stem_max_pool=backbone_cfg.replace_stem_max_pool, se_ratio=backbone_cfg.se_ratio, init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate, scale_stem=backbone_cfg.scale_stem, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer, bn_trainable=backbone_cfg.bn_trainable)
16,374
35.797753
139
py
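A ResNet-RS style configuration built directly from the constructor flags documented in resnet.py above, assuming official.vision is importable; the particular flag values here are illustrative rather than a published recipe.

import tensorflow as tf
from official.vision.modeling.backbones import resnet

backbone = resnet.ResNet(
    model_id=50,
    stem_type='v1',                 # ResNet-D stem
    resnetd_shortcut=True,          # ResNet-D downsampling shortcut
    replace_stem_max_pool=True,     # stride-2 conv instead of the stem max pool
    se_ratio=0.25,
    init_stochastic_depth_rate=0.1)
endpoints = backbone(tf.keras.Input(shape=(224, 224, 3), batch_size=1))
print({level: feat.shape for level, feat in endpoints.items()})  # levels '2'..'5'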
models
models-master/official/vision/modeling/backbones/resnet_deeplab_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for resnet_deeplab models.""" # Import libraries from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.vision.modeling.backbones import resnet_deeplab class ResNetTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (128, 50, 4, 8), (128, 101, 4, 8), (128, 152, 4, 8), (128, 200, 4, 8), (128, 50, 4, 16), (128, 101, 4, 16), (128, 152, 4, 16), (128, 200, 4, 16), ) def test_network_creation(self, input_size, model_id, endpoint_filter_scale, output_stride): """Test creation of ResNet models.""" tf.keras.backend.set_image_data_format('channels_last') network = resnet_deeplab.DilatedResNet(model_id=model_id, output_stride=output_stride) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) endpoints = network(inputs) print(endpoints) self.assertAllEqual([ 1, input_size / output_stride, input_size / output_stride, 512 * endpoint_filter_scale ], endpoints[str(int(np.math.log2(output_stride)))].shape.as_list()) @parameterized.parameters( ('v0', None, 0.0, False, False), ('v1', None, 0.0, False, False), ('v1', 0.25, 0.0, False, False), ('v1', 0.25, 0.2, False, False), ('v1', 0.25, 0.0, True, False), ('v1', 0.25, 0.2, False, True), ('v1', None, 0.2, True, True), ) def test_network_features(self, stem_type, se_ratio, init_stochastic_depth_rate, resnetd_shortcut, replace_stem_max_pool): """Test additional features of ResNet models.""" input_size = 128 model_id = 50 endpoint_filter_scale = 4 output_stride = 8 tf.keras.backend.set_image_data_format('channels_last') network = resnet_deeplab.DilatedResNet( model_id=model_id, output_stride=output_stride, stem_type=stem_type, resnetd_shortcut=resnetd_shortcut, replace_stem_max_pool=replace_stem_max_pool, se_ratio=se_ratio, init_stochastic_depth_rate=init_stochastic_depth_rate) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) endpoints = network(inputs) print(endpoints) self.assertAllEqual([ 1, input_size / output_stride, input_size / output_stride, 512 * endpoint_filter_scale ], endpoints[str(int(np.math.log2(output_stride)))].shape.as_list()) @combinations.generate( combinations.combine( strategy=[ strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy_gpu, ], use_sync_bn=[False, True], )) def test_sync_bn_multiple_devices(self, strategy, use_sync_bn): """Test for sync bn on TPU and GPU devices.""" inputs = np.random.rand(64, 128, 128, 3) tf.keras.backend.set_image_data_format('channels_last') with strategy.scope(): network = resnet_deeplab.DilatedResNet( model_id=50, output_stride=8, use_sync_bn=use_sync_bn) _ = network(inputs) @parameterized.parameters(1, 3, 4) def test_input_specs(self, input_dim): """Test different input feature dimensions.""" tf.keras.backend.set_image_data_format('channels_last') 
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim]) network = resnet_deeplab.DilatedResNet( model_id=50, output_stride=8, input_specs=input_specs) inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1) _ = network(inputs) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( model_id=50, output_stride=8, stem_type='v0', se_ratio=0.25, init_stochastic_depth_rate=0.2, resnetd_shortcut=False, replace_stem_max_pool=False, use_sync_bn=False, activation='relu', norm_momentum=0.99, norm_epsilon=0.001, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, ) network = resnet_deeplab.DilatedResNet(**kwargs) expected_config = dict(kwargs) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = resnet_deeplab.DilatedResNet.from_config(network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == '__main__': tf.test.main()
5,510
34.326923
80
py
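A standalone version of the variable-channel input check above, assuming official.vision is importable: DilatedResNet accepts non-RGB inputs through input_specs, and the single deep endpoint is keyed by log2(output_stride).

import tensorflow as tf
from official.vision.modeling.backbones import resnet_deeplab

input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, 4])
network = resnet_deeplab.DilatedResNet(
    model_id=50, output_stride=8, input_specs=input_specs)
features = network(tf.keras.Input(shape=(128, 128, 4), batch_size=1))
print(features['3'].shape)  # level 3 == log2(8); (1, 16, 16, 2048)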
models
models-master/official/vision/modeling/backbones/factory_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for factory functions.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from tensorflow.python.distribute import combinations from official.vision.configs import backbones as backbones_cfg from official.vision.configs import backbones_3d as backbones_3d_cfg from official.vision.configs import common as common_cfg from official.vision.modeling import backbones from official.vision.modeling.backbones import factory class FactoryTest(tf.test.TestCase, parameterized.TestCase): @combinations.generate( combinations.combine(model_id=[18, 34, 50, 101, 152],)) def test_resnet_creation(self, model_id): """Test creation of ResNet models.""" network = backbones.ResNet( model_id=model_id, se_ratio=0.0, norm_momentum=0.99, norm_epsilon=1e-5) backbone_config = backbones_cfg.Backbone( type='resnet', resnet=backbones_cfg.ResNet(model_id=model_id, se_ratio=0.0)) norm_activation_config = common_cfg.NormActivation( norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False) factory_network = factory.build_backbone( input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]), backbone_config=backbone_config, norm_activation_config=norm_activation_config) network_config = network.get_config() factory_network_config = factory_network.get_config() self.assertEqual(network_config, factory_network_config) @combinations.generate( combinations.combine( model_id=['b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7'], se_ratio=[0.0, 0.25], )) def test_efficientnet_creation(self, model_id, se_ratio): """Test creation of EfficientNet models.""" network = backbones.EfficientNet( model_id=model_id, se_ratio=se_ratio, norm_momentum=0.99, norm_epsilon=1e-5) backbone_config = backbones_cfg.Backbone( type='efficientnet', efficientnet=backbones_cfg.EfficientNet( model_id=model_id, se_ratio=se_ratio)) norm_activation_config = common_cfg.NormActivation( norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False) factory_network = factory.build_backbone( input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]), backbone_config=backbone_config, norm_activation_config=norm_activation_config) network_config = network.get_config() factory_network_config = factory_network.get_config() self.assertEqual(network_config, factory_network_config) @combinations.generate( combinations.combine( model_id=['MobileNetV1', 'MobileNetV2', 'MobileNetV3Large', 'MobileNetV3Small', 'MobileNetV3EdgeTPU'], filter_size_scale=[1.0, 0.75], )) def test_mobilenet_creation(self, model_id, filter_size_scale): """Test creation of Mobilenet models.""" network = backbones.MobileNet( model_id=model_id, filter_size_scale=filter_size_scale, norm_momentum=0.99, norm_epsilon=1e-5) backbone_config = backbones_cfg.Backbone( type='mobilenet', mobilenet=backbones_cfg.MobileNet( model_id=model_id, filter_size_scale=filter_size_scale)) norm_activation_config = common_cfg.NormActivation( norm_momentum=0.99, norm_epsilon=1e-5, 
use_sync_bn=False) factory_network = factory.build_backbone( input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]), backbone_config=backbone_config, norm_activation_config=norm_activation_config) network_config = network.get_config() factory_network_config = factory_network.get_config() self.assertEqual(network_config, factory_network_config) @combinations.generate(combinations.combine(model_id=['49'],)) def test_spinenet_creation(self, model_id): """Test creation of SpineNet models.""" input_size = 128 min_level = 3 max_level = 7 input_specs = tf.keras.layers.InputSpec( shape=[None, input_size, input_size, 3]) network = backbones.SpineNet( input_specs=input_specs, min_level=min_level, max_level=max_level, norm_momentum=0.99, norm_epsilon=1e-5) backbone_config = backbones_cfg.Backbone( type='spinenet', spinenet=backbones_cfg.SpineNet(model_id=model_id)) norm_activation_config = common_cfg.NormActivation( norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False) factory_network = factory.build_backbone( input_specs=tf.keras.layers.InputSpec( shape=[None, input_size, input_size, 3]), backbone_config=backbone_config, norm_activation_config=norm_activation_config) network_config = network.get_config() factory_network_config = factory_network.get_config() self.assertEqual(network_config, factory_network_config) @combinations.generate( combinations.combine(model_id=[38, 56, 104],)) def test_revnet_creation(self, model_id): """Test creation of RevNet models.""" network = backbones.RevNet( model_id=model_id, norm_momentum=0.99, norm_epsilon=1e-5) backbone_config = backbones_cfg.Backbone( type='revnet', revnet=backbones_cfg.RevNet(model_id=model_id)) norm_activation_config = common_cfg.NormActivation( norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False) factory_network = factory.build_backbone( input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]), backbone_config=backbone_config, norm_activation_config=norm_activation_config) network_config = network.get_config() factory_network_config = factory_network.get_config() self.assertEqual(network_config, factory_network_config) @combinations.generate(combinations.combine(model_type=['resnet_3d'],)) def test_resnet_3d_creation(self, model_type): """Test creation of ResNet 3D models.""" backbone_cfg = backbones_3d_cfg.Backbone3D(type=model_type).get() temporal_strides = [] temporal_kernel_sizes = [] for block_spec in backbone_cfg.block_specs: temporal_strides.append(block_spec.temporal_strides) temporal_kernel_sizes.append(block_spec.temporal_kernel_sizes) _ = backbones.ResNet3D( model_id=backbone_cfg.model_id, temporal_strides=temporal_strides, temporal_kernel_sizes=temporal_kernel_sizes, norm_momentum=0.99, norm_epsilon=1e-5) @combinations.generate( combinations.combine( model_id=[ 'MobileDetCPU', 'MobileDetDSP', 'MobileDetEdgeTPU', 'MobileDetGPU'], filter_size_scale=[1.0, 0.75], )) def test_mobiledet_creation(self, model_id, filter_size_scale): """Test creation of Mobiledet models.""" network = backbones.MobileDet( model_id=model_id, filter_size_scale=filter_size_scale, norm_momentum=0.99, norm_epsilon=1e-5) backbone_config = backbones_cfg.Backbone( type='mobiledet', mobiledet=backbones_cfg.MobileDet( model_id=model_id, filter_size_scale=filter_size_scale)) norm_activation_config = common_cfg.NormActivation( norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False) factory_network = factory.build_backbone( input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]), backbone_config=backbone_config, 
norm_activation_config=norm_activation_config) network_config = network.get_config() factory_network_config = factory_network.get_config() self.assertEqual(network_config, factory_network_config) if __name__ == '__main__': tf.test.main()
8,383
35.77193
79
py
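Config-driven backbone construction as exercised by factory_test.py above, reduced to the essential calls and assuming official.vision is importable.

import tensorflow as tf
from official.vision.configs import backbones as backbones_cfg
from official.vision.configs import common as common_cfg
from official.vision.modeling.backbones import factory

backbone_config = backbones_cfg.Backbone(
    type='resnet', resnet=backbones_cfg.ResNet(model_id=50))
norm_activation_config = common_cfg.NormActivation(
    norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)
backbone = factory.build_backbone(
    input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
    backbone_config=backbone_config,
    norm_activation_config=norm_activation_config)
print(type(backbone).__name__)  # ResNet, resolved via the 'resnet' registry key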
models
models-master/official/vision/modeling/backbones/vit_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for VIT.""" import math from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.backbones import vit class VisionTransformerTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (224, 85798656), (256, 85844736), ) def test_network_creation(self, input_size, params_count): """Test creation of VisionTransformer family models.""" tf.keras.backend.set_image_data_format('channels_last') input_specs = tf.keras.layers.InputSpec( shape=[2, input_size, input_size, 3]) network = vit.VisionTransformer(input_specs=input_specs) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) _ = network(inputs) self.assertEqual(network.count_params(), params_count) @parameterized.product( patch_size=[6, 4], output_2d_feature_maps=[True, False], pooler=['none', 'gap', 'token'], ) def test_network_with_diferent_configs( self, patch_size, output_2d_feature_maps, pooler): tf.keras.backend.set_image_data_format('channels_last') input_size = 24 expected_feat_level = str(round(math.log2(patch_size))) num_patch_rows = input_size // patch_size input_specs = tf.keras.layers.InputSpec( shape=[2, input_size, input_size, 3]) network = vit.VisionTransformer( input_specs=input_specs, patch_size=patch_size, pooler=pooler, hidden_size=8, mlp_dim=8, num_layers=1, num_heads=2, representation_size=16, output_2d_feature_maps=output_2d_feature_maps) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) output = network(inputs) if pooler == 'none': self.assertEqual( output['encoded_tokens'].shape, [1, num_patch_rows**2, 16]) else: self.assertEqual(output['pre_logits'].shape, [1, 1, 1, 16]) if output_2d_feature_maps: self.assertIn(expected_feat_level, output) self.assertIn(expected_feat_level, network.output_specs) self.assertEqual( network.output_specs[expected_feat_level][1:], [num_patch_rows, num_patch_rows, 8]) else: self.assertNotIn(expected_feat_level, output) def test_posembedding_interpolation(self): tf.keras.backend.set_image_data_format('channels_last') input_size = 256 input_specs = tf.keras.layers.InputSpec( shape=[2, input_size, input_size, 3]) network = vit.VisionTransformer( input_specs=input_specs, patch_size=16, pooler='gap', pos_embed_shape=(14, 14)) # (224 // 16) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) output = network(inputs)['pre_logits'] self.assertEqual(output.shape, [1, 1, 1, 768]) if __name__ == '__main__': tf.test.main()
3,453
33.19802
76
py
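The tiny ViT configuration used in the test above, built directly and assuming official.vision is importable; with a 24x24 input and patch size 6 the sequence is 16 patches plus one class token.

import tensorflow as tf
from official.vision.modeling.backbones import vit

network = vit.VisionTransformer(
    input_specs=tf.keras.layers.InputSpec(shape=[None, 24, 24, 3]),
    patch_size=6, pooler='token',
    hidden_size=8, mlp_dim=8, num_layers=1, num_heads=2,
    representation_size=16)
outputs = network(tf.keras.Input(shape=(24, 24, 3), batch_size=1))
print(outputs['pre_logits'].shape)  # (1, 1, 1, 16)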
models
models-master/official/vision/modeling/backbones/vit.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """VisionTransformer models.""" import math from typing import Optional, Tuple from absl import logging import tensorflow as tf from official.modeling import activations from official.vision.modeling.backbones import factory from official.vision.modeling.backbones.vit_specs import VIT_SPECS from official.vision.modeling.layers import nn_blocks from official.vision.modeling.layers import nn_layers layers = tf.keras.layers class AddPositionEmbs(layers.Layer): """Adds (optionally learned) positional embeddings to the inputs.""" def __init__(self, posemb_init: Optional[tf.keras.initializers.Initializer] = None, posemb_origin_shape: Optional[Tuple[int, int]] = None, posemb_target_shape: Optional[Tuple[int, int]] = None, **kwargs): """Constructs Positional Embedding module. The logic of this module is: the learnable positional embeddings length will be determined by the inputs_shape or posemb_origin_shape (if provided) during the construction. If the posemb_target_shape is provided and is different from the positional embeddings length, the embeddings will be interpolated during the forward call. Args: posemb_init: The positional embedding initializer. posemb_origin_shape: The intended positional embedding shape. posemb_target_shape: The potential target shape positional embedding may be interpolated to. **kwargs: other args. """ super().__init__(**kwargs) self.posemb_init = posemb_init self.posemb_origin_shape = posemb_origin_shape self.posemb_target_shape = posemb_target_shape def build(self, inputs_shape): if self.posemb_origin_shape is not None: pos_emb_length = self.posemb_origin_shape[0] * self.posemb_origin_shape[1] else: pos_emb_length = inputs_shape[1] pos_emb_shape = (1, pos_emb_length, inputs_shape[2]) self.pos_embedding = self.add_weight( 'pos_embedding', pos_emb_shape, initializer=self.posemb_init) def _interpolate(self, pos_embedding: tf.Tensor, from_shape: Tuple[int, int], to_shape: Tuple[int, int]) -> tf.Tensor: """Interpolates the positional embeddings.""" logging.info('Interpolating postional embedding from length: %s to %s', from_shape, to_shape) grid_emb = tf.reshape(pos_embedding, [1] + list(from_shape) + [-1]) # NOTE: Using BILINEAR interpolation by default. grid_emb = tf.image.resize(grid_emb, to_shape) return tf.reshape(grid_emb, [1, to_shape[0] * to_shape[1], -1]) def call(self, inputs, inputs_positions=None): del inputs_positions pos_embedding = self.pos_embedding # inputs.shape is (batch_size, seq_len, emb_dim). 
if inputs.shape[1] != pos_embedding.shape[1]: pos_embedding = self._interpolate( pos_embedding, from_shape=self.posemb_origin_shape, to_shape=self.posemb_target_shape) pos_embedding = tf.cast(pos_embedding, inputs.dtype) return inputs + pos_embedding class TokenLayer(layers.Layer): """A simple layer to wrap token parameters.""" def build(self, inputs_shape): self.cls = self.add_weight( 'cls', (1, 1, inputs_shape[-1]), initializer='zeros') def call(self, inputs): cls = tf.cast(self.cls, inputs.dtype) cls = cls + tf.zeros_like(inputs[:, 0:1]) # A hacky way to tile. x = tf.concat([cls, inputs], axis=1) return x class Encoder(layers.Layer): """Transformer Encoder.""" def __init__(self, num_layers, mlp_dim, num_heads, dropout_rate=0.1, attention_dropout_rate=0.1, kernel_regularizer=None, inputs_positions=None, init_stochastic_depth_rate=0.0, kernel_initializer='glorot_uniform', add_pos_embed=True, pos_embed_origin_shape=None, pos_embed_target_shape=None, layer_scale_init_value=0.0, transformer_partition_dims=None, **kwargs): super().__init__(**kwargs) self._num_layers = num_layers self._mlp_dim = mlp_dim self._num_heads = num_heads self._dropout_rate = dropout_rate self._attention_dropout_rate = attention_dropout_rate self._kernel_regularizer = kernel_regularizer self._inputs_positions = inputs_positions self._init_stochastic_depth_rate = init_stochastic_depth_rate self._kernel_initializer = kernel_initializer self._add_pos_embed = add_pos_embed self._pos_embed_origin_shape = pos_embed_origin_shape self._pos_embed_target_shape = pos_embed_target_shape self._layer_scale_init_value = layer_scale_init_value self._transformer_partition_dims = transformer_partition_dims def build(self, input_shape): if self._add_pos_embed: self._pos_embed = AddPositionEmbs( posemb_init=tf.keras.initializers.RandomNormal(stddev=0.02), posemb_origin_shape=self._pos_embed_origin_shape, posemb_target_shape=self._pos_embed_target_shape, name='posembed_input') self._dropout = layers.Dropout(rate=self._dropout_rate) self._encoder_layers = [] # Set layer norm epsilons to 1e-6 to be consistent with JAX implementation. 
# https://flax.readthedocs.io/en/latest/_autosummary/flax.deprecated.nn.LayerNorm.html for i in range(self._num_layers): encoder_layer = nn_blocks.TransformerEncoderBlock( inner_activation=activations.gelu, num_attention_heads=self._num_heads, inner_dim=self._mlp_dim, output_dropout=self._dropout_rate, attention_dropout=self._attention_dropout_rate, kernel_regularizer=self._kernel_regularizer, kernel_initializer=self._kernel_initializer, norm_first=True, stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate( self._init_stochastic_depth_rate, i + 1, self._num_layers), norm_epsilon=1e-6, layer_scale_init_value=self._layer_scale_init_value, transformer_partition_dims=self._transformer_partition_dims) self._encoder_layers.append(encoder_layer) self._norm = layers.LayerNormalization(epsilon=1e-6) super().build(input_shape) def call(self, inputs, training=None): x = inputs if self._add_pos_embed: x = self._pos_embed(x, inputs_positions=self._inputs_positions) x = self._dropout(x, training=training) for encoder_layer in self._encoder_layers: x = encoder_layer(x, training=training) x = self._norm(x) return x def get_config(self): config = super().get_config() updates = { 'num_layers': self._num_layers, 'mlp_dim': self._mlp_dim, 'num_heads': self._num_heads, 'dropout_rate': self._dropout_rate, 'attention_dropout_rate': self._attention_dropout_rate, 'kernel_regularizer': self._kernel_regularizer, 'inputs_positions': self._inputs_positions, 'init_stochastic_depth_rate': self._init_stochastic_depth_rate, 'kernel_initializer': self._kernel_initializer, 'add_pos_embed': self._add_pos_embed, 'pos_embed_origin_shape': self._pos_embed_origin_shape, 'pos_embed_target_shape': self._pos_embed_target_shape, 'layer_scale_init_value': self._layer_scale_init_value, 'transformer_partition_dims': self._transformer_partition_dims, } config.update(updates) return config class VisionTransformer(tf.keras.Model): """Class to build VisionTransformer family model.""" def __init__( self, mlp_dim=3072, num_heads=12, num_layers=12, attention_dropout_rate=0.0, dropout_rate=0.1, init_stochastic_depth_rate=0.0, input_specs=layers.InputSpec(shape=[None, None, None, 3]), patch_size=16, hidden_size=768, representation_size=0, pooler='token', kernel_regularizer=None, original_init: bool = True, output_encoded_tokens: bool = True, output_2d_feature_maps: bool = False, pos_embed_shape: Optional[Tuple[int, int]] = None, layer_scale_init_value: float = 0.0, transformer_partition_dims: Optional[Tuple[int, int, int, int]] = None, ): """VisionTransformer initialization function.""" self._mlp_dim = mlp_dim self._num_heads = num_heads self._num_layers = num_layers self._hidden_size = hidden_size self._patch_size = patch_size inputs = tf.keras.Input(shape=input_specs.shape[1:]) x = layers.Conv2D( filters=hidden_size, kernel_size=patch_size, strides=patch_size, padding='valid', kernel_regularizer=kernel_regularizer, kernel_initializer='lecun_normal' if original_init else 'he_uniform')( inputs) if tf.keras.backend.image_data_format() == 'channels_last': rows_axis, cols_axis = (1, 2) else: rows_axis, cols_axis = (2, 3) # The reshape below assumes the data_format is 'channels_last,' so # transpose to that. Once the data is flattened by the reshape, the # data_format is irrelevant, so no need to update # tf.keras.backend.image_data_format. 
x = tf.transpose(x, perm=[0, 2, 3, 1]) pos_embed_target_shape = (x.shape[rows_axis], x.shape[cols_axis]) feat_h = input_specs.shape[rows_axis] // patch_size feat_w = input_specs.shape[cols_axis] // patch_size seq_len = feat_h * feat_w x = tf.reshape(x, [-1, seq_len, hidden_size]) # If we want to add a class token, add it here. if pooler == 'token': x = TokenLayer(name='cls')(x) x = Encoder( num_layers=num_layers, mlp_dim=mlp_dim, num_heads=num_heads, dropout_rate=dropout_rate, attention_dropout_rate=attention_dropout_rate, kernel_regularizer=kernel_regularizer, kernel_initializer='glorot_uniform' if original_init else dict( class_name='TruncatedNormal', config=dict(stddev=.02)), init_stochastic_depth_rate=init_stochastic_depth_rate, pos_embed_origin_shape=pos_embed_shape, pos_embed_target_shape=pos_embed_target_shape, layer_scale_init_value=layer_scale_init_value)( x) if pooler == 'token': output_feature = x[:, 1:] x = x[:, 0] elif pooler == 'gap': output_feature = x x = tf.reduce_mean(x, axis=1) elif pooler == 'none': output_feature = x x = tf.identity(x, name='encoded_tokens') else: raise ValueError(f'unrecognized pooler type: {pooler}') endpoints = {} if output_2d_feature_maps: # Use the closest feature level. feat_level = round(math.log2(patch_size)) logging.info( 'VisionTransformer patch size %d and feature level: %d', patch_size, feat_level, ) endpoints[str(feat_level)] = tf.reshape( output_feature, [-1, feat_h, feat_w, x.shape.as_list()[-1]]) # Don"t include `pre_logits` or `encoded_tokens` to support decoders. self._output_specs = {k: v.shape for k, v in endpoints.items()} if representation_size: x = layers.Dense( representation_size, kernel_regularizer=kernel_regularizer, name='pre_logits', kernel_initializer='lecun_normal' if original_init else 'he_uniform', )(x) x = tf.nn.tanh(x) else: x = tf.identity(x, name='pre_logits') if pooler == 'none': if output_encoded_tokens: endpoints['encoded_tokens'] = x else: endpoints['pre_logits'] = tf.reshape( x, [-1, 1, 1, representation_size or hidden_size]) super().__init__(inputs=inputs, outputs=endpoints) @property def output_specs(self): """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs @factory.register_backbone_builder('vit') def build_vit(input_specs, backbone_config, norm_activation_config, l2_regularizer=None): """Build ViT model.""" del norm_activation_config backbone_type = backbone_config.type backbone_cfg = backbone_config.get() assert backbone_type == 'vit', (f'Inconsistent backbone type ' f'{backbone_type}') backbone_cfg.override(VIT_SPECS[backbone_cfg.model_name]) logging.info( ( 'ViT specs: mlp_dim=%d, num_heads=%d, num_layers=%d,' 'patch_size=%d, hidden_size=%d, representation_size=%d.' 
      ),
      backbone_cfg.transformer.mlp_dim,
      backbone_cfg.transformer.num_heads,
      backbone_cfg.transformer.num_layers,
      backbone_cfg.patch_size,
      backbone_cfg.hidden_size,
      backbone_cfg.representation_size,
  )
  return VisionTransformer(
      mlp_dim=backbone_cfg.transformer.mlp_dim,
      num_heads=backbone_cfg.transformer.num_heads,
      num_layers=backbone_cfg.transformer.num_layers,
      attention_dropout_rate=backbone_cfg.transformer.attention_dropout_rate,
      dropout_rate=backbone_cfg.transformer.dropout_rate,
      init_stochastic_depth_rate=backbone_cfg.init_stochastic_depth_rate,
      input_specs=input_specs,
      patch_size=backbone_cfg.patch_size,
      hidden_size=backbone_cfg.hidden_size,
      representation_size=backbone_cfg.representation_size,
      pooler=backbone_cfg.pooler,
      kernel_regularizer=l2_regularizer,
      original_init=backbone_cfg.original_init,
      output_encoded_tokens=backbone_cfg.output_encoded_tokens,
      output_2d_feature_maps=backbone_cfg.output_2d_feature_maps,
      layer_scale_init_value=backbone_cfg.layer_scale_init_value,
      pos_embed_shape=backbone_cfg.pos_embed_shape,
      transformer_partition_dims=backbone_cfg.transformer_partition_dims)
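A minimal usage sketch (not part of the file above): it instantiates the VisionTransformer class defined earlier, spelling out its ViT-B/16-style constructor defaults explicitly, and inspects the returned endpoints. The 224x224 input size and zero-valued batch are arbitrary illustrative choices, and the snippet assumes the class is in scope (or imported from this module).

import tensorflow as tf

# Hypothetical example values; mlp_dim/num_heads/num_layers/hidden_size match
# the constructor defaults shown above.
model = VisionTransformer(
    mlp_dim=3072,
    num_heads=12,
    num_layers=12,
    patch_size=16,
    hidden_size=768,
    pooler='token',
    input_specs=tf.keras.layers.InputSpec(shape=[None, 224, 224, 3]))

endpoints = model(tf.zeros([1, 224, 224, 3]))
# With pooler='token' and representation_size=0, the model exposes a
# 'pre_logits' endpoint of shape [batch, 1, 1, hidden_size].
print({name: feat.shape for name, feat in endpoints.items()})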
14,428
36.971053
90
py
models
models-master/official/vision/modeling/backbones/revnet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of RevNet.""" from typing import Any, Callable, Dict, Optional # Import libraries import tensorflow as tf from official.modeling import hyperparams from official.modeling import tf_utils from official.vision.modeling.backbones import factory from official.vision.modeling.layers import nn_blocks # Specifications for different RevNet variants. # Each entry specifies block configurations of the particular RevNet variant. # Each element in the block configuration is in the following format: # (block_fn, num_filters, block_repeats) REVNET_SPECS = { 38: [ ('residual', 32, 3), ('residual', 64, 3), ('residual', 112, 3), ], 56: [ ('bottleneck', 128, 2), ('bottleneck', 256, 2), ('bottleneck', 512, 3), ('bottleneck', 832, 2), ], 104: [ ('bottleneck', 128, 2), ('bottleneck', 256, 2), ('bottleneck', 512, 11), ('bottleneck', 832, 2), ], } @tf.keras.utils.register_keras_serializable(package='Vision') class RevNet(tf.keras.Model): """Creates a Reversible ResNet (RevNet) family model. This implements: Aidan N. Gomez, Mengye Ren, Raquel Urtasun, Roger B. Grosse. The Reversible Residual Network: Backpropagation Without Storing Activations. (https://arxiv.org/pdf/1707.04585.pdf) """ def __init__( self, model_id: int, input_specs: tf.keras.layers.InputSpec = tf.keras.layers.InputSpec( shape=[None, None, None, 3]), activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a RevNet model. Args: model_id: An `int` of depth/id of ResNet backbone model. input_specs: A `tf.keras.layers.InputSpec` of the input tensor. activation: A `str` name of the activation function. use_sync_bn: If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_initializer: A str for kernel initializer of convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. **kwargs: Additional keyword arguments to be passed. """ self._model_id = model_id self._input_specs = input_specs self._use_sync_bn = use_sync_bn self._activation = activation self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._norm = tf.keras.layers.BatchNormalization axis = -1 if tf.keras.backend.image_data_format() == 'channels_last' else 1 # Build RevNet. 
inputs = tf.keras.Input(shape=input_specs.shape[1:]) x = tf.keras.layers.Conv2D( filters=REVNET_SPECS[model_id][0][1], kernel_size=7, strides=2, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer)(inputs) x = self._norm( axis=axis, momentum=norm_momentum, epsilon=norm_epsilon, synchronized=use_sync_bn)(x) x = tf_utils.get_activation(activation)(x) x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x) endpoints = {} for i, spec in enumerate(REVNET_SPECS[model_id]): if spec[0] == 'residual': inner_block_fn = nn_blocks.ResidualInner elif spec[0] == 'bottleneck': inner_block_fn = nn_blocks.BottleneckResidualInner else: raise ValueError('Block fn `{}` is not supported.'.format(spec[0])) if spec[1] % 2 != 0: raise ValueError('Number of output filters must be even to ensure ' 'splitting in channel dimension for reversible blocks') x = self._block_group( inputs=x, filters=spec[1], strides=(1 if i == 0 else 2), inner_block_fn=inner_block_fn, block_repeats=spec[2], batch_norm_first=(i != 0), # Only skip on first block name='revblock_group_{}'.format(i + 2)) endpoints[str(i + 2)] = x self._output_specs = {l: endpoints[l].get_shape() for l in endpoints} super(RevNet, self).__init__(inputs=inputs, outputs=endpoints, **kwargs) def _block_group(self, inputs: tf.Tensor, filters: int, strides: int, inner_block_fn: Callable[..., tf.keras.layers.Layer], block_repeats: int, batch_norm_first: bool, name: str = 'revblock_group') -> tf.Tensor: """Creates one reversible block for RevNet model. Args: inputs: A `tf.Tensor` of size `[batch, channels, height, width]`. filters: An `int` number of filters for the first convolution of the layer. strides: An `int` stride to use for the first convolution of the layer. If greater than 1, this block group will downsample the input. inner_block_fn: Either `nn_blocks.ResidualInner` or `nn_blocks.BottleneckResidualInner`. block_repeats: An `int` number of blocks contained in this block group. batch_norm_first: A `bool` that specifies whether to apply BatchNormalization and activation layer before feeding into convolution layers. name: A `str` name for the block. Returns: The output `tf.Tensor` of the block layer. 
""" x = inputs for i in range(block_repeats): is_first_block = i == 0 # Only first residual layer in block gets downsampled curr_strides = strides if is_first_block else 1 f = inner_block_fn( filters=filters // 2, strides=curr_strides, batch_norm_first=batch_norm_first and is_first_block, kernel_regularizer=self._kernel_regularizer) g = inner_block_fn( filters=filters // 2, strides=1, batch_norm_first=batch_norm_first and is_first_block, kernel_regularizer=self._kernel_regularizer) x = nn_blocks.ReversibleLayer(f, g)(x) return tf.identity(x, name=name) def get_config(self) -> Dict[str, Any]: config_dict = { 'model_id': self._model_id, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, } return config_dict @classmethod def from_config(cls, config: Dict[str, Any], custom_objects: Optional[Any] = None) -> tf.keras.Model: return cls(**config) @property def output_specs(self) -> Dict[int, tf.TensorShape]: """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs # pytype: disable=bad-return-type # trace-all-classes @factory.register_backbone_builder('revnet') def build_revnet( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras """Builds RevNet backbone from a config.""" backbone_type = backbone_config.type backbone_cfg = backbone_config.get() assert backbone_type == 'revnet', (f'Inconsistent backbone type ' f'{backbone_type}') return RevNet( model_id=backbone_cfg.model_id, input_specs=input_specs, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer)
8,787
36.716738
139
py
models
models-master/official/vision/modeling/backbones/factory.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Backbone registers and factory method.

One can register a new backbone model by the following two steps:

1. Import the factory and register the build in the backbone file.
2. Import the backbone class and add a build in __init__.py.

```
# my_backbone.py

from modeling.backbones import factory

class MyBackbone():
  ...

@factory.register_backbone_builder('my_backbone')
def build_my_backbone():
  return MyBackbone()

# backbones/__init__.py adds import
from modeling.backbones.my_backbone import MyBackbone
```

If the MyBackbone class should only be used by a particular binary, don't
import the backbone module in backbones/__init__.py; instead, import it in
the place that uses it.
"""

from typing import Sequence, Union

# Import libraries

import tensorflow as tf

from official.core import registry
from official.modeling import hyperparams

_REGISTERED_BACKBONE_CLS = {}


def register_backbone_builder(key: str):
  """Decorates a builder of backbone class.

  The builder should be a Callable (a class or a function).
  This decorator supports registration of backbone builder as follows:

  ```
  class MyBackbone(tf.keras.Model):
    pass

  @register_backbone_builder('mybackbone')
  def builder(input_specs, config, l2_reg):
    return MyBackbone(...)

  # Builds a MyBackbone object.
  my_backbone = build_backbone(input_specs, config, l2_reg)
  ```

  Args:
    key: A `str` of key to look up the builder.

  Returns:
    A callable for use as a class decorator that registers the decorated class
    for creation from an instance of task_config_cls.
  """
  return registry.register(_REGISTERED_BACKBONE_CLS, key)


def build_backbone(input_specs: Union[tf.keras.layers.InputSpec,
                                      Sequence[tf.keras.layers.InputSpec]],
                   backbone_config: hyperparams.Config,
                   norm_activation_config: hyperparams.Config,
                   l2_regularizer: tf.keras.regularizers.Regularizer = None,
                   **kwargs) -> tf.keras.Model:  # pytype: disable=annotation-type-mismatch  # typed-keras
  """Builds backbone from a config.

  Args:
    input_specs: A (sequence of) `tf.keras.layers.InputSpec` of input.
    backbone_config: A `OneOfConfig` of backbone config.
    norm_activation_config: A config for normalization/activation layer.
    l2_regularizer: A `tf.keras.regularizers.Regularizer` object. Default to
      None.
    **kwargs: Additional keyword args to be passed to backbone builder.

  Returns:
    A `tf.keras.Model` instance of the backbone.
  """
  backbone_builder = registry.lookup(_REGISTERED_BACKBONE_CLS,
                                     backbone_config.type)

  return backbone_builder(
      input_specs=input_specs,
      backbone_config=backbone_config,
      norm_activation_config=norm_activation_config,
      l2_regularizer=l2_regularizer,
      **kwargs)
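A hedged end-to-end sketch of the registration flow described in the module docstring above. ToyBackbone, build_toy_backbone, and the SimpleNamespace config are hypothetical stand-ins; real code would register its builder inside a backbone module and drive build_backbone with a OneOfConfig whose `type` field names the registered key.

import types

import tensorflow as tf

from official.vision.modeling.backbones import factory


class ToyBackbone(tf.keras.Model):
  """A trivial stand-in backbone exposing a single '2' endpoint."""

  def __init__(self, input_specs, **kwargs):
    inputs = tf.keras.Input(shape=input_specs.shape[1:])
    x = tf.keras.layers.Conv2D(8, 3, strides=4, padding='same')(inputs)
    super().__init__(inputs=inputs, outputs={'2': x}, **kwargs)


@factory.register_backbone_builder('toy_backbone')
def build_toy_backbone(input_specs, backbone_config, norm_activation_config,
                       l2_regularizer=None):
  del backbone_config, norm_activation_config, l2_regularizer  # Unused here.
  return ToyBackbone(input_specs=input_specs)


# `build_backbone` only reads `backbone_config.type` before dispatching, so a
# SimpleNamespace is enough for a quick smoke test of the lookup.
toy_config = types.SimpleNamespace(type='toy_backbone')
model = factory.build_backbone(
    input_specs=tf.keras.layers.InputSpec(shape=[None, 64, 64, 3]),
    backbone_config=toy_config,
    norm_activation_config=None)
print(model(tf.zeros([1, 64, 64, 3]))['2'].shape)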
3,494
29.929204
106
py
models
models-master/official/vision/modeling/backbones/resnet_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for resnet.""" # Import libraries from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.vision.modeling.backbones import resnet class ResNetTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (128, 10, 1), (128, 18, 1), (128, 26, 1), (128, 34, 1), (128, 50, 4), (128, 101, 4), (128, 152, 4), ) def test_network_creation(self, input_size, model_id, endpoint_filter_scale): """Test creation of ResNet family models.""" resnet_params = { 10: 4915904, 18: 11190464, 26: 17465024, 34: 21306048, 50: 23561152, 101: 42605504, 152: 58295232, } tf.keras.backend.set_image_data_format('channels_last') network = resnet.ResNet(model_id=model_id) self.assertEqual(network.count_params(), resnet_params[model_id]) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) endpoints = network(inputs) self.assertAllEqual( [1, input_size / 2**2, input_size / 2**2, 64 * endpoint_filter_scale], endpoints['2'].shape.as_list()) self.assertAllEqual( [1, input_size / 2**3, input_size / 2**3, 128 * endpoint_filter_scale], endpoints['3'].shape.as_list()) self.assertAllEqual( [1, input_size / 2**4, input_size / 2**4, 256 * endpoint_filter_scale], endpoints['4'].shape.as_list()) self.assertAllEqual( [1, input_size / 2**5, input_size / 2**5, 512 * endpoint_filter_scale], endpoints['5'].shape.as_list()) @combinations.generate( combinations.combine( strategy=[ strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy_gpu, ], use_sync_bn=[False, True], )) def test_sync_bn_multiple_devices(self, strategy, use_sync_bn): """Test for sync bn on TPU and GPU devices.""" inputs = np.random.rand(64, 128, 128, 3) tf.keras.backend.set_image_data_format('channels_last') with strategy.scope(): network = resnet.ResNet(model_id=50, use_sync_bn=use_sync_bn) _ = network(inputs) @parameterized.parameters( (128, 34, 1, 'v0', None, 0.0, 1.0, False, False), (128, 34, 1, 'v1', 0.25, 0.2, 1.25, True, True), (128, 50, 4, 'v0', None, 0.0, 1.5, False, False), (128, 50, 4, 'v1', 0.25, 0.2, 2.0, True, True), ) def test_resnet_rs(self, input_size, model_id, endpoint_filter_scale, stem_type, se_ratio, init_stochastic_depth_rate, depth_multiplier, resnetd_shortcut, replace_stem_max_pool): """Test creation of ResNet family models.""" tf.keras.backend.set_image_data_format('channels_last') network = resnet.ResNet( model_id=model_id, depth_multiplier=depth_multiplier, stem_type=stem_type, resnetd_shortcut=resnetd_shortcut, replace_stem_max_pool=replace_stem_max_pool, se_ratio=se_ratio, init_stochastic_depth_rate=init_stochastic_depth_rate) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) _ = network(inputs) @parameterized.parameters(1, 3, 4) def test_input_specs(self, input_dim): """Test different input feature 
dimensions.""" tf.keras.backend.set_image_data_format('channels_last') input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim]) network = resnet.ResNet(model_id=50, input_specs=input_specs) inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1) _ = network(inputs) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( model_id=50, depth_multiplier=1.0, stem_type='v0', se_ratio=None, resnetd_shortcut=False, replace_stem_max_pool=False, init_stochastic_depth_rate=0.0, scale_stem=True, use_sync_bn=False, activation='relu', norm_momentum=0.99, norm_epsilon=0.001, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, bn_trainable=True) network = resnet.ResNet(**kwargs) expected_config = dict(kwargs) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = resnet.ResNet.from_config(network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == '__main__': tf.test.main()
5,545
34.101266
80
py
models
models-master/official/vision/modeling/backbones/spinenet_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for SpineNet.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.backbones import spinenet class SpineNetTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (128, 0.65, 1, 0.5, 128, 4, 6), (256, 1.0, 1, 0.5, 256, 3, 6), (384, 1.0, 2, 0.5, 256, 4, 7), (512, 1.0, 3, 1.0, 256, 3, 7), (640, 1.3, 4, 1.0, 384, 3, 7), ) def test_network_creation(self, input_size, filter_size_scale, block_repeats, resample_alpha, endpoints_num_filters, min_level, max_level): """Test creation of SpineNet models.""" tf.keras.backend.set_image_data_format('channels_last') input_specs = tf.keras.layers.InputSpec( shape=[None, input_size, input_size, 3]) model = spinenet.SpineNet( input_specs=input_specs, min_level=min_level, max_level=max_level, endpoints_num_filters=endpoints_num_filters, resample_alpha=resample_alpha, block_repeats=block_repeats, filter_size_scale=filter_size_scale, init_stochastic_depth_rate=0.2, ) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) endpoints = model(inputs) for l in range(min_level, max_level + 1): self.assertIn(str(l), endpoints.keys()) self.assertAllEqual( [1, input_size / 2**l, input_size / 2**l, endpoints_num_filters], endpoints[str(l)].shape.as_list()) @parameterized.parameters( ((128, 128), (128, 128)), ((128, 128), (256, 256)), ((640, 640), (896, 1664)), ) def test_load_from_different_input_specs(self, input_size_1, input_size_2): """Test loading checkpoints with different input size.""" def build_spinenet(input_size): tf.keras.backend.set_image_data_format('channels_last') input_specs = tf.keras.layers.InputSpec( shape=[None, input_size[0], input_size[1], 3]) model = spinenet.SpineNet( input_specs=input_specs, min_level=3, max_level=7, endpoints_num_filters=384, resample_alpha=1.0, block_repeats=2, filter_size_scale=0.5) return model model_1 = build_spinenet(input_size_1) model_2 = build_spinenet(input_size_2) ckpt_1 = tf.train.Checkpoint(backbone=model_1) ckpt_2 = tf.train.Checkpoint(backbone=model_2) ckpt_path = self.get_temp_dir() + '/ckpt' ckpt_1.write(ckpt_path) ckpt_2.restore(ckpt_path).expect_partial() def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( min_level=3, max_level=7, endpoints_num_filters=256, resample_alpha=0.5, block_repeats=1, filter_size_scale=1.0, init_stochastic_depth_rate=0.2, use_sync_bn=False, activation='relu', norm_momentum=0.99, norm_epsilon=0.001, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, ) network = spinenet.SpineNet(**kwargs) expected_config = dict(kwargs) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = spinenet.SpineNet.from_config(network.get_config()) # Validate that the config can be forced to JSON. 
    _ = new_network.to_json()

    # If the serialization was successful, the new config should match the old.
    self.assertAllEqual(network.get_config(), new_network.get_config())

  @parameterized.parameters(
      ('relu', tf.nn.relu),
      ('swish', tf.nn.swish),
  )
  def test_activation(self, activation, activation_fn):
    model = spinenet.SpineNet(activation=activation)
    self.assertEqual(model._activation_fn, activation_fn)

  def test_invalid_activation_raises_valueerror(self):
    with self.assertRaises(ValueError):
      spinenet.SpineNet(activation='invalid_activation_name')


if __name__ == '__main__':
  tf.test.main()
4,724
32.75
79
py
models
models-master/official/vision/modeling/backbones/mobilenet_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for MobileNet.""" import itertools import math # Import libraries from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.backbones import mobilenet class MobileNetTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( 'MobileNetV1', 'MobileNetV2', 'MobileNetV3Large', 'MobileNetV3Small', 'MobileNetV3EdgeTPU', 'MobileNetMultiAVG', 'MobileNetMultiMAX', 'MobileNetMultiAVGSeg', 'MobileNetMultiMAXSeg', 'MobileNetV3SmallReducedFilters', ) def test_serialize_deserialize(self, model_id): # Create a network object that sets all of its config options. kwargs = dict( model_id=model_id, filter_size_scale=1.0, stochastic_depth_drop_rate=None, use_sync_bn=False, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, norm_momentum=0.99, norm_epsilon=0.001, output_stride=None, min_depth=8, divisible_by=8, regularize_depthwise=False, finegrain_classification_mode=True ) network = mobilenet.MobileNet(**kwargs) expected_config = dict(kwargs) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = mobilenet.MobileNet.from_config(network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. 
self.assertAllEqual(network.get_config(), new_network.get_config()) @parameterized.parameters( itertools.product( [1, 3], [ 'MobileNetV1', 'MobileNetV2', 'MobileNetV3Large', 'MobileNetV3Small', 'MobileNetV3EdgeTPU', 'MobileNetMultiAVG', 'MobileNetMultiMAX', 'MobileNetMultiAVGSeg', 'MobileNetMultiMAXSeg', 'MobileNetV3SmallReducedFilters', ], )) def test_input_specs(self, input_dim, model_id): """Test different input feature dimensions.""" tf.keras.backend.set_image_data_format('channels_last') input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim]) network = mobilenet.MobileNet(model_id=model_id, input_specs=input_specs) inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1) _ = network(inputs) @parameterized.parameters( itertools.product( [ 'MobileNetV1', 'MobileNetV2', 'MobileNetV3Large', 'MobileNetV3Small', 'MobileNetV3EdgeTPU', 'MobileNetMultiAVG', 'MobileNetMultiMAX', 'MobileNetMultiAVGSeg', 'MobileNetV3SmallReducedFilters', ], [32, 224], )) def test_mobilenet_creation(self, model_id, input_size): """Test creation of MobileNet family models.""" tf.keras.backend.set_image_data_format('channels_last') mobilenet_layers = { # The number of filters of layers having outputs been collected # for filter_size_scale = 1.0 'MobileNetV1': [128, 256, 512, 1024], 'MobileNetV2': [24, 32, 96, 320], 'MobileNetV3Small': [16, 24, 48, 96], 'MobileNetV3Large': [24, 40, 112, 160], 'MobileNetV3EdgeTPU': [32, 48, 96, 192], 'MobileNetMultiMAX': [32, 64, 128, 160], 'MobileNetMultiAVG': [32, 64, 160, 192], 'MobileNetMultiAVGSeg': [32, 64, 160, 96], 'MobileNetMultiMAXSeg': [32, 64, 128, 96], 'MobileNetV3SmallReducedFilters': [16, 24, 48, 48], } network = mobilenet.MobileNet(model_id=model_id, filter_size_scale=1.0) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) endpoints = network(inputs) for idx, num_filter in enumerate(mobilenet_layers[model_id]): self.assertAllEqual( [1, input_size / 2 ** (idx+2), input_size / 2 ** (idx+2), num_filter], endpoints[str(idx+2)].shape.as_list()) @parameterized.parameters( itertools.product( [ 'MobileNetV1', 'MobileNetV2', 'MobileNetV3Large', 'MobileNetV3Small', 'MobileNetV3EdgeTPU', 'MobileNetMultiAVG', 'MobileNetMultiMAX', 'MobileNetMultiAVGSeg', 'MobileNetMultiMAXSeg', 'MobileNetV3SmallReducedFilters', ], [32, 224], )) def test_mobilenet_intermediate_layers(self, model_id, input_size): tf.keras.backend.set_image_data_format('channels_last') # Tests the mobilenet intermediate depthwise layers. mobilenet_depthwise_layers = { # The number of filters of depthwise layers having outputs been # collected for filter_size_scale = 1.0. Only tests the mobilenet # model with inverted bottleneck block using depthwise which excludes # MobileNetV1. 'MobileNetV1': [], 'MobileNetV2': [144, 192, 576, 960], 'MobileNetV3Small': [16, 88, 144, 576], 'MobileNetV3Large': [72, 120, 672, 960], 'MobileNetV3EdgeTPU': [None, None, 384, 1280], 'MobileNetMultiMAX': [96, 128, 384, 640], 'MobileNetMultiAVG': [64, 192, 640, 768], 'MobileNetMultiAVGSeg': [64, 192, 640, 384], 'MobileNetMultiMAXSeg': [96, 128, 384, 320], 'MobileNetV3SmallReducedFilters': [16, 88, 144, 288], } network = mobilenet.MobileNet(model_id=model_id, filter_size_scale=1.0, output_intermediate_endpoints=True) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) endpoints = network(inputs) for idx, num_filter in enumerate(mobilenet_depthwise_layers[model_id]): # Not using depthwise conv in this layer. 
if num_filter is None: continue self.assertAllEqual( [1, input_size / 2**(idx + 2), input_size / 2**(idx + 2), num_filter], endpoints[str(idx + 2) + '/depthwise'].shape.as_list()) @parameterized.parameters( itertools.product( [ 'MobileNetV1', 'MobileNetV2', 'MobileNetV3Large', 'MobileNetV3Small', 'MobileNetV3EdgeTPU', 'MobileNetMultiAVG', 'MobileNetMultiMAX', 'MobileNetMultiMAX', 'MobileNetMultiAVGSeg', 'MobileNetMultiMAXSeg', 'MobileNetV3SmallReducedFilters', ], [1.0, 0.75], )) def test_mobilenet_scaling(self, model_id, filter_size_scale): """Test for creation of a MobileNet classifier.""" mobilenet_params = { ('MobileNetV1', 1.0): 3228864, ('MobileNetV1', 0.75): 1832976, ('MobileNetV2', 1.0): 2257984, ('MobileNetV2', 0.75): 1382064, ('MobileNetV3Large', 1.0): 4226432, ('MobileNetV3Large', 0.75): 2731616, ('MobileNetV3Small', 1.0): 1529968, ('MobileNetV3Small', 0.75): 1026552, ('MobileNetV3EdgeTPU', 1.0): 2849312, ('MobileNetV3EdgeTPU', 0.75): 1737288, ('MobileNetMultiAVG', 1.0): 3704416, ('MobileNetMultiAVG', 0.75): 2349704, ('MobileNetMultiMAX', 1.0): 3174560, ('MobileNetMultiMAX', 0.75): 2045816, ('MobileNetMultiAVGSeg', 1.0): 2239840, ('MobileNetMultiAVGSeg', 0.75): 1395272, ('MobileNetMultiMAXSeg', 1.0): 1929088, ('MobileNetMultiMAXSeg', 0.75): 1216544, ('MobileNetV3SmallReducedFilters', 1.0): 694880, ('MobileNetV3SmallReducedFilters', 0.75): 505960, } input_size = 224 network = mobilenet.MobileNet(model_id=model_id, filter_size_scale=filter_size_scale) self.assertEqual(network.count_params(), mobilenet_params[(model_id, filter_size_scale)]) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) _ = network(inputs) @parameterized.parameters( itertools.product( [ 'MobileNetV1', 'MobileNetV2', 'MobileNetV3Large', 'MobileNetV3Small', 'MobileNetV3EdgeTPU', 'MobileNetMultiAVG', 'MobileNetMultiMAX', 'MobileNetMultiAVGSeg', 'MobileNetMultiMAXSeg', 'MobileNetV3SmallReducedFilters', ], [8, 16, 32], )) def test_mobilenet_output_stride(self, model_id, output_stride): """Test for creation of a MobileNet with different output strides.""" tf.keras.backend.set_image_data_format('channels_last') mobilenet_layers = { # The number of filters of the layers outputs been collected # for filter_size_scale = 1.0. 'MobileNetV1': 1024, 'MobileNetV2': 320, 'MobileNetV3Small': 96, 'MobileNetV3Large': 160, 'MobileNetV3EdgeTPU': 192, 'MobileNetMultiMAX': 160, 'MobileNetMultiAVG': 192, 'MobileNetMultiAVGSeg': 448, 'MobileNetMultiMAXSeg': 448, 'MobileNetV3SmallReducedFilters': 48, } network = mobilenet.MobileNet( model_id=model_id, filter_size_scale=1.0, output_stride=output_stride) level = int(math.log2(output_stride)) input_size = 224 inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) endpoints = network(inputs) num_filter = mobilenet_layers[model_id] self.assertAllEqual( [1, input_size / output_stride, input_size / output_stride, num_filter], endpoints[str(level)].shape.as_list()) if __name__ == '__main__': tf.test.main()
10,596
34.441472
80
py
models
models-master/official/vision/modeling/backbones/resnet_3d.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of 3D Residual Networks.""" from typing import Callable, List, Tuple, Optional # Import libraries import tensorflow as tf from official.modeling import hyperparams from official.modeling import tf_utils from official.vision.modeling.backbones import factory from official.vision.modeling.layers import nn_blocks_3d from official.vision.modeling.layers import nn_layers layers = tf.keras.layers RESNET_SPECS = { 50: [ ('bottleneck3d', 64, 3), ('bottleneck3d', 128, 4), ('bottleneck3d', 256, 6), ('bottleneck3d', 512, 3), ], 101: [ ('bottleneck3d', 64, 3), ('bottleneck3d', 128, 4), ('bottleneck3d', 256, 23), ('bottleneck3d', 512, 3), ], 152: [ ('bottleneck3d', 64, 3), ('bottleneck3d', 128, 8), ('bottleneck3d', 256, 36), ('bottleneck3d', 512, 3), ], 200: [ ('bottleneck3d', 64, 3), ('bottleneck3d', 128, 24), ('bottleneck3d', 256, 36), ('bottleneck3d', 512, 3), ], 270: [ ('bottleneck3d', 64, 4), ('bottleneck3d', 128, 29), ('bottleneck3d', 256, 53), ('bottleneck3d', 512, 4), ], 300: [ ('bottleneck3d', 64, 4), ('bottleneck3d', 128, 36), ('bottleneck3d', 256, 54), ('bottleneck3d', 512, 4), ], 350: [ ('bottleneck3d', 64, 4), ('bottleneck3d', 128, 36), ('bottleneck3d', 256, 72), ('bottleneck3d', 512, 4), ], } @tf.keras.utils.register_keras_serializable(package='Vision') class ResNet3D(tf.keras.Model): """Creates a 3D ResNet family model.""" def __init__( self, model_id: int, temporal_strides: List[int], temporal_kernel_sizes: List[Tuple[int]], use_self_gating: Optional[List[int]] = None, input_specs: tf.keras.layers.InputSpec = layers.InputSpec( shape=[None, None, None, None, 3]), stem_type: str = 'v0', stem_conv_temporal_kernel_size: int = 5, stem_conv_temporal_stride: int = 2, stem_pool_temporal_stride: int = 2, init_stochastic_depth_rate: float = 0.0, activation: str = 'relu', se_ratio: Optional[float] = None, use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a 3D ResNet model. Args: model_id: An `int` of depth of ResNet backbone model. temporal_strides: A list of integers that specifies the temporal strides for all 3d blocks. temporal_kernel_sizes: A list of tuples that specifies the temporal kernel sizes for all 3d blocks in different block groups. use_self_gating: A list of booleans to specify applying self-gating module or not in each block group. If None, self-gating is not applied. input_specs: A `tf.keras.layers.InputSpec` of the input tensor. stem_type: A `str` of stem type of ResNet. Default to `v0`. If set to `v1`, use ResNet-D type stem (https://arxiv.org/abs/1812.01187). stem_conv_temporal_kernel_size: An `int` of temporal kernel size for the first conv layer. 
stem_conv_temporal_stride: An `int` of temporal stride for the first conv layer. stem_pool_temporal_stride: An `int` of temporal stride for the first pool layer. init_stochastic_depth_rate: A `float` of initial stochastic depth rate. activation: A `str` of name of the activation function. se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer. use_sync_bn: If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_initializer: A str for kernel initializer of convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. **kwargs: Additional keyword arguments to be passed. """ self._model_id = model_id self._temporal_strides = temporal_strides self._temporal_kernel_sizes = temporal_kernel_sizes self._input_specs = input_specs self._stem_type = stem_type self._stem_conv_temporal_kernel_size = stem_conv_temporal_kernel_size self._stem_conv_temporal_stride = stem_conv_temporal_stride self._stem_pool_temporal_stride = stem_pool_temporal_stride self._use_self_gating = use_self_gating self._se_ratio = se_ratio self._init_stochastic_depth_rate = init_stochastic_depth_rate self._use_sync_bn = use_sync_bn self._activation = activation self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._norm = layers.BatchNormalization self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 # Build ResNet3D backbone. inputs = tf.keras.Input(shape=input_specs.shape[1:]) endpoints = self._build_model(inputs) self._output_specs = {l: endpoints[l].get_shape() for l in endpoints} super(ResNet3D, self).__init__(inputs=inputs, outputs=endpoints, **kwargs) def _build_model(self, inputs): """Builds model architecture. Args: inputs: the keras input spec. Returns: endpoints: A dictionary of backbone endpoint features. """ # Build stem. x = self._build_stem(inputs, stem_type=self._stem_type) temporal_kernel_size = 1 if self._stem_pool_temporal_stride == 1 else 3 x = layers.MaxPool3D( pool_size=[temporal_kernel_size, 3, 3], strides=[self._stem_pool_temporal_stride, 2, 2], padding='same')(x) # Build intermediate blocks and endpoints. 
resnet_specs = RESNET_SPECS[self._model_id] if len(self._temporal_strides) != len(resnet_specs) or len( self._temporal_kernel_sizes) != len(resnet_specs): raise ValueError( 'Number of blocks in temporal specs should equal to resnet_specs.') endpoints = {} for i, resnet_spec in enumerate(resnet_specs): if resnet_spec[0] == 'bottleneck3d': block_fn = nn_blocks_3d.BottleneckBlock3D else: raise ValueError('Block fn `{}` is not supported.'.format( resnet_spec[0])) use_self_gating = ( self._use_self_gating[i] if self._use_self_gating else False) x = self._block_group( inputs=x, filters=resnet_spec[1], temporal_kernel_sizes=self._temporal_kernel_sizes[i], temporal_strides=self._temporal_strides[i], spatial_strides=(1 if i == 0 else 2), block_fn=block_fn, block_repeats=resnet_spec[2], stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate( self._init_stochastic_depth_rate, i + 2, 5), use_self_gating=use_self_gating, name='block_group_l{}'.format(i + 2)) endpoints[str(i + 2)] = x return endpoints def _build_stem(self, inputs, stem_type): """Builds stem layer.""" # Build stem. if stem_type == 'v0': x = layers.Conv3D( filters=64, kernel_size=[self._stem_conv_temporal_kernel_size, 7, 7], strides=[self._stem_conv_temporal_stride, 2, 2], use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( inputs) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)(x) x = tf_utils.get_activation(self._activation)(x) elif stem_type == 'v1': x = layers.Conv3D( filters=32, kernel_size=[self._stem_conv_temporal_kernel_size, 3, 3], strides=[self._stem_conv_temporal_stride, 2, 2], use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( inputs) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)(x) x = tf_utils.get_activation(self._activation)(x) x = layers.Conv3D( filters=32, kernel_size=[1, 3, 3], strides=[1, 1, 1], use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( x) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)(x) x = tf_utils.get_activation(self._activation)(x) x = layers.Conv3D( filters=64, kernel_size=[1, 3, 3], strides=[1, 1, 1], use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)( x) x = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn)(x) x = tf_utils.get_activation(self._activation)(x) else: raise ValueError(f'Stem type {stem_type} not supported.') return x def _block_group(self, inputs: tf.Tensor, filters: int, temporal_kernel_sizes: Tuple[int], temporal_strides: int, spatial_strides: int, block_fn: Callable[ ..., tf.keras.layers.Layer] = nn_blocks_3d.BottleneckBlock3D, block_repeats: int = 1, stochastic_depth_drop_rate: float = 0.0, use_self_gating: bool = False, name: str = 'block_group'): """Creates one group of blocks for the ResNet3D model. Args: inputs: A `tf.Tensor` of size `[batch, channels, height, width]`. 
filters: An `int` of number of filters for the first convolution of the layer. temporal_kernel_sizes: A tuple that specifies the temporal kernel sizes for each block in the current group. temporal_strides: An `int` of temporal strides for the first convolution in this group. spatial_strides: An `int` stride to use for the first convolution of the layer. If greater than 1, this layer will downsample the input. block_fn: Either `nn_blocks.ResidualBlock` or `nn_blocks.BottleneckBlock`. block_repeats: An `int` of number of blocks contained in the layer. stochastic_depth_drop_rate: A `float` of drop rate of the current block group. use_self_gating: A `bool` that specifies whether to apply self-gating module or not. name: A `str` name for the block. Returns: The output `tf.Tensor` of the block layer. """ if len(temporal_kernel_sizes) != block_repeats: raise ValueError( 'Number of elements in `temporal_kernel_sizes` must equal to `block_repeats`.' ) # Only apply self-gating module in the last block. use_self_gating_list = [False] * (block_repeats - 1) + [use_self_gating] x = block_fn( filters=filters, temporal_kernel_size=temporal_kernel_sizes[0], temporal_strides=temporal_strides, spatial_strides=spatial_strides, stochastic_depth_drop_rate=stochastic_depth_drop_rate, use_self_gating=use_self_gating_list[0], se_ratio=self._se_ratio, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)( inputs) for i in range(1, block_repeats): x = block_fn( filters=filters, temporal_kernel_size=temporal_kernel_sizes[i], temporal_strides=1, spatial_strides=1, stochastic_depth_drop_rate=stochastic_depth_drop_rate, use_self_gating=use_self_gating_list[i], se_ratio=self._se_ratio, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)( x) return tf.identity(x, name=name) def get_config(self): config_dict = { 'model_id': self._model_id, 'temporal_strides': self._temporal_strides, 'temporal_kernel_sizes': self._temporal_kernel_sizes, 'stem_type': self._stem_type, 'stem_conv_temporal_kernel_size': self._stem_conv_temporal_kernel_size, 'stem_conv_temporal_stride': self._stem_conv_temporal_stride, 'stem_pool_temporal_stride': self._stem_pool_temporal_stride, 'use_self_gating': self._use_self_gating, 'se_ratio': self._se_ratio, 'init_stochastic_depth_rate': self._init_stochastic_depth_rate, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, } return config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def output_specs(self): """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs @factory.register_backbone_builder('resnet_3d') def build_resnet3d( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None ) -> tf.keras.Model: """Builds ResNet 3d backbone from a config.""" 
backbone_cfg = backbone_config.get() # Flatten configs before passing to the backbone. temporal_strides = [] temporal_kernel_sizes = [] use_self_gating = [] for block_spec in backbone_cfg.block_specs: temporal_strides.append(block_spec.temporal_strides) temporal_kernel_sizes.append(block_spec.temporal_kernel_sizes) use_self_gating.append(block_spec.use_self_gating) return ResNet3D( model_id=backbone_cfg.model_id, temporal_strides=temporal_strides, temporal_kernel_sizes=temporal_kernel_sizes, use_self_gating=use_self_gating, input_specs=input_specs, stem_type=backbone_cfg.stem_type, stem_conv_temporal_kernel_size=backbone_cfg .stem_conv_temporal_kernel_size, stem_conv_temporal_stride=backbone_cfg.stem_conv_temporal_stride, stem_pool_temporal_stride=backbone_cfg.stem_pool_temporal_stride, init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate, se_ratio=backbone_cfg.se_ratio, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer) @factory.register_backbone_builder('resnet_3d_rs') def build_resnet3d_rs( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None ) -> tf.keras.Model: """Builds ResNet-3D-RS backbone from a config.""" backbone_cfg = backbone_config.get() # Flatten configs before passing to the backbone. temporal_strides = [] temporal_kernel_sizes = [] use_self_gating = [] for i, block_spec in enumerate(backbone_cfg.block_specs): temporal_strides.append(block_spec.temporal_strides) use_self_gating.append(block_spec.use_self_gating) block_repeats_i = RESNET_SPECS[backbone_cfg.model_id][i][-1] temporal_kernel_sizes.append(list(block_spec.temporal_kernel_sizes) * block_repeats_i) return ResNet3D( model_id=backbone_cfg.model_id, temporal_strides=temporal_strides, temporal_kernel_sizes=temporal_kernel_sizes, use_self_gating=use_self_gating, input_specs=input_specs, stem_type=backbone_cfg.stem_type, stem_conv_temporal_kernel_size=backbone_cfg .stem_conv_temporal_kernel_size, stem_conv_temporal_stride=backbone_cfg.stem_conv_temporal_stride, stem_pool_temporal_stride=backbone_cfg.stem_pool_temporal_stride, init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate, se_ratio=backbone_cfg.se_ratio, activation=norm_activation_config.activation, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer)
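A hedged usage sketch (not part of resnet_3d.py): it constructs ResNet3D directly instead of going through the config-driven builders above. The temporal strides and kernel sizes below are illustrative values; the constraint enforced in `_block_group` is that each tuple in `temporal_kernel_sizes` has one entry per block repeat of RESNET_SPECS[50], i.e. lengths 3, 4, 6 and 3.

import tensorflow as tf

from official.vision.modeling.backbones import resnet_3d

backbone = resnet_3d.ResNet3D(
    model_id=50,
    temporal_strides=[1, 1, 1, 1],
    temporal_kernel_sizes=[(3, 3, 3),
                           (3, 1, 3, 1),
                           (3, 1, 3, 1, 3, 1),
                           (1, 3, 1)],
    input_specs=tf.keras.layers.InputSpec(shape=[None, 8, 64, 64, 3]))

# Endpoints '2'..'5' hold spatio-temporal features at increasing spatial
# stride; the temporal resolution here is reduced only by the stem.
print(backbone.output_specs)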
18,647
37.931106
88
py
models
models-master/official/vision/modeling/backbones/mobilenet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains definitions of MobileNet Networks."""

import dataclasses
from typing import Optional, Dict, Any, Tuple

# Import libraries

import tensorflow as tf

from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.modeling.backbones import factory
from official.vision.modeling.layers import nn_blocks
from official.vision.modeling.layers import nn_layers

layers = tf.keras.layers

# pylint: disable=pointless-string-statement


@tf.keras.utils.register_keras_serializable(package='Vision')
class Conv2DBNBlock(tf.keras.layers.Layer):
  """A convolution block with batch normalization."""

  def __init__(
      self,
      filters: int,
      kernel_size: int = 3,
      strides: int = 1,
      use_bias: bool = False,
      use_explicit_padding: bool = False,
      activation: str = 'relu6',
      kernel_initializer: str = 'VarianceScaling',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      use_normalization: bool = True,
      use_sync_bn: bool = False,
      norm_momentum: float = 0.99,
      norm_epsilon: float = 0.001,
      **kwargs):
    """A convolution block with batch normalization.

    Args:
      filters: An `int` number of filters for the convolution.
      kernel_size: An `int` specifying the height and width of the 2D
        convolution window.
      strides: An `int` of block stride. If greater than 1, this block will
        ultimately downsample the input.
      use_bias: If True, use bias in the convolution layer.
      use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
        inputs so that the output dimensions are the same as if 'SAME' padding
        were used.
      activation: A `str` name of the activation function.
      kernel_initializer: A `str` for kernel initializer of convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default to None.
      use_normalization: If True, use batch normalization.
      use_sync_bn: If True, use synchronized batch normalization.
      norm_momentum: A `float` of normalization momentum for the moving
        average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      **kwargs: Additional keyword arguments to be passed.
""" super(Conv2DBNBlock, self).__init__(**kwargs) self._filters = filters self._kernel_size = kernel_size self._strides = strides self._activation = activation self._use_bias = use_bias self._use_explicit_padding = use_explicit_padding self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._use_normalization = use_normalization self._use_sync_bn = use_sync_bn self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._norm = tf.keras.layers.BatchNormalization if use_explicit_padding and kernel_size > 1: self._padding = 'valid' else: self._padding = 'same' if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 def get_config(self): config = { 'filters': self._filters, 'strides': self._strides, 'kernel_size': self._kernel_size, 'use_bias': self._use_bias, 'use_explicit_padding': self._use_explicit_padding, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'activation': self._activation, 'use_sync_bn': self._use_sync_bn, 'use_normalization': self._use_normalization, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon } base_config = super(Conv2DBNBlock, self).get_config() return dict(list(base_config.items()) + list(config.items())) def build(self, input_shape): if self._use_explicit_padding and self._kernel_size > 1: padding_size = nn_layers.get_padding_for_kernel_size(self._kernel_size) self._pad = tf.keras.layers.ZeroPadding2D(padding_size) self._conv0 = tf.keras.layers.Conv2D( filters=self._filters, kernel_size=self._kernel_size, strides=self._strides, padding=self._padding, use_bias=self._use_bias, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer) if self._use_normalization: self._norm0 = self._norm( axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, synchronized=self._use_sync_bn) self._activation_layer = tf_utils.get_activation( self._activation, use_keras_layer=True) super(Conv2DBNBlock, self).build(input_shape) def call(self, inputs, training=None): if self._use_explicit_padding and self._kernel_size > 1: inputs = self._pad(inputs) x = self._conv0(inputs) if self._use_normalization: x = self._norm0(x) return self._activation_layer(x) """ Architecture: https://arxiv.org/abs/1704.04861. "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications" Andrew G. 
Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam """ MNV1_BLOCK_SPECS = { 'spec_name': 'MobileNetV1', 'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters', 'is_output'], 'block_specs': [ ('convbn', 3, 2, 32, False), ('depsepconv', 3, 1, 64, False), ('depsepconv', 3, 2, 128, False), ('depsepconv', 3, 1, 128, True), ('depsepconv', 3, 2, 256, False), ('depsepconv', 3, 1, 256, True), ('depsepconv', 3, 2, 512, False), ('depsepconv', 3, 1, 512, False), ('depsepconv', 3, 1, 512, False), ('depsepconv', 3, 1, 512, False), ('depsepconv', 3, 1, 512, False), ('depsepconv', 3, 1, 512, True), ('depsepconv', 3, 2, 1024, False), ('depsepconv', 3, 1, 1024, True), ] } """ Architecture: https://arxiv.org/abs/1801.04381 "MobileNetV2: Inverted Residuals and Linear Bottlenecks" Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen """ MNV2_BLOCK_SPECS = { 'spec_name': 'MobileNetV2', 'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters', 'expand_ratio', 'is_output'], 'block_specs': [ ('convbn', 3, 2, 32, None, False), ('invertedbottleneck', 3, 1, 16, 1., False), ('invertedbottleneck', 3, 2, 24, 6., False), ('invertedbottleneck', 3, 1, 24, 6., True), ('invertedbottleneck', 3, 2, 32, 6., False), ('invertedbottleneck', 3, 1, 32, 6., False), ('invertedbottleneck', 3, 1, 32, 6., True), ('invertedbottleneck', 3, 2, 64, 6., False), ('invertedbottleneck', 3, 1, 64, 6., False), ('invertedbottleneck', 3, 1, 64, 6., False), ('invertedbottleneck', 3, 1, 64, 6., False), ('invertedbottleneck', 3, 1, 96, 6., False), ('invertedbottleneck', 3, 1, 96, 6., False), ('invertedbottleneck', 3, 1, 96, 6., True), ('invertedbottleneck', 3, 2, 160, 6., False), ('invertedbottleneck', 3, 1, 160, 6., False), ('invertedbottleneck', 3, 1, 160, 6., False), ('invertedbottleneck', 3, 1, 320, 6., True), ('convbn', 1, 1, 1280, None, False), ] } """ Architecture: https://arxiv.org/abs/1905.02244 "Searching for MobileNetV3" Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. 
Le, Hartwig Adam """ MNV3Large_BLOCK_SPECS = { 'spec_name': 'MobileNetV3Large', 'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters', 'activation', 'se_ratio', 'expand_ratio', 'use_normalization', 'use_bias', 'is_output'], 'block_specs': [ ('convbn', 3, 2, 16, 'hard_swish', None, None, True, False, False), ('invertedbottleneck', 3, 1, 16, 'relu', None, 1., None, False, False), ('invertedbottleneck', 3, 2, 24, 'relu', None, 4., None, False, False), ('invertedbottleneck', 3, 1, 24, 'relu', None, 3., None, False, True), ('invertedbottleneck', 5, 2, 40, 'relu', 0.25, 3., None, False, False), ('invertedbottleneck', 5, 1, 40, 'relu', 0.25, 3., None, False, False), ('invertedbottleneck', 5, 1, 40, 'relu', 0.25, 3., None, False, True), ('invertedbottleneck', 3, 2, 80, 'hard_swish', None, 6., None, False, False), ('invertedbottleneck', 3, 1, 80, 'hard_swish', None, 2.5, None, False, False), ('invertedbottleneck', 3, 1, 80, 'hard_swish', None, 2.3, None, False, False), ('invertedbottleneck', 3, 1, 80, 'hard_swish', None, 2.3, None, False, False), ('invertedbottleneck', 3, 1, 112, 'hard_swish', 0.25, 6., None, False, False), ('invertedbottleneck', 3, 1, 112, 'hard_swish', 0.25, 6., None, False, True), ('invertedbottleneck', 5, 2, 160, 'hard_swish', 0.25, 6., None, False, False), ('invertedbottleneck', 5, 1, 160, 'hard_swish', 0.25, 6., None, False, False), ('invertedbottleneck', 5, 1, 160, 'hard_swish', 0.25, 6., None, False, True), ('convbn', 1, 1, 960, 'hard_swish', None, None, True, False, False), ('gpooling', None, None, None, None, None, None, None, None, False), ('convbn', 1, 1, 1280, 'hard_swish', None, None, False, True, False), ] } MNV3Small_BLOCK_SPECS = { 'spec_name': 'MobileNetV3Small', 'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters', 'activation', 'se_ratio', 'expand_ratio', 'use_normalization', 'use_bias', 'is_output'], 'block_specs': [ ('convbn', 3, 2, 16, 'hard_swish', None, None, True, False, False), ('invertedbottleneck', 3, 2, 16, 'relu', 0.25, 1, None, False, True), ('invertedbottleneck', 3, 2, 24, 'relu', None, 72. / 16, None, False, False), ('invertedbottleneck', 3, 1, 24, 'relu', None, 88. 
/ 24, None, False, True), ('invertedbottleneck', 5, 2, 40, 'hard_swish', 0.25, 4., None, False, False), ('invertedbottleneck', 5, 1, 40, 'hard_swish', 0.25, 6., None, False, False), ('invertedbottleneck', 5, 1, 40, 'hard_swish', 0.25, 6., None, False, False), ('invertedbottleneck', 5, 1, 48, 'hard_swish', 0.25, 3., None, False, False), ('invertedbottleneck', 5, 1, 48, 'hard_swish', 0.25, 3., None, False, True), ('invertedbottleneck', 5, 2, 96, 'hard_swish', 0.25, 6., None, False, False), ('invertedbottleneck', 5, 1, 96, 'hard_swish', 0.25, 6., None, False, False), ('invertedbottleneck', 5, 1, 96, 'hard_swish', 0.25, 6., None, False, True), ('convbn', 1, 1, 576, 'hard_swish', None, None, True, False, False), ('gpooling', None, None, None, None, None, None, None, None, False), ('convbn', 1, 1, 1024, 'hard_swish', None, None, False, True, False), ] } """ The EdgeTPU version is taken from github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v3.py """ MNV3EdgeTPU_BLOCK_SPECS = { 'spec_name': 'MobileNetV3EdgeTPU', 'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters', 'activation', 'se_ratio', 'expand_ratio', 'use_residual', 'use_depthwise', 'is_output'], 'block_specs': [ ('convbn', 3, 2, 32, 'relu', None, None, None, None, False), ('invertedbottleneck', 3, 1, 16, 'relu', None, 1., True, False, False), ('invertedbottleneck', 3, 2, 32, 'relu', None, 8., True, False, False), ('invertedbottleneck', 3, 1, 32, 'relu', None, 4., True, False, False), ('invertedbottleneck', 3, 1, 32, 'relu', None, 4., True, False, False), ('invertedbottleneck', 3, 1, 32, 'relu', None, 4., True, False, True), ('invertedbottleneck', 3, 2, 48, 'relu', None, 8., True, False, False), ('invertedbottleneck', 3, 1, 48, 'relu', None, 4., True, False, False), ('invertedbottleneck', 3, 1, 48, 'relu', None, 4., True, False, False), ('invertedbottleneck', 3, 1, 48, 'relu', None, 4., True, False, True), ('invertedbottleneck', 3, 2, 96, 'relu', None, 8., True, True, False), ('invertedbottleneck', 3, 1, 96, 'relu', None, 4., True, True, False), ('invertedbottleneck', 3, 1, 96, 'relu', None, 4., True, True, False), ('invertedbottleneck', 3, 1, 96, 'relu', None, 4., True, True, False), ('invertedbottleneck', 3, 1, 96, 'relu', None, 8., False, True, False), ('invertedbottleneck', 3, 1, 96, 'relu', None, 4., True, True, False), ('invertedbottleneck', 3, 1, 96, 'relu', None, 4., True, True, False), ('invertedbottleneck', 3, 1, 96, 'relu', None, 4., True, True, True), ('invertedbottleneck', 5, 2, 160, 'relu', None, 8., True, True, False), ('invertedbottleneck', 5, 1, 160, 'relu', None, 4., True, True, False), ('invertedbottleneck', 5, 1, 160, 'relu', None, 4., True, True, False), ('invertedbottleneck', 5, 1, 160, 'relu', None, 4., True, True, False), ('invertedbottleneck', 3, 1, 192, 'relu', None, 8., True, True, True), ('convbn', 1, 1, 1280, 'relu', None, None, None, None, False), ] } """ Architecture: https://arxiv.org/pdf/2008.08178.pdf "Discovering Multi-Hardware Mobile Models via Architecture Search" Grace Chu, Okan Arikan, Gabriel Bender, Weijun Wang, Achille Brighton, Pieter-Jan Kindermans, Hanxiao Liu, Berkin Akin, Suyog Gupta, and Andrew Howard """ MNMultiMAX_BLOCK_SPECS = { 'spec_name': 'MobileNetMultiMAX', 'block_spec_schema': [ 'block_fn', 'kernel_size', 'strides', 'filters', 'activation', 'expand_ratio', 'use_normalization', 'use_bias', 'is_output' ], 'block_specs': [ ('convbn', 3, 2, 32, 'relu', None, True, False, False), ('invertedbottleneck', 3, 2, 32, 'relu', 3., None, False, True), 
('invertedbottleneck', 5, 2, 64, 'relu', 6., None, False, False), ('invertedbottleneck', 3, 1, 64, 'relu', 2., None, False, False), ('invertedbottleneck', 3, 1, 64, 'relu', 2., None, False, True), ('invertedbottleneck', 5, 2, 128, 'relu', 6., None, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 4., None, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 3., None, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 3., None, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 6., None, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 3., None, False, True), ('invertedbottleneck', 3, 2, 160, 'relu', 6., None, False, False), ('invertedbottleneck', 5, 1, 160, 'relu', 4., None, False, False), ('invertedbottleneck', 3, 1, 160, 'relu', 5., None, False, False), ('invertedbottleneck', 5, 1, 160, 'relu', 4., None, False, True), ('convbn', 1, 1, 960, 'relu', None, True, False, False), ('gpooling', None, None, None, None, None, None, None, False), # Remove bias and add batch norm for the last layer to support QAT # and achieve slightly better accuracy. ('convbn', 1, 1, 1280, 'relu', None, True, False, False), ] } MNMultiAVG_BLOCK_SPECS = { 'spec_name': 'MobileNetMultiAVG', 'block_spec_schema': [ 'block_fn', 'kernel_size', 'strides', 'filters', 'activation', 'expand_ratio', 'use_normalization', 'use_bias', 'is_output' ], 'block_specs': [ ('convbn', 3, 2, 32, 'relu', None, True, False, False), ('invertedbottleneck', 3, 2, 32, 'relu', 3., None, False, False), ('invertedbottleneck', 3, 1, 32, 'relu', 2., None, False, True), ('invertedbottleneck', 5, 2, 64, 'relu', 5., None, False, False), ('invertedbottleneck', 3, 1, 64, 'relu', 3., None, False, False), ('invertedbottleneck', 3, 1, 64, 'relu', 2., None, False, False), ('invertedbottleneck', 3, 1, 64, 'relu', 3., None, False, True), ('invertedbottleneck', 5, 2, 128, 'relu', 6., None, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 3., None, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 3., None, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 3., None, False, False), ('invertedbottleneck', 3, 1, 160, 'relu', 6., None, False, False), ('invertedbottleneck', 3, 1, 160, 'relu', 4., None, False, True), ('invertedbottleneck', 3, 2, 192, 'relu', 6., None, False, False), ('invertedbottleneck', 5, 1, 192, 'relu', 4., None, False, False), ('invertedbottleneck', 5, 1, 192, 'relu', 4., None, False, False), ('invertedbottleneck', 5, 1, 192, 'relu', 4., None, False, True), ('convbn', 1, 1, 960, 'relu', None, True, False, False), ('gpooling', None, None, None, None, None, None, None, False), # Remove bias and add batch norm for the last layer to support QAT # and achieve slightly better accuracy. ('convbn', 1, 1, 1280, 'relu', None, True, False, False), ] } # Similar to MobileNetMultiAVG and used for segmentation task. # Reduced the filters by a factor of 2 in the last block. 
MNMultiAVG_SEG_BLOCK_SPECS = { 'spec_name': 'MobileNetMultiAVGSeg', 'block_spec_schema': [ 'block_fn', 'kernel_size', 'strides', 'filters', 'activation', 'expand_ratio', 'use_normalization', 'use_bias', 'is_output' ], 'block_specs': [ ('convbn', 3, 2, 32, 'relu', None, True, False, False), ('invertedbottleneck', 3, 2, 32, 'relu', 3., True, False, False), ('invertedbottleneck', 3, 1, 32, 'relu', 2., True, False, True), ('invertedbottleneck', 5, 2, 64, 'relu', 5., True, False, False), ('invertedbottleneck', 3, 1, 64, 'relu', 3., True, False, False), ('invertedbottleneck', 3, 1, 64, 'relu', 2., True, False, False), ('invertedbottleneck', 3, 1, 64, 'relu', 3., True, False, True), ('invertedbottleneck', 5, 2, 128, 'relu', 6., True, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 3., True, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 3., True, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 3., True, False, False), ('invertedbottleneck', 3, 1, 160, 'relu', 6., True, False, False), ('invertedbottleneck', 3, 1, 160, 'relu', 4., True, False, True), ('invertedbottleneck', 3, 2, 192, 'relu', 6., True, False, False), ('invertedbottleneck', 5, 1, 96, 'relu', 2., True, False, False), ('invertedbottleneck', 5, 1, 96, 'relu', 4., True, False, False), ('invertedbottleneck', 5, 1, 96, 'relu', 4., True, False, True), ('convbn', 1, 1, 448, 'relu', None, True, False, True), ('gpooling', None, None, None, None, None, None, None, False), # Remove bias and add batch norm for the last layer to support QAT # and achieve slightly better accuracy. ('convbn', 1, 1, 1280, 'relu', None, True, False, False), ] } # Similar to MobileNetMultiMax and used for segmentation task. # Reduced the filters by a factor of 2 in the last block. MNMultiMAX_SEG_BLOCK_SPECS = { 'spec_name': 'MobileNetMultiMAXSeg', 'block_spec_schema': [ 'block_fn', 'kernel_size', 'strides', 'filters', 'activation', 'expand_ratio', 'use_normalization', 'use_bias', 'is_output' ], 'block_specs': [ ('convbn', 3, 2, 32, 'relu', None, True, False, False), ('invertedbottleneck', 3, 2, 32, 'relu', 3., True, False, True), ('invertedbottleneck', 5, 2, 64, 'relu', 6., True, False, False), ('invertedbottleneck', 3, 1, 64, 'relu', 2., True, False, False), ('invertedbottleneck', 3, 1, 64, 'relu', 2., True, False, True), ('invertedbottleneck', 5, 2, 128, 'relu', 6., True, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 4., True, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 3., True, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 3., True, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 6., True, False, False), ('invertedbottleneck', 3, 1, 128, 'relu', 3., True, False, True), ('invertedbottleneck', 3, 2, 160, 'relu', 6., True, False, False), ('invertedbottleneck', 5, 1, 96, 'relu', 2., True, False, False), ('invertedbottleneck', 3, 1, 96, 'relu', 4., True, False, False), ('invertedbottleneck', 5, 1, 96, 'relu', 320.0 / 96, True, False, True), ('convbn', 1, 1, 448, 'relu', None, True, False, True), ('gpooling', None, None, None, None, None, None, None, False), # Remove bias and add batch norm for the last layer to support QAT # and achieve slightly better accuracy. 
('convbn', 1, 1, 1280, 'relu', None, True, False, False), ] } # A smaller MNV3Small, with reduced filters for the last few layers MNV3SmallReducedFilters = { 'spec_name': 'MobilenetV3SmallReducedFilters', 'block_spec_schema': [ 'block_fn', 'kernel_size', 'strides', 'filters', 'activation', 'se_ratio', 'expand_ratio', 'use_normalization', 'use_bias', 'is_output' ], 'block_specs': [ ('convbn', 3, 2, 16, 'hard_swish', None, None, True, False, False), ('invertedbottleneck', 3, 2, 16, 'relu', 0.25, 1, None, False, True), ('invertedbottleneck', 3, 2, 24, 'relu', None, 72. / 16, None, False, False), ('invertedbottleneck', 3, 1, 24, 'relu', None, 88. / 24, None, False, True), ('invertedbottleneck', 5, 2, 40, 'hard_swish', 0.25, 4, None, False, False), ('invertedbottleneck', 5, 1, 40, 'hard_swish', 0.25, 6, None, False, False), ('invertedbottleneck', 5, 1, 40, 'hard_swish', 0.25, 6, None, False, False), ('invertedbottleneck', 5, 1, 48, 'hard_swish', 0.25, 3, None, False, False), ('invertedbottleneck', 5, 1, 48, 'hard_swish', 0.25, 3, None, False, True), # Layers below are different from MobileNetV3Small and have # half as many filters ('invertedbottleneck', 5, 2, 48, 'hard_swish', 0.25, 3, None, False, False), ('invertedbottleneck', 5, 1, 48, 'hard_swish', 0.25, 6, None, False, False), ('invertedbottleneck', 5, 1, 48, 'hard_swish', 0.25, 6, None, False, True), ('convbn', 1, 1, 288, 'hard_swish', None, None, True, False, False), ('gpooling', None, None, None, None, None, None, None, None, False), ('convbn', 1, 1, 1024, 'hard_swish', None, None, False, True, False), ] } SUPPORTED_SPECS_MAP = { 'MobileNetV1': MNV1_BLOCK_SPECS, 'MobileNetV2': MNV2_BLOCK_SPECS, 'MobileNetV3Large': MNV3Large_BLOCK_SPECS, 'MobileNetV3Small': MNV3Small_BLOCK_SPECS, 'MobileNetV3EdgeTPU': MNV3EdgeTPU_BLOCK_SPECS, 'MobileNetMultiMAX': MNMultiMAX_BLOCK_SPECS, 'MobileNetMultiAVG': MNMultiAVG_BLOCK_SPECS, 'MobileNetMultiAVGSeg': MNMultiAVG_SEG_BLOCK_SPECS, 'MobileNetMultiMAXSeg': MNMultiMAX_SEG_BLOCK_SPECS, 'MobileNetV3SmallReducedFilters': MNV3SmallReducedFilters, } @dataclasses.dataclass class BlockSpec(hyperparams.Config): """A container class that specifies the block configuration for MobileNet.""" block_fn: str = 'convbn' kernel_size: int = 3 strides: int = 1 filters: int = 32 use_bias: bool = False use_normalization: bool = True activation: str = 'relu6' # Used for block type InvertedResConv. expand_ratio: Optional[float] = 6. # Used for block type InvertedResConv with SE. se_ratio: Optional[float] = None use_depthwise: bool = True use_residual: bool = True is_output: bool = True def block_spec_decoder( specs: Dict[Any, Any], filter_size_scale: float, # Set to 1 for mobilenetv1. divisible_by: int = 8, finegrain_classification_mode: bool = True): """Decodes specs for a block. Args: specs: A `dict` specification of block specs of a mobilenet version. filter_size_scale: A `float` multiplier for the filter size for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. divisible_by: An `int` that ensures all inner dimensions are divisible by this number. finegrain_classification_mode: If True, the model will keep the last layer large even for small multipliers, following https://arxiv.org/abs/1801.04381. Returns: A list of `BlockSpec` that defines structure of the base network. 
""" spec_name = specs['spec_name'] block_spec_schema = specs['block_spec_schema'] block_specs = specs['block_specs'] if not block_specs: raise ValueError( 'The block spec cannot be empty for {} !'.format(spec_name)) if len(block_specs[0]) != len(block_spec_schema): raise ValueError('The block spec values {} do not match with ' 'the schema {}'.format(block_specs[0], block_spec_schema)) decoded_specs = [] for s in block_specs: kw_s = dict(zip(block_spec_schema, s)) decoded_specs.append(BlockSpec(**kw_s)) # This adjustment applies to V2 and V3 if (spec_name != 'MobileNetV1' and finegrain_classification_mode and filter_size_scale < 1.0): decoded_specs[-1].filters /= filter_size_scale # pytype: disable=annotation-type-mismatch for ds in decoded_specs: if ds.filters: ds.filters = nn_layers.round_filters(filters=ds.filters, multiplier=filter_size_scale, divisor=divisible_by, min_depth=8) return decoded_specs @tf.keras.utils.register_keras_serializable(package='Vision') class MobileNet(tf.keras.Model): """Creates a MobileNet family model.""" def __init__( self, model_id: str = 'MobileNetV2', filter_size_scale: float = 1.0, input_specs: tf.keras.layers.InputSpec = layers.InputSpec( shape=[None, None, None, 3]), # The followings are for hyper-parameter tuning. norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, # The followings should be kept the same most of the times. output_stride: Optional[int] = None, min_depth: int = 8, # divisible is not used in MobileNetV1. divisible_by: int = 8, stochastic_depth_drop_rate: float = 0.0, regularize_depthwise: bool = False, use_sync_bn: bool = False, # finegrain is not used in MobileNetV1. finegrain_classification_mode: bool = True, output_intermediate_endpoints: bool = False, **kwargs): """Initializes a MobileNet model. Args: model_id: A `str` of MobileNet version. The supported values are `MobileNetV1`, `MobileNetV2`, `MobileNetV3Large`, `MobileNetV3Small`, `MobileNetV3EdgeTPU`, `MobileNetMultiMAX` and `MobileNetMultiAVG`. filter_size_scale: A `float` of multiplier for the filters (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. input_specs: A `tf.keras.layers.InputSpec` of specs of the input tensor. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_initializer: A `str` for kernel initializer of convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. output_stride: An `int` that specifies the requested ratio of input to output spatial resolution. If not None, then we invoke atrous convolution if necessary to prevent the network from reducing the spatial resolution of activation maps. Allowed values are 8 (accurate fully convolutional mode), 16 (fast fully convolutional mode), 32 (classification mode). min_depth: An `int` of minimum depth (number of channels) for all convolution ops. Enforced when filter_size_scale < 1, and not an active constraint when filter_size_scale >= 1. divisible_by: An `int` that ensures all inner dimensions are divisible by this number. 
stochastic_depth_drop_rate: A `float` of drop rate for drop connect layer. regularize_depthwise: If True, apply regularization on depthwise. use_sync_bn: If True, use synchronized batch normalization. finegrain_classification_mode: If True, the model will keep the last layer large even for small multipliers, following https://arxiv.org/abs/1801.04381. output_intermediate_endpoints: A `bool` of whether or not output the intermediate endpoints. **kwargs: Additional keyword arguments to be passed. """ if model_id not in SUPPORTED_SPECS_MAP: raise ValueError('The MobileNet version {} ' 'is not supported'.format(model_id)) if filter_size_scale <= 0: raise ValueError('filter_size_scale is not greater than zero.') if output_stride is not None: if model_id == 'MobileNetV1': if output_stride not in [8, 16, 32]: raise ValueError('Only allowed output_stride values are 8, 16, 32.') else: if output_stride == 0 or (output_stride > 1 and output_stride % 2): raise ValueError('Output stride must be None, 1 or a multiple of 2.') self._model_id = model_id self._input_specs = input_specs self._filter_size_scale = filter_size_scale self._min_depth = min_depth self._output_stride = output_stride self._divisible_by = divisible_by self._stochastic_depth_drop_rate = stochastic_depth_drop_rate self._regularize_depthwise = regularize_depthwise self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._use_sync_bn = use_sync_bn self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._finegrain_classification_mode = finegrain_classification_mode self._output_intermediate_endpoints = output_intermediate_endpoints inputs = tf.keras.Input(shape=input_specs.shape[1:]) block_specs = SUPPORTED_SPECS_MAP.get(model_id) self._decoded_specs = block_spec_decoder( specs=block_specs, filter_size_scale=self._filter_size_scale, divisible_by=self._get_divisible_by(), finegrain_classification_mode=self._finegrain_classification_mode) x, endpoints, next_endpoint_level = self._mobilenet_base(inputs=inputs) self._output_specs = {l: endpoints[l].get_shape() for l in endpoints} # Don't include the final layer in `self._output_specs` to support decoders. endpoints[str(next_endpoint_level)] = x super(MobileNet, self).__init__( inputs=inputs, outputs=endpoints, **kwargs) def _get_divisible_by(self): if self._model_id == 'MobileNetV1': return 1 else: return self._divisible_by def _mobilenet_base(self, inputs: tf.Tensor ) -> Tuple[tf.Tensor, Dict[str, tf.Tensor], int]: """Builds the base MobileNet architecture. Args: inputs: A `tf.Tensor` of shape `[batch_size, height, width, channels]`. Returns: A tuple of output Tensor and dictionary that collects endpoints. """ input_shape = inputs.get_shape().as_list() if len(input_shape) != 4: raise ValueError('Expected rank 4 input, was: %d' % len(input_shape)) # The current_stride variable keeps track of the output stride of the # activations, i.e., the running product of convolution strides up to the # current network layer. This allows us to invoke atrous convolution # whenever applying the next convolution would result in the activations # having output stride larger than the target output_stride. current_stride = 1 # The atrous convolution rate parameter.
rate = 1 net = inputs endpoints = {} endpoint_level = 2 for i, block_def in enumerate(self._decoded_specs): block_name = 'block_group_{}_{}'.format(block_def.block_fn, i) # A small catch for gpooling block with None strides if not block_def.strides: block_def.strides = 1 if (self._output_stride is not None and current_stride == self._output_stride): # If we have reached the target output_stride, then we need to employ # atrous convolution with stride=1 and multiply the atrous rate by the # current unit's stride for use in subsequent layers. layer_stride = 1 layer_rate = rate rate *= block_def.strides else: layer_stride = block_def.strides layer_rate = 1 current_stride *= block_def.strides intermediate_endpoints = {} if block_def.block_fn == 'convbn': net = Conv2DBNBlock( filters=block_def.filters, kernel_size=block_def.kernel_size, strides=block_def.strides, activation=block_def.activation, use_bias=block_def.use_bias, use_normalization=block_def.use_normalization, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon )(net) elif block_def.block_fn == 'depsepconv': net = nn_blocks.DepthwiseSeparableConvBlock( filters=block_def.filters, kernel_size=block_def.kernel_size, strides=layer_stride, activation=block_def.activation, dilation_rate=layer_rate, regularize_depthwise=self._regularize_depthwise, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, )(net) elif block_def.block_fn == 'invertedbottleneck': use_rate = rate if layer_rate > 1 and block_def.kernel_size != 1: # We will apply atrous rate in the following cases: # 1) When kernel_size is not in params, the operation then uses # default kernel size 3x3. # 2) When kernel_size is in params, and if the kernel_size is not # equal to (1, 1) (there is no need to apply atrous convolution to # any 1x1 convolution). 
use_rate = layer_rate in_filters = net.shape.as_list()[-1] block = nn_blocks.InvertedBottleneckBlock( in_filters=in_filters, out_filters=block_def.filters, kernel_size=block_def.kernel_size, strides=layer_stride, expand_ratio=block_def.expand_ratio, se_ratio=block_def.se_ratio, expand_se_in_filters=True, se_gating_activation='hard_sigmoid', activation=block_def.activation, use_depthwise=block_def.use_depthwise, use_residual=block_def.use_residual, dilation_rate=use_rate, regularize_depthwise=self._regularize_depthwise, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, stochastic_depth_drop_rate=self._stochastic_depth_drop_rate, divisible_by=self._get_divisible_by(), output_intermediate_endpoints=self._output_intermediate_endpoints, ) if self._output_intermediate_endpoints: net, intermediate_endpoints = block(net) else: net = block(net) elif block_def.block_fn == 'gpooling': net = layers.GlobalAveragePooling2D(keepdims=True)(net) else: raise ValueError('Unknown block type {} for layer {}'.format( block_def.block_fn, i)) net = tf.keras.layers.Activation('linear', name=block_name)(net) if block_def.is_output: endpoints[str(endpoint_level)] = net for key, tensor in intermediate_endpoints.items(): endpoints[str(endpoint_level) + '/' + key] = tensor if current_stride != self._output_stride: endpoint_level += 1 if str(endpoint_level) in endpoints: endpoint_level += 1 return net, endpoints, endpoint_level def get_config(self): config_dict = { 'model_id': self._model_id, 'filter_size_scale': self._filter_size_scale, 'min_depth': self._min_depth, 'output_stride': self._output_stride, 'divisible_by': self._divisible_by, 'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate, 'regularize_depthwise': self._regularize_depthwise, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, 'finegrain_classification_mode': self._finegrain_classification_mode, } return config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def output_specs(self): """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs @factory.register_backbone_builder('mobilenet') def build_mobilenet( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None ) -> tf.keras.Model: """Builds MobileNet backbone from a config.""" backbone_type = backbone_config.type backbone_cfg = backbone_config.get() assert backbone_type == 'mobilenet', (f'Inconsistent backbone type ' f'{backbone_type}') return MobileNet( model_id=backbone_cfg.model_id, filter_size_scale=backbone_cfg.filter_size_scale, input_specs=input_specs, stochastic_depth_drop_rate=backbone_cfg.stochastic_depth_drop_rate, output_stride=backbone_cfg.output_stride, output_intermediate_endpoints=backbone_cfg.output_intermediate_endpoints, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer)
40,965
42.860814
94
py
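Not part of the original mobilenet.py above: a minimal usage sketch, assuming the TF Model Garden packages are importable as `official.vision.modeling.backbones.mobilenet` and that the endpoint naming follows `_mobilenet_base`.

import tensorflow as tf
from official.vision.modeling.backbones import mobilenet

# Build a half-width MobileNetV3Small backbone on 224x224 RGB inputs.
backbone = mobilenet.MobileNet(
    model_id='MobileNetV3Small',
    filter_size_scale=0.5,
    input_specs=tf.keras.layers.InputSpec(shape=[None, 224, 224, 3]))

# Calling the functional model returns a dict of endpoints keyed by the
# string feature level, e.g. '3' -> a [1, 28, 28, C] feature map.
features = backbone(tf.zeros([1, 224, 224, 3]))
for level, feature_map in sorted(features.items()):
  print(level, feature_map.shape)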
models
models-master/official/vision/modeling/backbones/revnet_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for RevNet.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.backbones import revnet class RevNetTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (128, 56, 4), (128, 104, 4), ) def test_network_creation(self, input_size, model_id, endpoint_filter_scale): """Test creation of RevNet family models.""" tf.keras.backend.set_image_data_format('channels_last') network = revnet.RevNet(model_id=model_id) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) endpoints = network(inputs) network.summary() self.assertAllEqual( [1, input_size / 2**2, input_size / 2**2, 128 * endpoint_filter_scale], endpoints['2'].shape.as_list()) self.assertAllEqual( [1, input_size / 2**3, input_size / 2**3, 256 * endpoint_filter_scale], endpoints['3'].shape.as_list()) self.assertAllEqual( [1, input_size / 2**4, input_size / 2**4, 512 * endpoint_filter_scale], endpoints['4'].shape.as_list()) self.assertAllEqual( [1, input_size / 2**5, input_size / 2**5, 832 * endpoint_filter_scale], endpoints['5'].shape.as_list()) @parameterized.parameters(1, 3, 4) def test_input_specs(self, input_dim): """Test different input feature dimensions.""" tf.keras.backend.set_image_data_format('channels_last') input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim]) network = revnet.RevNet(model_id=56, input_specs=input_specs) inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1) _ = network(inputs) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( model_id=56, activation='relu', use_sync_bn=False, norm_momentum=0.99, norm_epsilon=0.001, kernel_initializer='VarianceScaling', kernel_regularizer=None, ) network = revnet.RevNet(**kwargs) expected_config = dict(kwargs) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = revnet.RevNet.from_config(network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == '__main__': tf.test.main()
3,215
33.956522
80
py
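Not part of the original revnet_test.py above: a small sketch that builds the RevNet exercised by the test and prints its endpoint shapes, assuming `official.vision.modeling.backbones.revnet` is importable; the channel counts follow the 128/256/512/832 base filters scaled by the endpoint filter scale asserted in the test.

import tensorflow as tf
from official.vision.modeling.backbones import revnet

backbone = revnet.RevNet(model_id=56)
endpoints = backbone(tf.zeros([1, 128, 128, 3]))
# Levels '2'..'5' halve the spatial size at each step.
print({level: feature.shape.as_list() for level, feature in endpoints.items()})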
models
models-master/official/vision/modeling/backbones/spinenet_mobile_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SpineNet.""" # Import libraries from absl.testing import parameterized import tensorflow as tf from official.vision.modeling.backbones import spinenet_mobile class SpineNetMobileTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( (128, 0.6, 1, 0.0, 24), (128, 0.65, 1, 0.2, 40), (256, 1.0, 1, 0.2, 48), ) def test_network_creation(self, input_size, filter_size_scale, block_repeats, se_ratio, endpoints_num_filters): """Test creation of SpineNet models.""" min_level = 3 max_level = 7 tf.keras.backend.set_image_data_format('channels_last') input_specs = tf.keras.layers.InputSpec( shape=[None, input_size, input_size, 3]) model = spinenet_mobile.SpineNetMobile( input_specs=input_specs, min_level=min_level, max_level=max_level, endpoints_num_filters=endpoints_num_filters, resample_alpha=se_ratio, block_repeats=block_repeats, filter_size_scale=filter_size_scale, init_stochastic_depth_rate=0.2, ) inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1) endpoints = model(inputs) for l in range(min_level, max_level + 1): self.assertIn(str(l), endpoints.keys()) self.assertAllEqual( [1, input_size / 2**l, input_size / 2**l, endpoints_num_filters], endpoints[str(l)].shape.as_list()) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( min_level=3, max_level=7, endpoints_num_filters=256, se_ratio=0.2, expand_ratio=6, block_repeats=1, filter_size_scale=1.0, init_stochastic_depth_rate=0.2, use_sync_bn=False, activation='relu', norm_momentum=0.99, norm_epsilon=0.001, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, use_keras_upsampling_2d=False, ) network = spinenet_mobile.SpineNetMobile(**kwargs) expected_config = dict(kwargs) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = spinenet_mobile.SpineNetMobile.from_config( network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == '__main__': tf.test.main()
3,948
34.258929
80
py
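Not part of the original spinenet_mobile_test.py above: a hedged sketch that builds the mobile SpineNet through the registered backbone factory rather than by direct construction; it assumes the `official.vision` config dataclasses, the `factory.build_backbone` entry point, and that importing the backbones package registers the `spinenet_mobile` builder.

import tensorflow as tf
from official.vision.configs import backbones as backbones_cfg
from official.vision.configs import common as common_cfg
from official.vision.modeling.backbones import factory

backbone = factory.build_backbone(
    input_specs=tf.keras.layers.InputSpec(shape=[None, 256, 256, 3]),
    backbone_config=backbones_cfg.Backbone(
        type='spinenet_mobile',
        spinenet_mobile=backbones_cfg.SpineNetMobile(model_id='49')),
    norm_activation_config=common_cfg.NormActivation(use_sync_bn=False))
# One output level per key between min_level=3 and max_level=7.
print(backbone.output_specs)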
models
models-master/official/vision/modeling/backbones/mobiledet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Definitions of MobileDet Networks.""" import dataclasses from typing import Any, Dict, Optional, Tuple, List import tensorflow as tf from official.modeling import hyperparams from official.vision.modeling.backbones import factory from official.vision.modeling.backbones import mobilenet from official.vision.modeling.layers import nn_blocks from official.vision.modeling.layers import nn_layers layers = tf.keras.layers # pylint: disable=pointless-string-statement """ Architecture: https://arxiv.org/abs/2004.14525. "MobileDets: Searching for Object Detection Architectures for Mobile Accelerators" Yunyang Xiong, Hanxiao Liu, Suyog Gupta, Berkin Akin, Gabriel Bender, Yongzhe Wang, Pieter-Jan Kindermans, Mingxing Tan, Vikas Singh, Bo Chen Note that `round_down_protection` flag should be set to false for scaling of the network. """ MD_CPU_BLOCK_SPECS = { 'spec_name': 'MobileDetCPU', # [expand_ratio] is set to 1 and [use_residual] is set to false # for inverted_bottleneck_no_expansion # [se_ratio] is set to 0.25 for all inverted_bottleneck layers # [activation] is set to 'hard_swish' for all applicable layers 'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters', 'activation', 'se_ratio', 'expand_ratio', 'use_residual', 'is_output'], 'block_specs': [ ('convbn', 3, 2, 16, 'hard_swish', None, None, None, False), # inverted_bottleneck_no_expansion ('invertedbottleneck', 3, 1, 8, 'hard_swish', 0.25, 1., False, True), ('invertedbottleneck', 3, 2, 16, 'hard_swish', 0.25, 4., False, True), ('invertedbottleneck', 3, 2, 32, 'hard_swish', 0.25, 8., False, False), ('invertedbottleneck', 3, 1, 32, 'hard_swish', 0.25, 4., True, False), ('invertedbottleneck', 3, 1, 32, 'hard_swish', 0.25, 4., True, False), ('invertedbottleneck', 3, 1, 32, 'hard_swish', 0.25, 4., True, True), ('invertedbottleneck', 5, 2, 72, 'hard_swish', 0.25, 8., False, False), ('invertedbottleneck', 3, 1, 72, 'hard_swish', 0.25, 8., True, False), ('invertedbottleneck', 5, 1, 72, 'hard_swish', 0.25, 4., True, False), ('invertedbottleneck', 3, 1, 72, 'hard_swish', 0.25, 4., True, False), ('invertedbottleneck', 3, 1, 72, 'hard_swish', 0.25, 8., False, False), ('invertedbottleneck', 3, 1, 72, 'hard_swish', 0.25, 8., True, False), ('invertedbottleneck', 3, 1, 72, 'hard_swish', 0.25, 8., True, False), ('invertedbottleneck', 3, 1, 72, 'hard_swish', 0.25, 8., True, True), ('invertedbottleneck', 5, 2, 104, 'hard_swish', 0.25, 8., False, False), ('invertedbottleneck', 5, 1, 104, 'hard_swish', 0.25, 4., True, False), ('invertedbottleneck', 5, 1, 104, 'hard_swish', 0.25, 4., True, False), ('invertedbottleneck', 3, 1, 104, 'hard_swish', 0.25, 4., True, False), ('invertedbottleneck', 3, 1, 144, 'hard_swish', 0.25, 8., False, True), ] } MD_DSP_BLOCK_SPECS = { 'spec_name': 'MobileDetDSP', # [expand_ratio] is set to 1 and [use_residual] is set to false # for inverted_bottleneck_no_expansion # [use_depthwise] is set to False for fused_conv # 
[se_ratio] is set to None for all inverted_bottleneck layers # [activation] is set to 'relu6' for all applicable layers 'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters', 'activation', 'se_ratio', 'expand_ratio', 'input_compression_ratio', 'output_compression_ratio', 'use_depthwise', 'use_residual', 'is_output'], 'block_specs': [ ('convbn', 3, 2, 32, 'relu6', None, None, None, None, None, None, False), # inverted_bottleneck_no_expansion ('invertedbottleneck', 3, 1, 24, 'relu6', None, 1., None, None, True, False, True), ('invertedbottleneck', 3, 2, 32, 'relu6', None, 4., None, None, False, False, False), # fused_conv ('invertedbottleneck', 3, 1, 32, 'relu6', None, 4., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 32, 'relu6', None, 4., None, None, True, True, False), ('tucker', 3, 1, 32, 'relu6', None, None, 0.25, 0.75, None, True, True), ('invertedbottleneck', 3, 2, 64, 'relu6', None, 8., None, None, False, False, False), # fused_conv ('invertedbottleneck', 3, 1, 64, 'relu6', None, 4., None, None, True, True, False), ('invertedbottleneck', 3, 1, 64, 'relu6', None, 4., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 64, 'relu6', None, 4., None, None, False, True, True), # fused_conv ('invertedbottleneck', 3, 2, 120, 'relu6', None, 8., None, None, False, False, False), # fused_conv ('invertedbottleneck', 3, 1, 120, 'relu6', None, 4., None, None, True, True, False), ('invertedbottleneck', 3, 1, 120, 'relu6', None, 8, None, None, True, True, False), ('invertedbottleneck', 3, 1, 120, 'relu6', None, 8., None, None, True, True, False), ('invertedbottleneck', 3, 1, 144, 'relu6', None, 8., None, None, False, False, False), # fused_conv ('invertedbottleneck', 3, 1, 144, 'relu6', None, 8., None, None, True, True, False), ('invertedbottleneck', 3, 1, 144, 'relu6', None, 8, None, None, True, True, False), ('invertedbottleneck', 3, 1, 144, 'relu6', None, 8., None, None, True, True, True), ('invertedbottleneck', 3, 2, 160, 'relu6', None, 4, None, None, True, False, False), ('invertedbottleneck', 3, 1, 160, 'relu6', None, 4, None, None, True, True, False), ('invertedbottleneck', 3, 1, 160, 'relu6', None, 4., None, None, False, False, False), # fused_conv ('tucker', 3, 1, 160, 'relu6', None, None, 0.75, 0.75, None, True, False), ('invertedbottleneck', 3, 1, 240, 'relu6', None, 8, None, None, True, False, True), ] } MD_EdgeTPU_BLOCK_SPECS = { 'spec_name': 'MobileDetEdgeTPU', # [use_depthwise] is set to False for fused_conv # [se_ratio] is set to None for all inverted_bottleneck layers # [activation] is set to 'relu6' for all applicable layers 'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters', 'activation', 'se_ratio', 'expand_ratio', 'input_compression_ratio', 'output_compression_ratio', 'use_depthwise', 'use_residual', 'is_output'], 'block_specs': [ ('convbn', 3, 2, 32, 'relu6', None, None, None, None, None, None, False), ('tucker', 3, 1, 16, 'relu6', None, None, 0.25, 0.75, None, False, True), ('invertedbottleneck', 3, 2, 16, 'relu6', None, 8., None, None, False, False, False), # fused_conv ('invertedbottleneck', 3, 1, 16, 'relu6', None, 4., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 16, 'relu6', None, 8., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 16, 'relu6', None, 4., None, None, False, True, True), # fused_conv ('invertedbottleneck', 5, 2, 40, 'relu6', None, 8., None, None, False, False, False), # fused_conv ('invertedbottleneck', 3, 1, 40, 
'relu6', None, 4., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 40, 'relu6', None, 4., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 40, 'relu6', None, 4., None, None, False, True, True), # fused_conv ('invertedbottleneck', 3, 2, 72, 'relu6', None, 8, None, None, True, False, False), ('invertedbottleneck', 3, 1, 72, 'relu6', None, 8, None, None, True, True, False), ('invertedbottleneck', 3, 1, 72, 'relu6', None, 4., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 72, 'relu6', None, 4., None, None, False, True, False), # fused_conv ('invertedbottleneck', 5, 1, 96, 'relu6', None, 8, None, None, True, False, False), ('invertedbottleneck', 5, 1, 96, 'relu6', None, 8, None, None, True, True, False), ('invertedbottleneck', 3, 1, 96, 'relu6', None, 8, None, None, True, True, False), ('invertedbottleneck', 3, 1, 96, 'relu6', None, 8, None, None, True, True, True), ('invertedbottleneck', 5, 2, 120, 'relu6', None, 8, None, None, True, False, False), ('invertedbottleneck', 3, 1, 120, 'relu6', None, 8, None, None, True, True, False), ('invertedbottleneck', 5, 1, 120, 'relu6', None, 4, None, None, True, True, False), ('invertedbottleneck', 3, 1, 120, 'relu6', None, 8, None, None, True, True, False), ('invertedbottleneck', 5, 1, 384, 'relu6', None, 8, None, None, True, False, True), ] } MD_GPU_BLOCK_SPECS = { 'spec_name': 'MobileDetGPU', # [use_depthwise] is set to False for fused_conv # [se_ratio] is set to None for all inverted_bottleneck layers # [activation] is set to 'relu6' for all applicable layers 'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters', 'activation', 'se_ratio', 'expand_ratio', 'input_compression_ratio', 'output_compression_ratio', 'use_depthwise', 'use_residual', 'is_output'], 'block_specs': [ # block 0 ('convbn', 3, 2, 32, 'relu6', None, None, None, None, None, None, False), # block 1 ('tucker', 3, 1, 16, 'relu6', None, None, 0.25, 0.25, None, False, True), # block 2 ('invertedbottleneck', 3, 2, 32, 'relu6', None, 8., None, None, False, False, False), # fused_conv ('tucker', 3, 1, 32, 'relu6', None, None, 0.25, 0.25, None, True, False), ('tucker', 3, 1, 32, 'relu6', None, None, 0.25, 0.25, None, True, False), ('tucker', 3, 1, 32, 'relu6', None, None, 0.25, 0.25, None, True, True), # block 3 ('invertedbottleneck', 3, 2, 64, 'relu6', None, 8., None, None, False, False, False), # fused_conv ('invertedbottleneck', 3, 1, 64, 'relu6', None, 8., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 64, 'relu6', None, 8., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 64, 'relu6', None, 4., None, None, False, True, True), # fused_conv # block 4 ('invertedbottleneck', 3, 2, 128, 'relu6', None, 8., None, None, False, False, False), # fused_conv ('invertedbottleneck', 3, 1, 128, 'relu6', None, 4., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 128, 'relu6', None, 4., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 128, 'relu6', None, 4., None, None, False, True, False), # fused_conv # block 5 ('invertedbottleneck', 3, 1, 128, 'relu6', None, 8., None, None, False, False, False), # fused_conv ('invertedbottleneck', 3, 1, 128, 'relu6', None, 8., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 128, 'relu6', None, 8., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 128, 'relu6', None, 8., None, None, False, True, True), # fused_conv # 
block 6 ('invertedbottleneck', 3, 2, 128, 'relu6', None, 4., None, None, False, False, False), # fused_conv ('invertedbottleneck', 3, 1, 128, 'relu6', None, 4., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 128, 'relu6', None, 4., None, None, False, True, False), # fused_conv ('invertedbottleneck', 3, 1, 128, 'relu6', None, 4., None, None, False, True, False), # fused_conv # block 7 ('invertedbottleneck', 3, 1, 384, 'relu6', None, 8, None, None, True, False, True), ] } SUPPORTED_SPECS_MAP = { 'MobileDetCPU': MD_CPU_BLOCK_SPECS, 'MobileDetDSP': MD_DSP_BLOCK_SPECS, 'MobileDetEdgeTPU': MD_EdgeTPU_BLOCK_SPECS, 'MobileDetGPU': MD_GPU_BLOCK_SPECS, } @dataclasses.dataclass class BlockSpec(hyperparams.Config): """A container class that specifies the block configuration for MobileDet.""" block_fn: str = 'convbn' kernel_size: int = 3 strides: int = 1 filters: int = 32 use_bias: bool = False use_normalization: bool = True activation: str = 'relu6' is_output: bool = True # Used for block type InvertedResConv and TuckerConvBlock. use_residual: bool = True # Used for block type InvertedResConv only. use_depthwise: bool = True expand_ratio: Optional[float] = 8. se_ratio: Optional[float] = None # Used for block type TuckerConvBlock only. input_compression_ratio: Optional[float] = None output_compression_ratio: Optional[float] = None def block_spec_decoder( specs: Dict[Any, Any], filter_size_scale: float, divisible_by: int = 8) -> List[BlockSpec]: """Decodes specs for a block. Args: specs: A `dict` specification of block specs of a mobiledet version. filter_size_scale: A `float` multiplier for the filter size for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. divisible_by: An `int` that ensures all inner dimensions are divisible by this number. Returns: A list of `BlockSpec` that defines structure of the base network. """ spec_name = specs['spec_name'] block_spec_schema = specs['block_spec_schema'] block_specs = specs['block_specs'] if not block_specs: raise ValueError( 'The block spec cannot be empty for {} !'.format(spec_name)) if len(block_specs[0]) != len(block_spec_schema): raise ValueError('The block spec values {} do not match with ' 'the schema {}'.format(block_specs[0], block_spec_schema)) decoded_specs = [] for s in block_specs: kw_s = dict(zip(block_spec_schema, s)) decoded_specs.append(BlockSpec(**kw_s)) for ds in decoded_specs: if ds.filters: ds.filters = nn_layers.round_filters(filters=ds.filters, multiplier=filter_size_scale, divisor=divisible_by, round_down_protect=False, min_depth=8) return decoded_specs @tf.keras.utils.register_keras_serializable(package='Vision') class MobileDet(tf.keras.Model): """Creates a MobileDet family model.""" def __init__( self, model_id: str = 'MobileDetCPU', filter_size_scale: float = 1.0, input_specs: tf.keras.layers.InputSpec = layers.InputSpec( shape=[None, None, None, 3]), # The followings are for hyper-parameter tuning. norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_initializer: str = 'VarianceScaling', kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, # The followings should be kept the same most of the times. min_depth: int = 8, divisible_by: int = 8, regularize_depthwise: bool = False, use_sync_bn: bool = False, **kwargs): """Initializes a MobileDet model. 
Args: model_id: A `str` of MobileDet version. The supported values are `MobileDetCPU`, `MobileDetDSP`, `MobileDetEdgeTPU`, `MobileDetGPU`. filter_size_scale: A `float` of multiplier for the filters (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. input_specs: A `tf.keras.layers.InputSpec` of specs of the input tensor. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_initializer: A `str` for kernel initializer of convolutional layers. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default to None. min_depth: An `int` of minimum depth (number of channels) for all convolution ops. Enforced when filter_size_scale < 1, and not an active constraint when filter_size_scale >= 1. divisible_by: An `int` that ensures all inner dimensions are divisible by this number. regularize_depthwise: If True, apply regularization on depthwise. use_sync_bn: If True, use synchronized batch normalization. **kwargs: Additional keyword arguments to be passed. """ if model_id not in SUPPORTED_SPECS_MAP: raise ValueError('The MobileDet version {} ' 'is not supported'.format(model_id)) if filter_size_scale <= 0: raise ValueError('filter_size_scale is not greater than zero.') self._model_id = model_id self._input_specs = input_specs self._filter_size_scale = filter_size_scale self._min_depth = min_depth self._divisible_by = divisible_by self._regularize_depthwise = regularize_depthwise self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_regularizer = bias_regularizer self._use_sync_bn = use_sync_bn self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon inputs = tf.keras.Input(shape=input_specs.shape[1:]) block_specs = SUPPORTED_SPECS_MAP.get(model_id) self._decoded_specs = block_spec_decoder( specs=block_specs, filter_size_scale=self._filter_size_scale, divisible_by=self._get_divisible_by()) x, endpoints, next_endpoint_level = self._mobiledet_base(inputs=inputs) self._output_specs = {l: endpoints[l].get_shape() for l in endpoints} super(MobileDet, self).__init__( inputs=inputs, outputs=endpoints, **kwargs) def _get_divisible_by(self): return self._divisible_by def _mobiledet_base(self, inputs: tf.Tensor ) -> Tuple[tf.Tensor, Dict[str, tf.Tensor], int]: """Builds the base MobileDet architecture. Args: inputs: A `tf.Tensor` of shape `[batch_size, height, width, channels]`. Returns: A tuple of output Tensor and dictionary that collects endpoints.
""" input_shape = inputs.get_shape().as_list() if len(input_shape) != 4: raise ValueError('Expected rank 4 input, was: %d' % len(input_shape)) net = inputs endpoints = {} endpoint_level = 1 for i, block_def in enumerate(self._decoded_specs): block_name = 'block_group_{}_{}'.format(block_def.block_fn, i) if block_def.block_fn == 'convbn': net = mobilenet.Conv2DBNBlock( filters=block_def.filters, kernel_size=block_def.kernel_size, strides=block_def.strides, activation=block_def.activation, use_bias=block_def.use_bias, use_normalization=block_def.use_normalization, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon )(net) elif block_def.block_fn == 'invertedbottleneck': in_filters = net.shape.as_list()[-1] net = nn_blocks.InvertedBottleneckBlock( in_filters=in_filters, out_filters=block_def.filters, kernel_size=block_def.kernel_size, strides=block_def.strides, expand_ratio=block_def.expand_ratio, se_ratio=block_def.se_ratio, se_inner_activation=block_def.activation, se_gating_activation='sigmoid', se_round_down_protect=False, expand_se_in_filters=True, activation=block_def.activation, use_depthwise=block_def.use_depthwise, use_residual=block_def.use_residual, regularize_depthwise=self._regularize_depthwise, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, divisible_by=self._get_divisible_by() )(net) elif block_def.block_fn == 'tucker': in_filters = net.shape.as_list()[-1] net = nn_blocks.TuckerConvBlock( in_filters=in_filters, out_filters=block_def.filters, kernel_size=block_def.kernel_size, strides=block_def.strides, input_compression_ratio=block_def.input_compression_ratio, output_compression_ratio=block_def.output_compression_ratio, activation=block_def.activation, use_residual=block_def.use_residual, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, divisible_by=self._get_divisible_by() )(net) else: raise ValueError('Unknown block type {} for layer {}'.format( block_def.block_fn, i)) net = tf.keras.layers.Activation('linear', name=block_name)(net) if block_def.is_output: endpoints[str(endpoint_level)] = net endpoint_level += 1 return net, endpoints, endpoint_level def get_config(self): config_dict = { 'model_id': self._model_id, 'filter_size_scale': self._filter_size_scale, 'min_depth': self._min_depth, 'divisible_by': self._divisible_by, 'regularize_depthwise': self._regularize_depthwise, 'kernel_initializer': self._kernel_initializer, 'kernel_regularizer': self._kernel_regularizer, 'bias_regularizer': self._bias_regularizer, 'use_sync_bn': self._use_sync_bn, 'norm_momentum': self._norm_momentum, 'norm_epsilon': self._norm_epsilon, } return config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def output_specs(self): """A dict of {level: TensorShape} pairs for the model output.""" return self._output_specs @factory.register_backbone_builder('mobiledet') def build_mobiledet( input_specs: tf.keras.layers.InputSpec, backbone_config: hyperparams.Config, norm_activation_config: hyperparams.Config, 
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None ) -> tf.keras.Model: """Builds MobileDet backbone from a config.""" backbone_type = backbone_config.type backbone_cfg = backbone_config.get() assert backbone_type == 'mobiledet', (f'Inconsistent backbone type ' f'{backbone_type}') return MobileDet( model_id=backbone_cfg.model_id, filter_size_scale=backbone_cfg.filter_size_scale, input_specs=input_specs, use_sync_bn=norm_activation_config.use_sync_bn, norm_momentum=norm_activation_config.norm_momentum, norm_epsilon=norm_activation_config.norm_epsilon, kernel_regularizer=l2_regularizer)
24,886
41.908621
80
py
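Not part of the original mobiledet.py above: a minimal sketch instantiating a MobileDet backbone and listing its endpoints, assuming the TF Model Garden package layout; MobileDet records one endpoint per block spec whose `is_output` flag is True.

import tensorflow as tf
from official.vision.modeling.backbones import mobiledet

backbone = mobiledet.MobileDet(
    model_id='MobileDetDSP',
    filter_size_scale=1.0,
    input_specs=tf.keras.layers.InputSpec(shape=[None, 320, 320, 3]))
endpoints = backbone(tf.zeros([1, 320, 320, 3]))
for level, feature_map in sorted(endpoints.items()):
  print(level, feature_map.shape)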
models
models-master/official/vision/modeling/heads/segmentation_heads.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of segmentation heads.""" from typing import List, Union, Optional, Mapping, Tuple, Any import tensorflow as tf from official.modeling import tf_utils from official.vision.modeling.layers import nn_layers from official.vision.ops import spatial_transform_ops class MaskScoring(tf.keras.Model): """Creates a mask scoring layer. This implements mask scoring layer from the paper: Zhaojin Huang, Lichao Huang, Yongchao Gong, Chang Huang, Xinggang Wang. Mask Scoring R-CNN. (https://arxiv.org/pdf/1903.00241.pdf) """ def __init__( self, num_classes: int, fc_input_size: List[int], num_convs: int = 3, num_filters: int = 256, use_depthwise_convolution: bool = False, fc_dims: int = 1024, num_fcs: int = 2, activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes mask scoring layer. Args: num_classes: An `int` for number of classes. fc_input_size: A List of `int` for the input size of the fully connected layers. num_convs: An`int` for number of conv layers. num_filters: An `int` for the number of filters for conv layers. use_depthwise_convolution: A `bool`, whether or not using depthwise convs. fc_dims: An `int` number of filters for each fully connected layers. num_fcs: An `int` for number of fully connected layers. activation: A `str` name of the activation function. use_sync_bn: A bool, whether or not to use sync batch normalization. norm_momentum: A float for the momentum in BatchNorm. Defaults to 0.99. norm_epsilon: A float for the epsilon value in BatchNorm. Defaults to 0.001. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. **kwargs: Additional keyword arguments to be passed. 
""" super(MaskScoring, self).__init__(**kwargs) self._config_dict = { 'num_classes': num_classes, 'num_convs': num_convs, 'num_filters': num_filters, 'fc_input_size': fc_input_size, 'fc_dims': fc_dims, 'num_fcs': num_fcs, 'use_sync_bn': use_sync_bn, 'use_depthwise_convolution': use_depthwise_convolution, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, 'activation': activation, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer, } if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation = tf_utils.get_activation(activation) def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]): """Creates the variables of the mask scoring head.""" conv_op = tf.keras.layers.Conv2D conv_kwargs = { 'filters': self._config_dict['num_filters'], 'kernel_size': 3, 'padding': 'same', } conv_kwargs.update({ 'kernel_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), 'bias_initializer': tf.zeros_initializer(), 'kernel_regularizer': self._config_dict['kernel_regularizer'], 'bias_regularizer': self._config_dict['bias_regularizer'], }) bn_op = tf.keras.layers.BatchNormalization bn_kwargs = { 'axis': self._bn_axis, 'momentum': self._config_dict['norm_momentum'], 'epsilon': self._config_dict['norm_epsilon'], 'synchronized': self._config_dict['use_sync_bn'], } self._convs = [] self._conv_norms = [] for i in range(self._config_dict['num_convs']): if self._config_dict['use_depthwise_convolution']: self._convs.append( tf.keras.layers.DepthwiseConv2D( name='mask-scoring-depthwise-conv-{}'.format(i), kernel_size=3, padding='same', use_bias=False, depthwise_initializer=tf.keras.initializers.RandomNormal( stddev=0.01), depthwise_regularizer=self._config_dict['kernel_regularizer'], depth_multiplier=1)) norm_name = 'mask-scoring-depthwise-bn-{}'.format(i) self._conv_norms.append(bn_op(name=norm_name, **bn_kwargs)) conv_name = 'mask-scoring-conv-{}'.format(i) if 'kernel_initializer' in conv_kwargs: conv_kwargs['kernel_initializer'] = tf_utils.clone_initializer( conv_kwargs['kernel_initializer']) if self._config_dict['use_depthwise_convolution']: conv_kwargs['kernel_size'] = 1 self._convs.append(conv_op(name=conv_name, **conv_kwargs)) bn_name = 'mask-scoring-bn-{}'.format(i) self._conv_norms.append(bn_op(name=bn_name, **bn_kwargs)) self._fcs = [] self._fc_norms = [] for i in range(self._config_dict['num_fcs']): fc_name = 'mask-scoring-fc-{}'.format(i) self._fcs.append( tf.keras.layers.Dense( units=self._config_dict['fc_dims'], kernel_initializer=tf.keras.initializers.VarianceScaling( scale=1 / 3.0, mode='fan_out', distribution='uniform'), kernel_regularizer=self._config_dict['kernel_regularizer'], bias_regularizer=self._config_dict['bias_regularizer'], name=fc_name)) bn_name = 'mask-scoring-fc-bn-{}'.format(i) self._fc_norms.append(bn_op(name=bn_name, **bn_kwargs)) self._classifier = tf.keras.layers.Dense( units=self._config_dict['num_classes'], kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), bias_initializer=tf.zeros_initializer(), kernel_regularizer=self._config_dict['kernel_regularizer'], bias_regularizer=self._config_dict['bias_regularizer'], name='iou-scores') super(MaskScoring, self).build(input_shape) def call(self, inputs: tf.Tensor, training: bool = None): # pytype: disable=signature-mismatch # overriding-parameter-count-checks """Forward pass mask scoring head. 
Args: inputs: A `tf.Tensor` of the shape [batch_size, height, width, num_classes], representing the segmentation logits. training: a `bool` indicating whether it is in `training` mode. Returns: mask_scores: A `tf.Tensor` of predicted mask scores [batch_size, num_classes]. """ x = tf.stop_gradient(inputs) for conv, bn in zip(self._convs, self._conv_norms): x = conv(x) x = bn(x) x = self._activation(x) # Casts feat to float32 so the resize op can be run on TPU. x = tf.cast(x, tf.float32) x = tf.image.resize(x, size=self._config_dict['fc_input_size'], method=tf.image.ResizeMethod.BILINEAR) # Casts it back to be compatible with the rest of the operations. x = tf.cast(x, inputs.dtype) _, h, w, filters = x.get_shape().as_list() x = tf.reshape(x, [-1, h * w * filters]) for fc, bn in zip(self._fcs, self._fc_norms): x = fc(x) x = bn(x) x = self._activation(x) ious = self._classifier(x) return ious def get_config(self) -> Mapping[str, Any]: return self._config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @tf.keras.utils.register_keras_serializable(package='Vision') class SegmentationHead(tf.keras.layers.Layer): """Creates a segmentation head.""" def __init__( self, num_classes: int, level: Union[int, str], num_convs: int = 2, num_filters: int = 256, use_depthwise_convolution: bool = False, prediction_kernel_size: int = 1, upsample_factor: int = 1, feature_fusion: Optional[str] = None, decoder_min_level: Optional[int] = None, decoder_max_level: Optional[int] = None, low_level: int = 2, low_level_num_filters: int = 48, num_decoder_filters: int = 256, activation: str = 'relu', logit_activation: Optional[str] = None, use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a segmentation head. Args: num_classes: An `int` number of mask classification categories. The number of classes does not include background class. level: An `int` or `str`, level to use to build segmentation head. num_convs: An `int` number of stacked convolution before the last prediction layer. num_filters: An `int` number to specify the number of filters used. Default is 256. use_depthwise_convolution: A bool to specify if use depthwise separable convolutions. prediction_kernel_size: An `int` number to specify the kernel size of the prediction layer. upsample_factor: An `int` number to specify the upsampling factor to generate finer mask. Default 1 means no upsampling is applied. feature_fusion: One of the constants in nn_layers.FeatureFusion, namely `deeplabv3plus`, `pyramid_fusion`, `panoptic_fpn_fusion`, `deeplabv3plus_sum_to_merge`, or None. If `deeplabv3plus`, features from decoder_features[level] will be fused with low level feature maps from backbone. If `pyramid_fusion`, multiscale features will be resized and fused at the target level. decoder_min_level: An `int` of minimum level from decoder to use in feature fusion. It is only used when feature_fusion is set to `panoptic_fpn_fusion`. decoder_max_level: An `int` of maximum level from decoder to use in feature fusion. It is only used when feature_fusion is set to `panoptic_fpn_fusion`. low_level: An `int` of backbone level to be used for feature fusion. It is used when feature_fusion is set to `deeplabv3plus` or `deeplabv3plus_sum_to_merge`.
low_level_num_filters: An `int` of reduced number of filters for the low level features before fusing it with higher level features. It is only used when feature_fusion is set to `deeplabv3plus` or `deeplabv3plus_sum_to_merge`. num_decoder_filters: An `int` of number of filters in the decoder outputs. It is only used when feature_fusion is set to `panoptic_fpn_fusion`. activation: A `str` that indicates which activation is used, e.g. 'relu', 'swish', etc. logit_activation: Activation applied to the final classifier layer logits, e.g. 'sigmoid', 'softmax'. Can be useful in cases when the task does not use only cross entropy loss. use_sync_bn: A `bool` that indicates whether to use synchronized batch normalization across different replicas. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. **kwargs: Additional keyword arguments to be passed. """ super(SegmentationHead, self).__init__(**kwargs) self._config_dict = { 'num_classes': num_classes, 'level': level, 'num_convs': num_convs, 'num_filters': num_filters, 'use_depthwise_convolution': use_depthwise_convolution, 'prediction_kernel_size': prediction_kernel_size, 'upsample_factor': upsample_factor, 'feature_fusion': feature_fusion, 'decoder_min_level': decoder_min_level, 'decoder_max_level': decoder_max_level, 'low_level': low_level, 'low_level_num_filters': low_level_num_filters, 'num_decoder_filters': num_decoder_filters, 'activation': activation, 'logit_activation': logit_activation, 'use_sync_bn': use_sync_bn, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer } if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation = tf_utils.get_activation(activation) def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]): """Creates the variables of the segmentation head.""" use_depthwise_convolution = self._config_dict['use_depthwise_convolution'] conv_op = tf.keras.layers.Conv2D bn_op = tf.keras.layers.BatchNormalization bn_kwargs = { 'axis': self._bn_axis, 'momentum': self._config_dict['norm_momentum'], 'epsilon': self._config_dict['norm_epsilon'], 'synchronized': self._config_dict['use_sync_bn'], } if self._config_dict['feature_fusion'] in {'deeplabv3plus', 'deeplabv3plus_sum_to_merge'}: # Deeplabv3+ feature fusion layers. 
self._dlv3p_conv = conv_op( kernel_size=1, padding='same', use_bias=False, kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), kernel_regularizer=self._config_dict['kernel_regularizer'], name='segmentation_head_deeplabv3p_fusion_conv', filters=self._config_dict['low_level_num_filters']) self._dlv3p_norm = bn_op( name='segmentation_head_deeplabv3p_fusion_norm', **bn_kwargs) elif self._config_dict['feature_fusion'] == 'panoptic_fpn_fusion': self._panoptic_fpn_fusion = nn_layers.PanopticFPNFusion( min_level=self._config_dict['decoder_min_level'], max_level=self._config_dict['decoder_max_level'], target_level=self._config_dict['level'], num_filters=self._config_dict['num_filters'], num_fpn_filters=self._config_dict['num_decoder_filters'], activation=self._config_dict['activation'], kernel_regularizer=self._config_dict['kernel_regularizer'], bias_regularizer=self._config_dict['bias_regularizer']) # Segmentation head layers. self._convs = [] self._norms = [] for i in range(self._config_dict['num_convs']): if use_depthwise_convolution: self._convs.append( tf.keras.layers.DepthwiseConv2D( name='segmentation_head_depthwise_conv_{}'.format(i), kernel_size=3, padding='same', use_bias=False, depthwise_initializer=tf.keras.initializers.RandomNormal( stddev=0.01), depthwise_regularizer=self._config_dict['kernel_regularizer'], depth_multiplier=1)) norm_name = 'segmentation_head_depthwise_norm_{}'.format(i) self._norms.append(bn_op(name=norm_name, **bn_kwargs)) conv_name = 'segmentation_head_conv_{}'.format(i) self._convs.append( conv_op( name=conv_name, filters=self._config_dict['num_filters'], kernel_size=3 if not use_depthwise_convolution else 1, padding='same', use_bias=False, kernel_initializer=tf.keras.initializers.RandomNormal( stddev=0.01), kernel_regularizer=self._config_dict['kernel_regularizer'])) norm_name = 'segmentation_head_norm_{}'.format(i) self._norms.append(bn_op(name=norm_name, **bn_kwargs)) self._classifier = conv_op( name='segmentation_output', filters=self._config_dict['num_classes'], kernel_size=self._config_dict['prediction_kernel_size'], padding='same', activation=self._config_dict['logit_activation'], bias_initializer=tf.zeros_initializer(), kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), kernel_regularizer=self._config_dict['kernel_regularizer'], bias_regularizer=self._config_dict['bias_regularizer']) super().build(input_shape) def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]], Union[tf.Tensor, Mapping[str, tf.Tensor]]]): """Forward pass of the segmentation head. It supports both a tuple of 2 tensors or 2 dictionaries. The first is backbone endpoints, and the second is decoder endpoints. When inputs are tensors, they are from a single level of feature maps. When inputs are dictionaries, they contain multiple levels of feature maps, where the key is the index of feature map. Args: inputs: A tuple of 2 feature map tensors of shape [batch, height_l, width_l, channels] or 2 dictionaries of tensors: - key: A `str` of the level of the multilevel features. - values: A `tf.Tensor` of the feature map tensors, whose shape is [batch, height_l, width_l, channels]. The first is backbone endpoints, and the second is decoder endpoints. Returns: segmentation prediction mask: A `tf.Tensor` of the segmentation mask scores predicted from input features. 
""" backbone_output = inputs[0] decoder_output = inputs[1] if self._config_dict['feature_fusion'] in {'deeplabv3plus', 'deeplabv3plus_sum_to_merge'}: # deeplabv3+ feature fusion x = decoder_output[str(self._config_dict['level'])] if isinstance( decoder_output, dict) else decoder_output y = backbone_output[str(self._config_dict['low_level'])] if isinstance( backbone_output, dict) else backbone_output y = self._dlv3p_norm(self._dlv3p_conv(y)) y = self._activation(y) x = tf.image.resize( x, tf.shape(y)[1:3], method=tf.image.ResizeMethod.BILINEAR) x = tf.cast(x, dtype=y.dtype) if self._config_dict['feature_fusion'] == 'deeplabv3plus': x = tf.concat([x, y], axis=self._bn_axis) else: x = tf.keras.layers.Add()([x, y]) elif self._config_dict['feature_fusion'] == 'pyramid_fusion': if not isinstance(decoder_output, dict): raise ValueError('Only support dictionary decoder_output.') x = nn_layers.pyramid_feature_fusion(decoder_output, self._config_dict['level']) elif self._config_dict['feature_fusion'] == 'panoptic_fpn_fusion': x = self._panoptic_fpn_fusion(decoder_output) else: x = decoder_output[str(self._config_dict['level'])] if isinstance( decoder_output, dict) else decoder_output for conv, norm in zip(self._convs, self._norms): x = conv(x) x = norm(x) x = self._activation(x) if self._config_dict['upsample_factor'] > 1: x = spatial_transform_ops.nearest_upsampling( x, scale=self._config_dict['upsample_factor']) return self._classifier(x) def get_config(self): base_config = super().get_config() return dict(list(base_config.items()) + list(self._config_dict.items())) @classmethod def from_config(cls, config): return cls(**config)
20,174
41.653277
134
py
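Before the next file, a minimal usage sketch for the segmentation heads defined above may help readers parse the call contract (a tuple of backbone and decoder endpoints, each either a single tensor or a dict keyed by pyramid level). The module path is assumed to be official.vision.modeling.heads.segmentation_heads as in the upstream Model Garden; all shapes, the class count, and the level are illustrative and not taken from the file.

import tensorflow as tf
from official.vision.modeling.heads import segmentation_heads

# Single-level features with no extra feature fusion (feature_fusion=None).
head = segmentation_heads.SegmentationHead(num_classes=21, level=3)
backbone_output = {'3': tf.random.normal([2, 64, 64, 256])}
decoder_output = {'3': tf.random.normal([2, 64, 64, 256])}
logits = head((backbone_output, decoder_output))
print(logits.shape)  # (2, 64, 64, 21) with the default upsample_factor=1.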
models
models-master/official/vision/modeling/heads/instance_heads.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of instance prediction heads.""" from typing import List, Union, Optional # Import libraries import tensorflow as tf from official.modeling import tf_utils @tf.keras.utils.register_keras_serializable(package='Vision') class DetectionHead(tf.keras.layers.Layer): """Creates a detection head.""" def __init__( self, num_classes: int, num_convs: int = 0, num_filters: int = 256, use_separable_conv: bool = False, num_fcs: int = 2, fc_dims: int = 1024, class_agnostic_bbox_pred: bool = False, activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a detection head. Args: num_classes: An `int` for the number of classes. num_convs: An `int` number that represents the number of the intermediate convolution layers before the FC layers. num_filters: An `int` number that represents the number of filters of the intermediate convolution layers. use_separable_conv: A `bool` that indicates whether the separable convolution layers is used. num_fcs: An `int` number that represents the number of FC layers before the predictions. fc_dims: An `int` number that represents the number of dimension of the FC layers. class_agnostic_bbox_pred: `bool`, indicating whether bboxes should be predicted for every class or not. activation: A `str` that indicates which activation is used, e.g. 'relu', 'swish', etc. use_sync_bn: A `bool` that indicates whether to use synchronized batch normalization across different replicas. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. **kwargs: Additional keyword arguments to be passed. 
""" super(DetectionHead, self).__init__(**kwargs) self._config_dict = { 'num_classes': num_classes, 'num_convs': num_convs, 'num_filters': num_filters, 'use_separable_conv': use_separable_conv, 'num_fcs': num_fcs, 'fc_dims': fc_dims, 'class_agnostic_bbox_pred': class_agnostic_bbox_pred, 'activation': activation, 'use_sync_bn': use_sync_bn, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer, } if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation = tf_utils.get_activation(activation) def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]): """Creates the variables of the head.""" conv_op = (tf.keras.layers.SeparableConv2D if self._config_dict['use_separable_conv'] else tf.keras.layers.Conv2D) conv_kwargs = { 'filters': self._config_dict['num_filters'], 'kernel_size': 3, 'padding': 'same', } if self._config_dict['use_separable_conv']: conv_kwargs.update({ 'depthwise_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), 'pointwise_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), 'bias_initializer': tf.zeros_initializer(), 'depthwise_regularizer': self._config_dict['kernel_regularizer'], 'pointwise_regularizer': self._config_dict['kernel_regularizer'], 'bias_regularizer': self._config_dict['bias_regularizer'], }) else: conv_kwargs.update({ 'kernel_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), 'bias_initializer': tf.zeros_initializer(), 'kernel_regularizer': self._config_dict['kernel_regularizer'], 'bias_regularizer': self._config_dict['bias_regularizer'], }) bn_op = tf.keras.layers.BatchNormalization bn_kwargs = { 'axis': self._bn_axis, 'momentum': self._config_dict['norm_momentum'], 'epsilon': self._config_dict['norm_epsilon'], 'synchronized': self._config_dict['use_sync_bn'], } self._convs = [] self._conv_norms = [] for i in range(self._config_dict['num_convs']): conv_name = 'detection-conv_{}'.format(i) if 'kernel_initializer' in conv_kwargs: conv_kwargs['kernel_initializer'] = tf_utils.clone_initializer( conv_kwargs['kernel_initializer']) self._convs.append(conv_op(name=conv_name, **conv_kwargs)) bn_name = 'detection-conv-bn_{}'.format(i) self._conv_norms.append(bn_op(name=bn_name, **bn_kwargs)) self._fcs = [] self._fc_norms = [] for i in range(self._config_dict['num_fcs']): fc_name = 'detection-fc_{}'.format(i) self._fcs.append( tf.keras.layers.Dense( units=self._config_dict['fc_dims'], kernel_initializer=tf.keras.initializers.VarianceScaling( scale=1 / 3.0, mode='fan_out', distribution='uniform'), kernel_regularizer=self._config_dict['kernel_regularizer'], bias_regularizer=self._config_dict['bias_regularizer'], name=fc_name)) bn_name = 'detection-fc-bn_{}'.format(i) self._fc_norms.append(bn_op(name=bn_name, **bn_kwargs)) self._classifier = tf.keras.layers.Dense( units=self._config_dict['num_classes'], kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), bias_initializer=tf.zeros_initializer(), kernel_regularizer=self._config_dict['kernel_regularizer'], bias_regularizer=self._config_dict['bias_regularizer'], name='detection-scores') num_box_outputs = (4 if self._config_dict['class_agnostic_bbox_pred'] else self._config_dict['num_classes'] * 4) self._box_regressor = tf.keras.layers.Dense( units=num_box_outputs, 
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.001), bias_initializer=tf.zeros_initializer(), kernel_regularizer=self._config_dict['kernel_regularizer'], bias_regularizer=self._config_dict['bias_regularizer'], name='detection-boxes') super(DetectionHead, self).build(input_shape) def call(self, inputs: tf.Tensor, training: bool = None): """Forward pass of box and class branches for the Mask-RCNN model. Args: inputs: A `tf.Tensor` of the shape [batch_size, num_instances, roi_height, roi_width, roi_channels], representing the ROI features. training: a `bool` indicating whether it is in `training` mode. Returns: class_outputs: A `tf.Tensor` of the shape [batch_size, num_rois, num_classes], representing the class predictions. box_outputs: A `tf.Tensor` of the shape [batch_size, num_rois, num_classes * 4], representing the box predictions. """ roi_features = inputs _, num_rois, height, width, filters = roi_features.get_shape().as_list() x = tf.reshape(roi_features, [-1, height, width, filters]) for conv, bn in zip(self._convs, self._conv_norms): x = conv(x) x = bn(x) x = self._activation(x) _, _, _, filters = x.get_shape().as_list() x = tf.reshape(x, [-1, num_rois, height * width * filters]) for fc, bn in zip(self._fcs, self._fc_norms): x = fc(x) x = bn(x) x = self._activation(x) classes = self._classifier(x) boxes = self._box_regressor(x) return classes, boxes def get_config(self): return self._config_dict @classmethod def from_config(cls, config): return cls(**config) @tf.keras.utils.register_keras_serializable(package='Vision') class MaskHead(tf.keras.layers.Layer): """Creates a mask head.""" def __init__( self, num_classes: int, upsample_factor: int = 2, num_convs: int = 4, num_filters: int = 256, use_separable_conv: bool = False, activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, class_agnostic: bool = False, **kwargs): """Initializes a mask head. Args: num_classes: An `int` of the number of classes. upsample_factor: An `int` that indicates the upsample factor to generate the final predicted masks. It should be >= 1. num_convs: An `int` number that represents the number of the intermediate convolution layers before the mask prediction layers. num_filters: An `int` number that represents the number of filters of the intermediate convolution layers. use_separable_conv: A `bool` that indicates whether the separable convolution layers is used. activation: A `str` that indicates which activation is used, e.g. 'relu', 'swish', etc. use_sync_bn: A `bool` that indicates whether to use synchronized batch normalization across different replicas. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. class_agnostic: A `bool`. If set, we use a single channel mask head that is shared between all classes. **kwargs: Additional keyword arguments to be passed. 
""" super(MaskHead, self).__init__(**kwargs) self._config_dict = { 'num_classes': num_classes, 'upsample_factor': upsample_factor, 'num_convs': num_convs, 'num_filters': num_filters, 'use_separable_conv': use_separable_conv, 'activation': activation, 'use_sync_bn': use_sync_bn, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer, 'class_agnostic': class_agnostic } if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation = tf_utils.get_activation(activation) def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]): """Creates the variables of the head.""" conv_op = (tf.keras.layers.SeparableConv2D if self._config_dict['use_separable_conv'] else tf.keras.layers.Conv2D) conv_kwargs = { 'filters': self._config_dict['num_filters'], 'kernel_size': 3, 'padding': 'same', } if self._config_dict['use_separable_conv']: conv_kwargs.update({ 'depthwise_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), 'pointwise_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), 'bias_initializer': tf.zeros_initializer(), 'depthwise_regularizer': self._config_dict['kernel_regularizer'], 'pointwise_regularizer': self._config_dict['kernel_regularizer'], 'bias_regularizer': self._config_dict['bias_regularizer'], }) else: conv_kwargs.update({ 'kernel_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), 'bias_initializer': tf.zeros_initializer(), 'kernel_regularizer': self._config_dict['kernel_regularizer'], 'bias_regularizer': self._config_dict['bias_regularizer'], }) bn_op = tf.keras.layers.BatchNormalization bn_kwargs = { 'axis': self._bn_axis, 'momentum': self._config_dict['norm_momentum'], 'epsilon': self._config_dict['norm_epsilon'], 'synchronized': self._config_dict['use_sync_bn'], } self._convs = [] self._conv_norms = [] for i in range(self._config_dict['num_convs']): conv_name = 'mask-conv_{}'.format(i) for initializer_name in ['kernel_initializer', 'depthwise_initializer', 'pointwise_initializer']: if initializer_name in conv_kwargs: conv_kwargs[initializer_name] = tf_utils.clone_initializer( conv_kwargs[initializer_name]) self._convs.append(conv_op(name=conv_name, **conv_kwargs)) bn_name = 'mask-conv-bn_{}'.format(i) self._conv_norms.append(bn_op(name=bn_name, **bn_kwargs)) self._deconv = tf.keras.layers.Conv2DTranspose( filters=self._config_dict['num_filters'], kernel_size=self._config_dict['upsample_factor'], strides=self._config_dict['upsample_factor'], padding='valid', kernel_initializer=tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), bias_initializer=tf.zeros_initializer(), kernel_regularizer=self._config_dict['kernel_regularizer'], bias_regularizer=self._config_dict['bias_regularizer'], name='mask-upsampling') self._deconv_bn = bn_op(name='mask-deconv-bn', **bn_kwargs) if self._config_dict['class_agnostic']: num_filters = 1 else: num_filters = self._config_dict['num_classes'] conv_kwargs = { 'filters': num_filters, 'kernel_size': 1, 'padding': 'valid', } if self._config_dict['use_separable_conv']: conv_kwargs.update({ 'depthwise_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), 'pointwise_initializer': tf.keras.initializers.VarianceScaling( 
scale=2, mode='fan_out', distribution='untruncated_normal'), 'bias_initializer': tf.zeros_initializer(), 'depthwise_regularizer': self._config_dict['kernel_regularizer'], 'pointwise_regularizer': self._config_dict['kernel_regularizer'], 'bias_regularizer': self._config_dict['bias_regularizer'], }) else: conv_kwargs.update({ 'kernel_initializer': tf.keras.initializers.VarianceScaling( scale=2, mode='fan_out', distribution='untruncated_normal'), 'bias_initializer': tf.zeros_initializer(), 'kernel_regularizer': self._config_dict['kernel_regularizer'], 'bias_regularizer': self._config_dict['bias_regularizer'], }) self._mask_regressor = conv_op(name='mask-logits', **conv_kwargs) super(MaskHead, self).build(input_shape) def call(self, inputs: List[tf.Tensor], training: bool = None): """Forward pass of mask branch for the Mask-RCNN model. Args: inputs: A `list` of two tensors where inputs[0]: A `tf.Tensor` of shape [batch_size, num_instances, roi_height, roi_width, roi_channels], representing the ROI features. inputs[1]: A `tf.Tensor` of shape [batch_size, num_instances], representing the classes of the ROIs. training: A `bool` indicating whether it is in `training` mode. Returns: mask_outputs: A `tf.Tensor` of shape [batch_size, num_instances, roi_height * upsample_factor, roi_width * upsample_factor], representing the mask predictions. """ roi_features, roi_classes = inputs _, num_rois, height, width, filters = roi_features.get_shape().as_list() x = tf.reshape(roi_features, [-1, height, width, filters]) for conv, bn in zip(self._convs, self._conv_norms): x = conv(x) x = bn(x) x = self._activation(x) x = self._deconv(x) x = self._deconv_bn(x) x = self._activation(x) logits = self._mask_regressor(x) mask_height = height * self._config_dict['upsample_factor'] mask_width = width * self._config_dict['upsample_factor'] if self._config_dict['class_agnostic']: return tf.reshape(logits, [-1, num_rois, mask_height, mask_width]) else: logits = tf.reshape( logits, [-1, num_rois, mask_height, mask_width, self._config_dict['num_classes']]) return tf.gather( logits, tf.cast(roi_classes, dtype=tf.int32), axis=-1, batch_dims=2 ) def get_config(self): return self._config_dict @classmethod def from_config(cls, config): return cls(**config)
17,819
40.059908
80
py
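The instance_heads.py file above is easiest to exercise with ROI-shaped dummy tensors. The following hedged sketch shows the expected input ranks and the resulting output shapes for DetectionHead and MaskHead; batch size, ROI count, and class count are arbitrary choices, not values from the file.

import tensorflow as tf
from official.vision.modeling.heads import instance_heads

det_head = instance_heads.DetectionHead(num_classes=91)
roi_features = tf.random.normal([2, 8, 7, 7, 256])  # [batch, rois, h, w, c]
class_outputs, box_outputs = det_head(roi_features)
print(class_outputs.shape, box_outputs.shape)  # (2, 8, 91) and (2, 8, 364)

mask_head = instance_heads.MaskHead(num_classes=91, upsample_factor=2)
mask_rois = tf.random.normal([2, 8, 14, 14, 256])
roi_classes = tf.ones([2, 8], dtype=tf.int32)
masks = mask_head([mask_rois, roi_classes])
print(masks.shape)  # (2, 8, 28, 28): masks gathered per predicted class.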
models
models-master/official/vision/modeling/heads/dense_prediction_heads.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains definitions of dense prediction heads.""" from typing import Any, Dict, List, Mapping, Optional, Union # Import libraries import numpy as np import tensorflow as tf from official.modeling import tf_utils @tf.keras.utils.register_keras_serializable(package='Vision') class RetinaNetHead(tf.keras.layers.Layer): """Creates a RetinaNet head.""" def __init__( self, min_level: int, max_level: int, num_classes: int, num_anchors_per_location: int, num_convs: int = 4, num_filters: int = 256, attribute_heads: Optional[List[Dict[str, Any]]] = None, share_classification_heads: bool = False, use_separable_conv: bool = False, activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, num_params_per_anchor: int = 4, share_level_convs: bool = True, **kwargs, ): """Initializes a RetinaNet head. Args: min_level: An `int` number of minimum feature level. max_level: An `int` number of maximum feature level. num_classes: An `int` number of classes to predict. num_anchors_per_location: An `int` number of anchors per pixel location. num_convs: An `int` number that represents the number of the intermediate conv layers before the prediction. num_filters: An `int` number that represents the number of filters of the intermediate conv layers. attribute_heads: If not None, a list that contains a dict for each additional attribute head. Each dict consists of 4 key-value pairs: `name`, `type` ('regression' or 'classification'), `size` (number of predicted values for each instance), and `prediction_tower_name` (optional, specifies shared prediction towers.) share_classification_heads: A `bool` that indicates whether sharing weights among the main and attribute classification heads. use_separable_conv: A `bool` that indicates whether the separable convolution layers is used. activation: A `str` that indicates which activation is used, e.g. 'relu', 'swish', etc. use_sync_bn: A `bool` that indicates whether to use synchronized batch normalization across different replicas. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. num_params_per_anchor: Number of parameters required to specify an anchor box. For example, `num_params_per_anchor` would be 4 for axis-aligned anchor boxes specified by their y-centers, x-centers, heights, and widths. share_level_convs: An optional bool to enable sharing convs across levels for classnet, boxnet, classifier and box regressor. If True, convs will be shared across all levels. **kwargs: Additional keyword arguments to be passed. 
""" super().__init__(**kwargs) self._config_dict = { 'min_level': min_level, 'max_level': max_level, 'num_classes': num_classes, 'num_anchors_per_location': num_anchors_per_location, 'num_convs': num_convs, 'num_filters': num_filters, 'attribute_heads': attribute_heads, 'share_classification_heads': share_classification_heads, 'use_separable_conv': use_separable_conv, 'activation': activation, 'use_sync_bn': use_sync_bn, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer, 'num_params_per_anchor': num_params_per_anchor, 'share_level_convs': share_level_convs, } if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation = tf_utils.get_activation(activation) self._conv_kwargs = { 'filters': self._config_dict['num_filters'], 'kernel_size': 3, 'padding': 'same', 'bias_initializer': tf.zeros_initializer(), 'bias_regularizer': self._config_dict['bias_regularizer'], } if not self._config_dict['use_separable_conv']: self._conv_kwargs.update({ 'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=0.01), 'kernel_regularizer': self._config_dict['kernel_regularizer'], }) self._bn_kwargs = { 'axis': self._bn_axis, 'momentum': self._config_dict['norm_momentum'], 'epsilon': self._config_dict['norm_epsilon'], } self._classifier_kwargs = { 'filters': ( self._config_dict['num_classes'] * self._config_dict['num_anchors_per_location'] ), 'kernel_size': 3, 'padding': 'same', 'bias_initializer': tf.constant_initializer(-np.log((1 - 0.01) / 0.01)), 'bias_regularizer': self._config_dict['bias_regularizer'], } if not self._config_dict['use_separable_conv']: self._classifier_kwargs.update({ 'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=1e-5), 'kernel_regularizer': self._config_dict['kernel_regularizer'], }) self._box_regressor_kwargs = { 'filters': ( self._config_dict['num_params_per_anchor'] * self._config_dict['num_anchors_per_location'] ), 'kernel_size': 3, 'padding': 'same', 'bias_initializer': tf.zeros_initializer(), 'bias_regularizer': self._config_dict['bias_regularizer'], } if not self._config_dict['use_separable_conv']: self._box_regressor_kwargs.update({ 'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=1e-5), 'kernel_regularizer': self._config_dict['kernel_regularizer'], }) if self._config_dict['attribute_heads']: self._init_attribute_kwargs() def _conv_kwargs_new_kernel_init(self, conv_kwargs): if 'kernel_initializer' in conv_kwargs: conv_kwargs['kernel_initializer'] = tf_utils.clone_initializer( conv_kwargs['kernel_initializer'] ) return conv_kwargs def _init_attribute_kwargs(self): self._attribute_kwargs = [] for att_config in self._config_dict['attribute_heads']: att_type = att_config['type'] att_size = att_config['size'] att_prediction_tower_name = att_config['prediction_tower_name'] att_predictor_kwargs = { 'filters': att_size * self._config_dict['num_anchors_per_location'], 'kernel_size': 3, 'padding': 'same', 'bias_initializer': tf.zeros_initializer(), 'bias_regularizer': self._config_dict['bias_regularizer'], } if att_type == 'regression': att_predictor_kwargs.update( {'bias_initializer': tf.zeros_initializer()} ) elif att_type == 'classification': att_predictor_kwargs.update( { 'bias_initializer': tf.constant_initializer( -np.log((1 - 0.01) / 0.01) ) } ) else: raise ValueError( 'Attribute head type {} not supported.'.format(att_type) ) if ( att_prediction_tower_name and 
self._config_dict['share_classification_heads'] ): raise ValueError( 'share_classification_heads cannot be set as True when' ' att_prediction_tower_name is specified.' ) if not self._config_dict['use_separable_conv']: att_predictor_kwargs.update({ 'kernel_initializer': tf.keras.initializers.RandomNormal( stddev=1e-5 ), 'kernel_regularizer': self._config_dict['kernel_regularizer'], }) self._attribute_kwargs.append(att_predictor_kwargs) def _apply_prediction_tower(self, features, convs, norms) -> tf.Tensor: x = features for conv, norm in zip(convs, norms): x = conv(x) x = norm(x) x = self._activation(x) return x def _apply_attribute_net( self, attributes, level, level_idx, this_level_features, classnet_x ): prediction_tower_output = {} for att_config in self._config_dict['attribute_heads']: att_name = att_config['name'] att_type = att_config['type'] if ( self._config_dict['share_classification_heads'] and att_type == 'classification' ): attributes[att_name][str(level)] = self._att_predictors[att_name]( classnet_x ) else: def _apply_attribute_prediction_tower( atttribute_name, features, feature_level ): return self._apply_prediction_tower( features, self._att_convs[atttribute_name], self._att_norms[atttribute_name][feature_level], ) prediction_tower_name = att_config['prediction_tower_name'] if not prediction_tower_name: attributes[att_name][str(level)] = self._att_predictors[att_name]( _apply_attribute_prediction_tower( att_name, this_level_features, level_idx ) ) else: if prediction_tower_name not in prediction_tower_output: prediction_tower_output[prediction_tower_name] = ( _apply_attribute_prediction_tower( att_name, this_level_features, level_idx ) ) attributes[att_name][str(level)] = self._att_predictors[att_name]( prediction_tower_output[prediction_tower_name] ) def _build_prediction_tower( self, net_name, predictor_name, conv_op, bn_op, predictor_kwargs ): """Builds the prediction tower. Convs across levels can be shared or not.""" convs = [] norms = [] for level in range( self._config_dict['min_level'], self._config_dict['max_level'] + 1 ): if not self._config_dict['share_level_convs']: this_level_convs = [] this_level_norms = [] for i in range(self._config_dict['num_convs']): conv_kwargs = self._conv_kwargs_new_kernel_init(self._conv_kwargs) if not self._config_dict['share_level_convs']: # Do not share convs. this_level_convs.append( conv_op(name=f'{net_name}-conv_{level}_{i}', **conv_kwargs) ) elif level == self._config_dict['min_level']: convs.append(conv_op(name=f'{net_name}-conv_{i}', **conv_kwargs)) this_level_norms.append( bn_op(name=f'{net_name}-conv-norm_{level}_{i}', **self._bn_kwargs) ) norms.append(this_level_norms) if not self._config_dict['share_level_convs']: convs.append(this_level_convs) # Create predictors after additional convs. 
if self._config_dict['share_level_convs']: predictors = conv_op(name=predictor_name, **predictor_kwargs) else: predictors = [] for level in range( self._config_dict['min_level'], self._config_dict['max_level'] + 1 ): predictors.append( conv_op(name=f'{predictor_name}-{level}', **predictor_kwargs) ) return convs, norms, predictors def _build_attribute_net(self, conv_op, bn_op): self._att_predictors = {} self._att_convs = {} self._att_norms = {} for att_config, att_predictor_kwargs in zip( self._config_dict['attribute_heads'], self._attribute_kwargs ): att_name = att_config['name'] att_num_convs = ( att_config.get('num_convs') or self._config_dict['num_convs'] ) att_num_filters = ( att_config.get('num_filters') or self._config_dict['num_filters'] ) if att_num_convs < 0: raise ValueError(f'Invalid `num_convs` {att_num_convs} for {att_name}.') if att_num_filters < 0: raise ValueError( f'Invalid `num_filters` {att_num_filters} for {att_name}.' ) att_conv_kwargs = self._conv_kwargs.copy() att_conv_kwargs['filters'] = att_num_filters att_convs_i = [] att_norms_i = [] # Build conv and norm layers. for level in range( self._config_dict['min_level'], self._config_dict['max_level'] + 1 ): this_level_att_norms = [] for i in range(att_num_convs): if level == self._config_dict['min_level']: att_conv_name = '{}-conv_{}'.format(att_name, i) att_convs_i.append(conv_op(name=att_conv_name, **att_conv_kwargs)) att_norm_name = '{}-conv-norm_{}_{}'.format(att_name, level, i) this_level_att_norms.append( bn_op(name=att_norm_name, **self._bn_kwargs) ) att_norms_i.append(this_level_att_norms) self._att_convs[att_name] = att_convs_i self._att_norms[att_name] = att_norms_i # Build the final prediction layer. self._att_predictors[att_name] = conv_op( name='{}_attributes'.format(att_name), **att_predictor_kwargs ) def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]): """Creates the variables of the head.""" conv_op = ( tf.keras.layers.SeparableConv2D if self._config_dict['use_separable_conv'] else tf.keras.layers.Conv2D ) bn_op = ( tf.keras.layers.experimental.SyncBatchNormalization if self._config_dict['use_sync_bn'] else tf.keras.layers.BatchNormalization ) # Class net. self._cls_convs, self._cls_norms, self._classifier = ( self._build_prediction_tower( 'classnet', 'scores', conv_op, bn_op, self._classifier_kwargs ) ) # Box net. self._box_convs, self._box_norms, self._box_regressor = ( self._build_prediction_tower( 'boxnet', 'boxes', conv_op, bn_op, self._box_regressor_kwargs ) ) # Attribute learning nets. if self._config_dict['attribute_heads']: self._build_attribute_net(conv_op, bn_op) super().build(input_shape) def call(self, features: Mapping[str, tf.Tensor]): """Forward pass of the RetinaNet head. Args: features: A `dict` of `tf.Tensor` where - key: A `str` of the level of the multilevel features. - values: A `tf.Tensor`, the feature map tensors, whose shape is [batch, height_l, width_l, channels]. Returns: scores: A `dict` of `tf.Tensor` which includes scores of the predictions. - key: A `str` of the level of the multilevel predictions. - values: A `tf.Tensor` of the box scores predicted from a particular feature level, whose shape is [batch, height_l, width_l, num_classes * num_anchors_per_location]. boxes: A `dict` of `tf.Tensor` which includes coordinates of the predictions. - key: A `str` of the level of the multilevel predictions. 
- values: A `tf.Tensor` of the box scores predicted from a particular feature level, whose shape is [batch, height_l, width_l, num_params_per_anchor * num_anchors_per_location]. attributes: a dict of (attribute_name, attribute_prediction). Each `attribute_prediction` is a dict of: - key: `str`, the level of the multilevel predictions. - values: `Tensor`, the box scores predicted from a particular feature level, whose shape is [batch, height_l, width_l, attribute_size * num_anchors_per_location]. Can be an empty dictionary if no attribute learning is required. """ scores = {} boxes = {} if self._config_dict['attribute_heads']: attributes = { att_config['name']: {} for att_config in self._config_dict['attribute_heads'] } else: attributes = {} for i, level in enumerate( range(self._config_dict['min_level'], self._config_dict['max_level'] + 1)): this_level_features = features[str(level)] if self._config_dict['share_level_convs']: cls_convs = self._cls_convs box_convs = self._box_convs classifier = self._classifier box_regressor = self._box_regressor else: cls_convs = self._cls_convs[i] box_convs = self._box_convs[i] classifier = self._classifier[i] box_regressor = self._box_regressor[i] # Apply class net. x = self._apply_prediction_tower( this_level_features, cls_convs, self._cls_norms[i] ) scores[str(level)] = classifier(x) classnet_x = x # Apply box net. x = self._apply_prediction_tower( this_level_features, box_convs, self._box_norms[i] ) boxes[str(level)] = box_regressor(x) # Apply attribute nets. if self._config_dict['attribute_heads']: self._apply_attribute_net( attributes, level, i, this_level_features, classnet_x ) return scores, boxes, attributes def get_config(self): return self._config_dict @classmethod def from_config(cls, config): return cls(**config) @tf.keras.utils.register_keras_serializable(package='Vision') class RPNHead(tf.keras.layers.Layer): """Creates a Region Proposal Network (RPN) head.""" def __init__( self, min_level: int, max_level: int, num_anchors_per_location: int, num_convs: int = 1, num_filters: int = 256, use_separable_conv: bool = False, activation: str = 'relu', use_sync_bn: bool = False, norm_momentum: float = 0.99, norm_epsilon: float = 0.001, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, **kwargs): """Initializes a Region Proposal Network head. Args: min_level: An `int` number of minimum feature level. max_level: An `int` number of maximum feature level. num_anchors_per_location: An `int` number of number of anchors per pixel location. num_convs: An `int` number that represents the number of the intermediate convolution layers before the prediction. num_filters: An `int` number that represents the number of filters of the intermediate convolution layers. use_separable_conv: A `bool` that indicates whether the separable convolution layers is used. activation: A `str` that indicates which activation is used, e.g. 'relu', 'swish', etc. use_sync_bn: A `bool` that indicates whether to use synchronized batch normalization across different replicas. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. Default is None. bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D. **kwargs: Additional keyword arguments to be passed. 
""" super(RPNHead, self).__init__(**kwargs) self._config_dict = { 'min_level': min_level, 'max_level': max_level, 'num_anchors_per_location': num_anchors_per_location, 'num_convs': num_convs, 'num_filters': num_filters, 'use_separable_conv': use_separable_conv, 'activation': activation, 'use_sync_bn': use_sync_bn, 'norm_momentum': norm_momentum, 'norm_epsilon': norm_epsilon, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer, } if tf.keras.backend.image_data_format() == 'channels_last': self._bn_axis = -1 else: self._bn_axis = 1 self._activation = tf_utils.get_activation(activation) def build(self, input_shape): """Creates the variables of the head.""" conv_op = (tf.keras.layers.SeparableConv2D if self._config_dict['use_separable_conv'] else tf.keras.layers.Conv2D) conv_kwargs = { 'filters': self._config_dict['num_filters'], 'kernel_size': 3, 'padding': 'same', 'bias_initializer': tf.zeros_initializer(), 'bias_regularizer': self._config_dict['bias_regularizer'], } if not self._config_dict['use_separable_conv']: conv_kwargs.update({ 'kernel_initializer': tf.keras.initializers.RandomNormal( stddev=0.01), 'kernel_regularizer': self._config_dict['kernel_regularizer'], }) bn_op = (tf.keras.layers.experimental.SyncBatchNormalization if self._config_dict['use_sync_bn'] else tf.keras.layers.BatchNormalization) bn_kwargs = { 'axis': self._bn_axis, 'momentum': self._config_dict['norm_momentum'], 'epsilon': self._config_dict['norm_epsilon'], } self._convs = [] self._norms = [] for level in range( self._config_dict['min_level'], self._config_dict['max_level'] + 1): this_level_norms = [] for i in range(self._config_dict['num_convs']): if level == self._config_dict['min_level']: conv_name = 'rpn-conv_{}'.format(i) if 'kernel_initializer' in conv_kwargs: conv_kwargs['kernel_initializer'] = tf_utils.clone_initializer( conv_kwargs['kernel_initializer']) self._convs.append(conv_op(name=conv_name, **conv_kwargs)) norm_name = 'rpn-conv-norm_{}_{}'.format(level, i) this_level_norms.append(bn_op(name=norm_name, **bn_kwargs)) self._norms.append(this_level_norms) classifier_kwargs = { 'filters': self._config_dict['num_anchors_per_location'], 'kernel_size': 1, 'padding': 'valid', 'bias_initializer': tf.zeros_initializer(), 'bias_regularizer': self._config_dict['bias_regularizer'], } if not self._config_dict['use_separable_conv']: classifier_kwargs.update({ 'kernel_initializer': tf.keras.initializers.RandomNormal( stddev=1e-5), 'kernel_regularizer': self._config_dict['kernel_regularizer'], }) self._classifier = conv_op(name='rpn-scores', **classifier_kwargs) box_regressor_kwargs = { 'filters': 4 * self._config_dict['num_anchors_per_location'], 'kernel_size': 1, 'padding': 'valid', 'bias_initializer': tf.zeros_initializer(), 'bias_regularizer': self._config_dict['bias_regularizer'], } if not self._config_dict['use_separable_conv']: box_regressor_kwargs.update({ 'kernel_initializer': tf.keras.initializers.RandomNormal( stddev=1e-5), 'kernel_regularizer': self._config_dict['kernel_regularizer'], }) self._box_regressor = conv_op(name='rpn-boxes', **box_regressor_kwargs) super(RPNHead, self).build(input_shape) def call(self, features: Mapping[str, tf.Tensor]): """Forward pass of the RPN head. Args: features: A `dict` of `tf.Tensor` where - key: A `str` of the level of the multilevel features. - values: A `tf.Tensor`, the feature map tensors, whose shape is [batch, height_l, width_l, channels]. Returns: scores: A `dict` of `tf.Tensor` which includes scores of the predictions. 
- key: A `str` of the level of the multilevel predictions. - values: A `tf.Tensor` of the box scores predicted from a particular feature level, whose shape is [batch, height_l, width_l, num_classes * num_anchors_per_location]. boxes: A `dict` of `tf.Tensor` which includes coordinates of the predictions. - key: A `str` of the level of the multilevel predictions. - values: A `tf.Tensor` of the box scores predicted from a particular feature level, whose shape is [batch, height_l, width_l, 4 * num_anchors_per_location]. """ scores = {} boxes = {} for i, level in enumerate( range(self._config_dict['min_level'], self._config_dict['max_level'] + 1)): x = features[str(level)] for conv, norm in zip(self._convs, self._norms[i]): x = conv(x) x = norm(x) x = self._activation(x) scores[str(level)] = self._classifier(x) boxes[str(level)] = self._box_regressor(x) return scores, boxes def get_config(self): return self._config_dict @classmethod def from_config(cls, config): return cls(**config)
25,552
37.195815
80
py
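To make the multilevel dictionary interface of dense_prediction_heads.py concrete, here is a small hedged example for RetinaNetHead; the min/max levels, anchor count, and spatial sizes are illustrative assumptions rather than values prescribed by the file.

import tensorflow as tf
from official.vision.modeling.heads import dense_prediction_heads

head = dense_prediction_heads.RetinaNetHead(
    min_level=3, max_level=5, num_classes=80, num_anchors_per_location=9)
# Feature pyramid keyed by string level; spatial size halves per level.
features = {
    str(level): tf.random.normal(
        [2, 64 // 2**(level - 3), 64 // 2**(level - 3), 256])
    for level in range(3, 6)
}
scores, boxes, attributes = head(features)
print(scores['3'].shape)  # (2, 64, 64, 720): 80 classes * 9 anchors.
print(boxes['3'].shape)   # (2, 64, 64, 36): 4 box params * 9 anchors.
print(attributes)         # {} because no attribute_heads were configured.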
models
models-master/official/vision/tasks/video_classification.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Video classification task definition.""" from typing import Any, Optional, List, Tuple from absl import logging import tensorflow as tf from official.core import base_task from official.core import task_factory from official.modeling import tf_utils from official.vision.configs import video_classification as exp_cfg from official.vision.dataloaders import input_reader_factory from official.vision.dataloaders import video_input from official.vision.modeling import factory_3d from official.vision.ops import augment @task_factory.register_task_cls(exp_cfg.VideoClassificationTask) class VideoClassificationTask(base_task.Task): """A task for video classification.""" def _get_num_classes(self): """Gets the number of classes.""" return self.task_config.train_data.num_classes def _get_feature_shape(self): """Get the common feature shape for train and eval.""" return [ d1 if d1 == d2 else None for d1, d2 in zip(self.task_config.train_data.feature_shape, self.task_config.validation_data.feature_shape) ] def _get_num_test_views(self): """Gets number of views for test.""" num_test_clips = self.task_config.validation_data.num_test_clips num_test_crops = self.task_config.validation_data.num_test_crops num_test_views = num_test_clips * num_test_crops return num_test_views def _is_multilabel(self): """If the label is multi-labels.""" return self.task_config.train_data.is_multilabel def build_model(self): """Builds video classification model.""" common_input_shape = self._get_feature_shape() input_specs = tf.keras.layers.InputSpec(shape=[None] + common_input_shape) logging.info('Build model input %r', common_input_shape) l2_weight_decay = float(self.task_config.losses.l2_weight_decay) # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss. # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2) # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss) l2_regularizer = (tf.keras.regularizers.l2( l2_weight_decay / 2.0) if l2_weight_decay else None) model = factory_3d.build_model( self.task_config.model.model_type, input_specs=input_specs, model_config=self.task_config.model, num_classes=self._get_num_classes(), l2_regularizer=l2_regularizer) if self.task_config.freeze_backbone: logging.info('Freezing model backbone.') model.backbone.trainable = False return model def initialize(self, model: tf.keras.Model): """Loads pretrained checkpoint.""" if not self.task_config.init_checkpoint: return ckpt_dir_or_file = self.task_config.init_checkpoint if tf.io.gfile.isdir(ckpt_dir_or_file): ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) # Restoring checkpoint. 
    if self.task_config.init_checkpoint_modules == 'all':
      ckpt = tf.train.Checkpoint(model=model)
      status = ckpt.read(ckpt_dir_or_file)
      status.expect_partial().assert_existing_objects_matched()
    elif self.task_config.init_checkpoint_modules == 'backbone':
      ckpt = tf.train.Checkpoint(backbone=model.backbone)
      status = ckpt.read(ckpt_dir_or_file)
      status.expect_partial().assert_existing_objects_matched()
    else:
      raise ValueError(
          "Only 'all' or 'backbone' can be used to initialize the model.")

    logging.info('Finished loading pretrained checkpoint from %s',
                 ckpt_dir_or_file)

  def _get_dataset_fn(self, params):
    if params.file_type == 'tfrecord':
      return tf.data.TFRecordDataset
    else:
      raise ValueError('Unknown input file type {!r}'.format(params.file_type))

  def _get_decoder_fn(self, params):
    if params.tfds_name:
      decoder = video_input.VideoTfdsDecoder(
          image_key=params.image_field_key, label_key=params.label_field_key)
    else:
      decoder = video_input.Decoder(
          image_key=params.image_field_key, label_key=params.label_field_key)
    if self.task_config.train_data.output_audio:
      assert self.task_config.train_data.audio_feature, 'audio feature is empty'
      decoder.add_feature(self.task_config.train_data.audio_feature,
                          tf.io.VarLenFeature(dtype=tf.float32))
    return decoder.decode

  def build_inputs(self,
                   params: exp_cfg.DataConfig,
                   input_context: Optional[tf.distribute.InputContext] = None):
    """Builds classification input."""

    parser = video_input.Parser(
        input_params=params,
        image_key=params.image_field_key,
        label_key=params.label_field_key)
    postprocess_fn = video_input.PostBatchProcessor(params)
    if params.mixup_and_cutmix is not None:

      def mixup_and_cutmix(features, labels):
        augmenter = augment.MixupAndCutmix(
            mixup_alpha=params.mixup_and_cutmix.mixup_alpha,
            cutmix_alpha=params.mixup_and_cutmix.cutmix_alpha,
            prob=params.mixup_and_cutmix.prob,
            label_smoothing=params.mixup_and_cutmix.label_smoothing,
            num_classes=self._get_num_classes())
        features['image'], labels = augmenter(features['image'], labels)
        return features, labels

      postprocess_fn = mixup_and_cutmix

    reader = input_reader_factory.input_reader_generator(
        params,
        dataset_fn=self._get_dataset_fn(params),
        decoder_fn=self._get_decoder_fn(params),
        parser_fn=parser.parse_fn(params.is_training),
        postprocess_fn=postprocess_fn)

    dataset = reader.read(input_context=input_context)

    return dataset

  def build_losses(self,
                   labels: Any,
                   model_outputs: Any,
                   aux_losses: Optional[Any] = None):
    """Sparse categorical cross entropy loss.

    Args:
      labels: labels.
      model_outputs: Output probabilities of the classifier (softmax or
        sigmoid is applied before this call, so the loss uses
        `from_logits=False`).
      aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.

    Returns:
      The total loss tensor.
""" all_losses = {} losses_config = self.task_config.losses total_loss = None if self._is_multilabel(): entropy = -tf.reduce_mean( tf.reduce_sum(model_outputs * tf.math.log(model_outputs + 1e-8), -1)) total_loss = tf.keras.losses.binary_crossentropy( labels, model_outputs, from_logits=False) all_losses.update({ 'class_loss': total_loss, 'entropy': entropy, }) else: if losses_config.one_hot: total_loss = tf.keras.losses.categorical_crossentropy( labels, model_outputs, from_logits=False, label_smoothing=losses_config.label_smoothing) else: total_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, model_outputs, from_logits=False) total_loss = tf_utils.safe_mean(total_loss) all_losses.update({ 'class_loss': total_loss, }) if aux_losses: all_losses.update({ 'reg_loss': aux_losses, }) total_loss += tf.add_n(aux_losses) all_losses[self.loss] = total_loss return all_losses def build_metrics(self, training: bool = True): """Gets streaming metrics for training/validation.""" if self.task_config.losses.one_hot: metrics = [ tf.keras.metrics.CategoricalAccuracy(name='accuracy'), tf.keras.metrics.TopKCategoricalAccuracy(k=1, name='top_1_accuracy'), tf.keras.metrics.TopKCategoricalAccuracy(k=5, name='top_5_accuracy') ] if self._is_multilabel(): metrics.append( tf.keras.metrics.AUC( curve='ROC', multi_label=self._is_multilabel(), name='ROC-AUC')) metrics.append( tf.keras.metrics.RecallAtPrecision( 0.95, name='RecallAtPrecision95')) metrics.append( tf.keras.metrics.AUC( curve='PR', multi_label=self._is_multilabel(), name='PR-AUC')) if self.task_config.metrics.use_per_class_recall: for i in range(self._get_num_classes()): metrics.append( tf.keras.metrics.Recall(class_id=i, name=f'recall-{i}')) else: metrics = [ tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'), tf.keras.metrics.SparseTopKCategoricalAccuracy( k=1, name='top_1_accuracy'), tf.keras.metrics.SparseTopKCategoricalAccuracy( k=5, name='top_5_accuracy') ] return metrics def process_metrics(self, metrics: List[Any], labels: Any, model_outputs: Any): """Process and update metrics. Called when using custom training loop API. Args: metrics: a nested structure of metrics objects. The return of function self.build_metrics. labels: a tensor or a nested structure of tensors. model_outputs: a tensor or a nested structure of tensors. For example, output of the keras model built by self.build_model. """ for metric in metrics: metric.update_state(labels, model_outputs) def train_step(self, inputs: Tuple[Any, Any], model: tf.keras.Model, optimizer: tf.keras.optimizers.Optimizer, metrics: Optional[List[Any]] = None): """Does forward and backward. Args: inputs: a dictionary of input tensors. model: the model, forward pass definition. optimizer: the optimizer for this training step. metrics: a nested structure of metrics objects. Returns: A dictionary of logs. """ features, labels = inputs input_partition_dims = self.task_config.train_input_partition_dims if input_partition_dims: strategy = tf.distribute.get_strategy() features['image'] = strategy.experimental_split_to_logical_devices( features['image'], input_partition_dims) num_replicas = tf.distribute.get_strategy().num_replicas_in_sync with tf.GradientTape() as tape: outputs = model(features, training=True) # Casting output layer as float32 is necessary when mixed_precision is # mixed_float16 or mixed_bfloat16 to ensure output is casted as float32. outputs = tf.nest.map_structure( lambda x: tf.cast(x, tf.float32), outputs) # Computes per-replica loss. 
      if self._is_multilabel():
        outputs = tf.nest.map_structure(tf.math.sigmoid, outputs)
      else:
        outputs = tf.nest.map_structure(tf.math.softmax, outputs)
      all_losses = self.build_losses(
          model_outputs=outputs, labels=labels, aux_losses=model.losses)
      loss = all_losses[self.loss]
      # Scales loss as the default gradients allreduce performs sum inside the
      # optimizer.
      scaled_loss = loss / num_replicas

      # For mixed_precision policy, when LossScaleOptimizer is used, loss is
      # scaled for numerical stability.
      if isinstance(
          optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
        scaled_loss = optimizer.get_scaled_loss(scaled_loss)

    tvars = model.trainable_variables
    grads = tape.gradient(scaled_loss, tvars)
    # Scales back gradient before apply_gradients when LossScaleOptimizer is
    # used.
    if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
      grads = optimizer.get_unscaled_gradients(grads)
    optimizer.apply_gradients(list(zip(grads, tvars)))

    logs = all_losses
    if metrics:
      self.process_metrics(metrics, labels, outputs)
      logs.update({m.name: m.result() for m in metrics})
    elif model.compiled_metrics:
      self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
      logs.update({m.name: m.result() for m in model.metrics})

    return logs

  def validation_step(self,
                      inputs: Tuple[Any, Any],
                      model: tf.keras.Model,
                      metrics: Optional[List[Any]] = None):
    """Validation step.

    Args:
      inputs: a dictionary of input tensors.
      model: the keras.Model.
      metrics: a nested structure of metrics objects.

    Returns:
      A dictionary of logs.
    """
    features, labels = inputs
    input_partition_dims = self.task_config.eval_input_partition_dims
    if input_partition_dims:
      strategy = tf.distribute.get_strategy()
      features['image'] = strategy.experimental_split_to_logical_devices(
          features['image'], input_partition_dims)

    outputs = self.inference_step(features, model)
    outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
    logs = self.build_losses(model_outputs=outputs, labels=labels,
                             aux_losses=model.losses)

    if metrics:
      self.process_metrics(metrics, labels, outputs)
      logs.update({m.name: m.result() for m in metrics})
    elif model.compiled_metrics:
      self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
      logs.update({m.name: m.result() for m in model.metrics})

    return logs

  def inference_step(self, features: tf.Tensor, model: tf.keras.Model):
    """Performs the forward step."""
    outputs = model(features, training=False)
    if self._is_multilabel():
      outputs = tf.nest.map_structure(tf.math.sigmoid, outputs)
    else:
      outputs = tf.nest.map_structure(tf.math.softmax, outputs)
    num_test_views = self._get_num_test_views()
    if num_test_views > 1:
      # Averaging output probabilities across multiple views.
      outputs = tf.reshape(outputs, [-1, num_test_views, outputs.shape[-1]])
      outputs = tf.reduce_mean(outputs, axis=1)
    return outputs
14,308
37.672973
80
py
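One non-obvious aspect of video_classification.py above is that build_losses expects probabilities rather than logits (the task applies softmax or sigmoid first and uses from_logits=False). The hedged sketch below illustrates that contract with made-up shapes; it assumes the default task config is sufficient for constructing the task object.

import tensorflow as tf
from official.vision.configs import video_classification as exp_cfg
from official.vision.tasks import video_classification

task = video_classification.VideoClassificationTask(
    exp_cfg.VideoClassificationTask())
num_classes = 10  # Illustrative; real runs take this from the data config.
labels = tf.one_hot([1, 3], depth=num_classes)
probs = tf.nn.softmax(tf.random.normal([2, num_classes]))
all_losses = task.build_losses(labels=labels, model_outputs=probs)
print(all_losses[task.loss], all_losses['class_loss'])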
models
models-master/official/vision/tasks/retinanet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""RetinaNet task definition."""

from typing import Any, List, Mapping, Optional, Tuple

from absl import logging
import tensorflow as tf

from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.vision.configs import retinanet as exp_cfg
from official.vision.dataloaders import input_reader
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import retinanet_input
from official.vision.dataloaders import tf_example_decoder
from official.vision.dataloaders import tfds_factory
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.losses import focal_loss
from official.vision.losses import loss_utils
from official.vision.modeling import factory
from official.vision.utils.object_detection import visualization_utils


@task_factory.register_task_cls(exp_cfg.RetinaNetTask)
class RetinaNetTask(base_task.Task):
  """A single-replica view of training procedure.

  RetinaNet task provides artifacts for training/evaluation procedures,
  including loading/iterating over Datasets, initializing the model,
  calculating the loss, post-processing, and customized metrics with
  reduction.
  """

  def build_model(self):
    """Build RetinaNet model."""
    input_specs = tf.keras.layers.InputSpec(
        shape=[None] + self.task_config.model.input_size)

    l2_weight_decay = self.task_config.losses.l2_weight_decay
    # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
    # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
    # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
    l2_regularizer = (tf.keras.regularizers.l2(
        l2_weight_decay / 2.0) if l2_weight_decay else None)

    model = factory.build_retinanet(
        input_specs=input_specs,
        model_config=self.task_config.model,
        l2_regularizer=l2_regularizer)

    if self.task_config.freeze_backbone:
      model.backbone.trainable = False

    return model

  def initialize(self, model: tf.keras.Model):
    """Loads pretrained checkpoint."""
    if not self.task_config.init_checkpoint:
      return

    ckpt_dir_or_file = self.task_config.init_checkpoint
    if tf.io.gfile.isdir(ckpt_dir_or_file):
      ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)

    # Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all': ckpt = tf.train.Checkpoint(**model.checkpoint_items) status = ckpt.read(ckpt_dir_or_file) status.expect_partial().assert_existing_objects_matched() else: ckpt_items = {} if 'backbone' in self.task_config.init_checkpoint_modules: ckpt_items.update(backbone=model.backbone) if 'decoder' in self.task_config.init_checkpoint_modules: ckpt_items.update(decoder=model.decoder) ckpt = tf.train.Checkpoint(**ckpt_items) status = ckpt.read(ckpt_dir_or_file) status.expect_partial().assert_existing_objects_matched() logging.info('Finished loading pretrained checkpoint from %s', ckpt_dir_or_file) def build_inputs(self, params: exp_cfg.DataConfig, input_context: Optional[tf.distribute.InputContext] = None): """Build input dataset.""" if params.tfds_name: decoder = tfds_factory.get_detection_decoder(params.tfds_name) else: decoder_cfg = params.decoder.get() if params.decoder.type == 'simple_decoder': decoder = tf_example_decoder.TfExampleDecoder( regenerate_source_id=decoder_cfg.regenerate_source_id, attribute_names=decoder_cfg.attribute_names, ) elif params.decoder.type == 'label_map_decoder': decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap( label_map=decoder_cfg.label_map, regenerate_source_id=decoder_cfg.regenerate_source_id) else: raise ValueError('Unknown decoder type: {}!'.format( params.decoder.type)) parser = retinanet_input.Parser( output_size=self.task_config.model.input_size[:2], min_level=self.task_config.model.min_level, max_level=self.task_config.model.max_level, num_scales=self.task_config.model.anchor.num_scales, aspect_ratios=self.task_config.model.anchor.aspect_ratios, anchor_size=self.task_config.model.anchor.anchor_size, dtype=params.dtype, match_threshold=params.parser.match_threshold, unmatched_threshold=params.parser.unmatched_threshold, aug_type=params.parser.aug_type, aug_rand_hflip=params.parser.aug_rand_hflip, aug_scale_min=params.parser.aug_scale_min, aug_scale_max=params.parser.aug_scale_max, skip_crowd_during_training=params.parser.skip_crowd_during_training, max_num_instances=params.parser.max_num_instances) reader = input_reader_factory.input_reader_generator( params, dataset_fn=dataset_fn.pick_dataset_fn(params.file_type), decoder_fn=decoder.decode, combine_fn=input_reader.create_combine_fn(params), parser_fn=parser.parse_fn(params.is_training)) dataset = reader.read(input_context=input_context) return dataset def build_attribute_loss(self, attribute_heads: List[exp_cfg.AttributeHead], outputs: Mapping[str, Any], labels: Mapping[str, Any], box_sample_weight: tf.Tensor) -> float: """Computes attribute loss. Args: attribute_heads: a list of attribute head configs. outputs: RetinaNet model outputs. labels: RetinaNet labels. box_sample_weight: normalized bounding box sample weights. Returns: Attribute loss of all attribute heads. 
""" params = self.task_config attribute_loss = 0.0 for head in attribute_heads: if head.name not in labels['attribute_targets']: raise ValueError(f'Attribute {head.name} not found in label targets.') if head.name not in outputs['attribute_outputs']: raise ValueError(f'Attribute {head.name} not found in model outputs.') if head.type == 'regression': y_true_att = loss_utils.multi_level_flatten( labels['attribute_targets'][head.name], last_dim=head.size ) y_pred_att = loss_utils.multi_level_flatten( outputs['attribute_outputs'][head.name], last_dim=head.size ) att_loss_fn = tf.keras.losses.Huber( 1.0, reduction=tf.keras.losses.Reduction.SUM) att_loss = att_loss_fn( y_true=y_true_att, y_pred=y_pred_att, sample_weight=box_sample_weight) elif head.type == 'classification': y_true_att = loss_utils.multi_level_flatten( labels['attribute_targets'][head.name], last_dim=None ) y_true_att = tf.one_hot(y_true_att, head.size) y_pred_att = loss_utils.multi_level_flatten( outputs['attribute_outputs'][head.name], last_dim=head.size ) cls_loss_fn = focal_loss.FocalLoss( alpha=params.losses.focal_loss_alpha, gamma=params.losses.focal_loss_gamma, reduction=tf.keras.losses.Reduction.SUM, ) att_loss = cls_loss_fn( y_true=y_true_att, y_pred=y_pred_att, sample_weight=box_sample_weight, ) else: raise ValueError(f'Attribute type {head.type} not supported.') attribute_loss += att_loss return attribute_loss def build_losses(self, outputs: Mapping[str, Any], labels: Mapping[str, Any], aux_losses: Optional[Any] = None): """Build RetinaNet losses.""" params = self.task_config attribute_heads = self.task_config.model.head.attribute_heads cls_loss_fn = focal_loss.FocalLoss( alpha=params.losses.focal_loss_alpha, gamma=params.losses.focal_loss_gamma, reduction=tf.keras.losses.Reduction.SUM) box_loss_fn = tf.keras.losses.Huber( params.losses.huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM) # Sums all positives in a batch for normalization and avoids zero # num_positives_sum, which would lead to inf loss during training cls_sample_weight = labels['cls_weights'] box_sample_weight = labels['box_weights'] num_positives = tf.reduce_sum(box_sample_weight) + 1.0 cls_sample_weight = cls_sample_weight / num_positives box_sample_weight = box_sample_weight / num_positives y_true_cls = loss_utils.multi_level_flatten( labels['cls_targets'], last_dim=None) y_true_cls = tf.one_hot(y_true_cls, params.model.num_classes) y_pred_cls = loss_utils.multi_level_flatten( outputs['cls_outputs'], last_dim=params.model.num_classes) y_true_box = loss_utils.multi_level_flatten( labels['box_targets'], last_dim=4) y_pred_box = loss_utils.multi_level_flatten( outputs['box_outputs'], last_dim=4) cls_loss = cls_loss_fn( y_true=y_true_cls, y_pred=y_pred_cls, sample_weight=cls_sample_weight) box_loss = box_loss_fn( y_true=y_true_box, y_pred=y_pred_box, sample_weight=box_sample_weight) model_loss = cls_loss + params.losses.box_loss_weight * box_loss if attribute_heads: model_loss += self.build_attribute_loss(attribute_heads, outputs, labels, box_sample_weight) total_loss = model_loss if aux_losses: reg_loss = tf.reduce_sum(aux_losses) total_loss = model_loss + reg_loss total_loss = params.losses.loss_weight * total_loss return total_loss, cls_loss, box_loss, model_loss def build_metrics(self, training: bool = True): """Build detection metrics.""" metrics = [] metric_names = ['total_loss', 'cls_loss', 'box_loss', 'model_loss'] for name in metric_names: metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32)) if not training: if ( 
self.task_config.validation_data.tfds_name and self.task_config.annotation_file ): raise ValueError( "Can't evaluate using annotation file when TFDS is used." ) if self._task_config.use_coco_metrics: self.coco_metric = coco_evaluator.COCOEvaluator( annotation_file=self.task_config.annotation_file, include_mask=False, per_category_metrics=self.task_config.per_category_metrics, max_num_eval_detections=self.task_config.max_num_eval_detections, ) if self._task_config.use_wod_metrics: # To use Waymo open dataset metrics, please install one of the pip # package `waymo-open-dataset-tf-*` from # https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md#use-pre-compiled-pippip3-packages-for-linux # Note that the package is built with specific tensorflow version and # will produce error if it does not match the tf version that is # currently used. try: from official.vision.evaluation import wod_detection_evaluator # pylint: disable=g-import-not-at-top except ModuleNotFoundError: logging.error('waymo-open-dataset should be installed to enable Waymo' ' evaluator.') raise self.wod_metric = wod_detection_evaluator.WOD2dDetectionEvaluator() return metrics def train_step(self, inputs: Tuple[Any, Any], model: tf.keras.Model, optimizer: tf.keras.optimizers.Optimizer, metrics: Optional[List[Any]] = None): """Does forward and backward. Args: inputs: a dictionary of input tensors. model: the model, forward pass definition. optimizer: the optimizer for this training step. metrics: a nested structure of metrics objects. Returns: A dictionary of logs. """ features, labels = inputs num_replicas = tf.distribute.get_strategy().num_replicas_in_sync with tf.GradientTape() as tape: outputs = model(features, training=True) outputs = tf.nest.map_structure( lambda x: tf.cast(x, tf.float32), outputs) # Computes per-replica loss. loss, cls_loss, box_loss, model_loss = self.build_losses( outputs=outputs, labels=labels, aux_losses=model.losses) scaled_loss = loss / num_replicas # For mixed_precision policy, when LossScaleOptimizer is used, loss is # scaled for numerical stability. if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer): scaled_loss = optimizer.get_scaled_loss(scaled_loss) tvars = model.trainable_variables grads = tape.gradient(scaled_loss, tvars) # Scales back gradient when LossScaleOptimizer is used. if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer): grads = optimizer.get_unscaled_gradients(grads) optimizer.apply_gradients(list(zip(grads, tvars))) logs = {self.loss: loss} all_losses = { 'total_loss': loss, 'cls_loss': cls_loss, 'box_loss': box_loss, 'model_loss': model_loss, } if metrics: for m in metrics: m.update_state(all_losses[m.name]) logs.update({m.name: m.result()}) return logs def validation_step(self, inputs: Tuple[Any, Any], model: tf.keras.Model, metrics: Optional[List[Any]] = None): """Validatation step. Args: inputs: a dictionary of input tensors. model: the keras.Model. metrics: a nested structure of metrics objects. Returns: A dictionary of logs. 
""" features, labels = inputs outputs = model(features, anchor_boxes=labels['anchor_boxes'], image_shape=labels['image_info'][:, 1, :], training=False) loss, cls_loss, box_loss, model_loss = self.build_losses( outputs=outputs, labels=labels, aux_losses=model.losses) logs = {self.loss: loss} all_losses = { 'total_loss': loss, 'cls_loss': cls_loss, 'box_loss': box_loss, 'model_loss': model_loss, } if self._task_config.use_coco_metrics: coco_model_outputs = { 'detection_boxes': outputs['detection_boxes'], 'detection_scores': outputs['detection_scores'], 'detection_classes': outputs['detection_classes'], 'num_detections': outputs['num_detections'], 'source_id': labels['groundtruths']['source_id'], 'image_info': labels['image_info'] } logs.update( {self.coco_metric.name: (labels['groundtruths'], coco_model_outputs)}) if self.task_config.use_wod_metrics: wod_model_outputs = { 'detection_boxes': outputs['detection_boxes'], 'detection_scores': outputs['detection_scores'], 'detection_classes': outputs['detection_classes'], 'num_detections': outputs['num_detections'], 'source_id': labels['groundtruths']['source_id'], 'image_info': labels['image_info'] } logs.update( {self.wod_metric.name: (labels['groundtruths'], wod_model_outputs)}) if metrics: for m in metrics: m.update_state(all_losses[m.name]) logs.update({m.name: m.result()}) if ( hasattr(self.task_config, 'allow_image_summary') and self.task_config.allow_image_summary ): logs.update( {'visualization': (tf.cast(features, dtype=tf.float32), outputs)} ) return logs def aggregate_logs(self, state=None, step_outputs=None): if self._task_config.use_coco_metrics: if state is None: self.coco_metric.reset_states() self.coco_metric.update_state(step_outputs[self.coco_metric.name][0], step_outputs[self.coco_metric.name][1]) if self._task_config.use_wod_metrics: if state is None: self.wod_metric.reset_states() self.wod_metric.update_state(step_outputs[self.wod_metric.name][0], step_outputs[self.wod_metric.name][1]) if 'visualization' in step_outputs: # Update detection state for writing summary if there are artifacts for # visualization. if state is None: state = {} state.update(visualization_utils.update_detection_state(step_outputs)) if state is None: # Create an arbitrary state to indicate it's not the first step in the # following calls to this function. state = True return state def reduce_aggregated_logs(self, aggregated_logs, global_step=None): logs = {} if self._task_config.use_coco_metrics: logs.update(self.coco_metric.result()) if self._task_config.use_wod_metrics: logs.update(self.wod_metric.result()) # Add visualization for summary. if isinstance(aggregated_logs, dict) and 'image' in aggregated_logs: validation_outputs = visualization_utils.visualize_outputs( logs=aggregated_logs, task_config=self.task_config ) logs.update(validation_outputs) return logs
18,008
37.980519
138
py
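A standalone sketch (toy values, not part of retinanet.py) of the normalization used in build_losses above: box sample weights are divided by the number of positive anchors plus one before a SUM-reduced Huber loss, so the result behaves like a mean over positives while avoiding division by zero.

import tensorflow as tf

box_weights = tf.constant([1.0, 0.0, 1.0, 0.0])  # 1.0 marks positive anchors.
y_true_box = tf.random.uniform([4, 4])
y_pred_box = tf.random.uniform([4, 4])

# Normalize per-anchor weights by the positive count (+1.0 guards against an
# all-negative batch), mirroring the normalization in build_losses.
num_positives = tf.reduce_sum(box_weights) + 1.0
box_sample_weight = box_weights / num_positives

huber = tf.keras.losses.Huber(0.1, reduction=tf.keras.losses.Reduction.SUM)
box_loss = huber(y_true_box, y_pred_box, sample_weight=box_sample_weight)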
models
models-master/official/vision/tasks/semantic_segmentation.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image segmentation task definition.""" from typing import Any, List, Mapping, Optional, Tuple, Union from absl import logging import tensorflow as tf from official.common import dataset_fn from official.core import base_task from official.core import task_factory from official.vision.configs import semantic_segmentation as exp_cfg from official.vision.dataloaders import input_reader from official.vision.dataloaders import input_reader_factory from official.vision.dataloaders import segmentation_input from official.vision.dataloaders import tfds_factory from official.vision.evaluation import segmentation_metrics from official.vision.losses import segmentation_losses from official.vision.modeling import factory @task_factory.register_task_cls(exp_cfg.SemanticSegmentationTask) class SemanticSegmentationTask(base_task.Task): """A task for semantic segmentation.""" def build_model(self): """Builds segmentation model.""" input_specs = tf.keras.layers.InputSpec(shape=[None] + self.task_config.model.input_size) l2_weight_decay = self.task_config.losses.l2_weight_decay # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss. # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2) # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss) l2_regularizer = ( tf.keras.regularizers.l2(l2_weight_decay / 2.0) if l2_weight_decay else None) model = factory.build_segmentation_model( input_specs=input_specs, model_config=self.task_config.model, l2_regularizer=l2_regularizer) # Builds the model dummy_inputs = tf.keras.Input(self.task_config.model.input_size) _ = model(dummy_inputs, training=False) return model def initialize(self, model: tf.keras.Model): """Loads pretrained checkpoint.""" if not self.task_config.init_checkpoint: return ckpt_dir_or_file = self.task_config.init_checkpoint if tf.io.gfile.isdir(ckpt_dir_or_file): ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) # Restoring checkpoint. 
if 'all' in self.task_config.init_checkpoint_modules: ckpt = tf.train.Checkpoint(**model.checkpoint_items) status = ckpt.read(ckpt_dir_or_file) status.expect_partial().assert_existing_objects_matched() else: ckpt_items = {} if 'backbone' in self.task_config.init_checkpoint_modules: ckpt_items.update(backbone=model.backbone) if 'decoder' in self.task_config.init_checkpoint_modules: ckpt_items.update(decoder=model.decoder) ckpt = tf.train.Checkpoint(**ckpt_items) status = ckpt.read(ckpt_dir_or_file) status.expect_partial().assert_existing_objects_matched() logging.info('Finished loading pretrained checkpoint from %s', ckpt_dir_or_file) def build_inputs(self, params: exp_cfg.DataConfig, input_context: Optional[tf.distribute.InputContext] = None): """Builds classification input.""" ignore_label = self.task_config.losses.ignore_label gt_is_matting_map = self.task_config.losses.gt_is_matting_map if params.tfds_name: decoder = tfds_factory.get_segmentation_decoder(params.tfds_name) else: decoder = segmentation_input.Decoder( image_feature=params.image_feature, additional_dense_features=params.additional_dense_features) parser = segmentation_input.Parser( output_size=params.output_size, crop_size=params.crop_size, ignore_label=ignore_label, resize_eval_groundtruth=params.resize_eval_groundtruth, gt_is_matting_map=gt_is_matting_map, groundtruth_padded_size=params.groundtruth_padded_size, aug_scale_min=params.aug_scale_min, aug_scale_max=params.aug_scale_max, aug_rand_hflip=params.aug_rand_hflip, preserve_aspect_ratio=params.preserve_aspect_ratio, dtype=params.dtype, image_feature=params.image_feature, additional_dense_features=params.additional_dense_features) reader = input_reader_factory.input_reader_generator( params, dataset_fn=dataset_fn.pick_dataset_fn(params.file_type), decoder_fn=decoder.decode, combine_fn=input_reader.create_combine_fn(params), parser_fn=parser.parse_fn(params.is_training)) dataset = reader.read(input_context=input_context) return dataset def build_losses(self, labels: Mapping[str, tf.Tensor], model_outputs: Union[Mapping[str, tf.Tensor], tf.Tensor], aux_losses: Optional[Any] = None): """Segmentation loss. Args: labels: labels. model_outputs: Output logits of the classifier. aux_losses: auxiliarly loss tensors, i.e. `losses` in keras.Model. Returns: The total loss tensor. """ loss_params = self._task_config.losses segmentation_loss_fn = segmentation_losses.SegmentationLoss( loss_params.label_smoothing, loss_params.class_weights, loss_params.ignore_label, use_groundtruth_dimension=loss_params.use_groundtruth_dimension, use_binary_cross_entropy=loss_params.use_binary_cross_entropy, top_k_percent_pixels=loss_params.top_k_percent_pixels, gt_is_matting_map=loss_params.gt_is_matting_map) total_loss = segmentation_loss_fn(model_outputs['logits'], labels['masks']) if 'mask_scores' in model_outputs: mask_scoring_loss_fn = segmentation_losses.MaskScoringLoss( loss_params.ignore_label) total_loss += loss_params.mask_scoring_weight * mask_scoring_loss_fn( model_outputs['mask_scores'], model_outputs['logits'], labels['masks']) if aux_losses: total_loss += tf.add_n(aux_losses) total_loss = loss_params.loss_weight * total_loss return total_loss def process_metrics(self, metrics, labels, model_outputs, **kwargs): """Process and update metrics. Called when using custom training loop API. Args: metrics: a nested structure of metrics objects. The return of function self.build_metrics. labels: a tensor or a nested structure of tensors. model_outputs: a tensor or a nested structure of tensors. 
For example, output of the keras model built by self.build_model. **kwargs: other args. """ for metric in metrics: if 'mask_scores_mse' == metric.name: actual_mask_scores = segmentation_losses.get_actual_mask_scores( model_outputs['logits'], labels['masks'], self.task_config.losses.ignore_label) metric.update_state(actual_mask_scores, model_outputs['mask_scores']) else: metric.update_state(labels, model_outputs['logits']) def build_metrics(self, training: bool = True): """Gets streaming metrics for training/validation.""" metrics = [] self.iou_metric = None if training and self.task_config.evaluation.report_train_mean_iou: metrics.append( segmentation_metrics.MeanIoU( name='mean_iou', num_classes=self.task_config.model.num_classes, rescale_predictions=False, dtype=tf.float32)) if self.task_config.model.get('mask_scoring_head'): metrics.append( tf.keras.metrics.MeanSquaredError(name='mask_scores_mse')) if not training: self.iou_metric = segmentation_metrics.PerClassIoU( name='per_class_iou', num_classes=self.task_config.model.num_classes, rescale_predictions=( not self.task_config.validation_data.resize_eval_groundtruth), dtype=tf.float32) if (self.task_config.validation_data.resize_eval_groundtruth and self.task_config.model.get('mask_scoring_head')): # Masks scores metric can only be computed if labels are scaled to match # preticted mask scores. metrics.append( tf.keras.metrics.MeanSquaredError(name='mask_scores_mse')) return metrics def train_step(self, inputs: Tuple[Any, Any], model: tf.keras.Model, optimizer: tf.keras.optimizers.Optimizer, metrics: Optional[List[Any]] = None): """Does forward and backward. Args: inputs: a dictionary of input tensors. model: the model, forward pass definition. optimizer: the optimizer for this training step. metrics: a nested structure of metrics objects. Returns: A dictionary of logs. """ features, labels = inputs input_partition_dims = self.task_config.train_input_partition_dims if input_partition_dims: strategy = tf.distribute.get_strategy() features = strategy.experimental_split_to_logical_devices( features, input_partition_dims) num_replicas = tf.distribute.get_strategy().num_replicas_in_sync with tf.GradientTape() as tape: outputs = model(features, training=True) if isinstance(outputs, tf.Tensor): outputs = {'logits': outputs} # Casting output layer as float32 is necessary when mixed_precision is # mixed_float16 or mixed_bfloat16 to ensure output is casted as float32. outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs) # Computes per-replica loss. loss = self.build_losses( model_outputs=outputs, labels=labels, aux_losses=model.losses) # Scales loss as the default gradients allreduce performs sum inside the # optimizer. scaled_loss = loss / num_replicas # For mixed_precision policy, when LossScaleOptimizer is used, loss is # scaled for numerical stability. if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer): scaled_loss = optimizer.get_scaled_loss(scaled_loss) tvars = model.trainable_variables grads = tape.gradient(scaled_loss, tvars) # Scales back gradient before apply_gradients when LossScaleOptimizer is # used. 
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer): grads = optimizer.get_unscaled_gradients(grads) optimizer.apply_gradients(list(zip(grads, tvars))) logs = {self.loss: loss} if metrics: self.process_metrics(metrics, labels, outputs) logs.update({m.name: m.result() for m in metrics}) return logs def validation_step(self, inputs: Tuple[Any, Any], model: tf.keras.Model, metrics: Optional[List[Any]] = None): """Validatation step. Args: inputs: a dictionary of input tensors. model: the keras.Model. metrics: a nested structure of metrics objects. Returns: A dictionary of logs. """ features, labels = inputs input_partition_dims = self.task_config.eval_input_partition_dims if input_partition_dims: strategy = tf.distribute.get_strategy() features = strategy.experimental_split_to_logical_devices( features, input_partition_dims) outputs = self.inference_step(features, model) if isinstance(outputs, tf.Tensor): outputs = {'logits': outputs} outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs) if self.task_config.validation_data.resize_eval_groundtruth: loss = self.build_losses( model_outputs=outputs, labels=labels, aux_losses=model.losses) else: loss = 0 logs = {self.loss: loss} if self.iou_metric is not None: self.iou_metric.update_state(labels, outputs['logits']) if metrics: self.process_metrics(metrics, labels, outputs) return logs def inference_step(self, inputs: tf.Tensor, model: tf.keras.Model): """Performs the forward step.""" return model(inputs, training=False) def aggregate_logs(self, state=None, step_outputs=None): if state is None and self.iou_metric is not None: self.iou_metric.reset_states() state = self.iou_metric return state def reduce_aggregated_logs(self, aggregated_logs, global_step=None): result = {} if self.iou_metric is not None: ious = self.iou_metric.result() # TODO(arashwan): support loading class name from a label map file. if self.task_config.evaluation.report_per_class_iou: for i, value in enumerate(ious.numpy()): result.update({'iou/{}'.format(i): value}) # Computes mean IoU result.update({'mean_iou': tf.reduce_mean(ious)}) return result
13,246
37.175793
80
py
models
models-master/official/vision/tasks/maskrcnn.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MaskRCNN task definition.""" import os from typing import Any, Dict, List, Mapping, Optional, Tuple from absl import logging import numpy as np import tensorflow as tf from official.common import dataset_fn as dataset_fn_lib from official.core import base_task from official.core import task_factory from official.vision.configs import maskrcnn as exp_cfg from official.vision.dataloaders import input_reader from official.vision.dataloaders import input_reader_factory from official.vision.dataloaders import maskrcnn_input from official.vision.dataloaders import tf_example_decoder from official.vision.dataloaders import tf_example_label_map_decoder from official.vision.evaluation import coco_evaluator from official.vision.evaluation import coco_utils from official.vision.evaluation import instance_metrics as metrics_lib from official.vision.losses import maskrcnn_losses from official.vision.modeling import factory from official.vision.utils.object_detection import visualization_utils def zero_out_disallowed_class_ids(batch_class_ids: tf.Tensor, allowed_class_ids: List[int]): """Zeroes out IDs of classes not in allowed_class_ids. Args: batch_class_ids: A [batch_size, num_instances] int tensor of input class IDs. allowed_class_ids: A python list of class IDs which we want to allow. Returns: filtered_class_ids: A [batch_size, num_instances] int tensor with any class ID not in allowed_class_ids set to 0. """ allowed_class_ids = tf.constant(allowed_class_ids, dtype=batch_class_ids.dtype) match_ids = (batch_class_ids[:, :, tf.newaxis] == allowed_class_ids[tf.newaxis, tf.newaxis, :]) match_ids = tf.reduce_any(match_ids, axis=2) return tf.where(match_ids, batch_class_ids, tf.zeros_like(batch_class_ids)) @task_factory.register_task_cls(exp_cfg.MaskRCNNTask) class MaskRCNNTask(base_task.Task): """A single-replica view of training procedure. Mask R-CNN task provides artifacts for training/evalution procedures, including loading/iterating over Datasets, initializing the model, calculating the loss, post-processing, and customized metrics with reduction. """ def build_model(self): """Builds Mask R-CNN model.""" input_specs = tf.keras.layers.InputSpec( shape=[None] + self.task_config.model.input_size) l2_weight_decay = self.task_config.losses.l2_weight_decay # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss. # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2) # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss) l2_regularizer = (tf.keras.regularizers.l2( l2_weight_decay / 2.0) if l2_weight_decay else None) model = factory.build_maskrcnn( input_specs=input_specs, model_config=self.task_config.model, l2_regularizer=l2_regularizer) if self.task_config.freeze_backbone: model.backbone.trainable = False # Builds the model through warm-up call. 
dummy_images = tf.keras.Input(self.task_config.model.input_size) dummy_image_shape = tf.keras.layers.Input([2]) _ = model(dummy_images, image_shape=dummy_image_shape, training=False) return model def initialize(self, model: tf.keras.Model): """Loads pretrained checkpoint.""" if not self.task_config.init_checkpoint: return ckpt_dir_or_file = self.task_config.init_checkpoint if tf.io.gfile.isdir(ckpt_dir_or_file): ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) # Restoring checkpoint. if self.task_config.init_checkpoint_modules == 'all': ckpt = tf.train.Checkpoint(model=model) status = ckpt.read(ckpt_dir_or_file) status.expect_partial().assert_existing_objects_matched() else: ckpt_items = {} if 'backbone' in self.task_config.init_checkpoint_modules: ckpt_items.update(backbone=model.backbone) if 'decoder' in self.task_config.init_checkpoint_modules: ckpt_items.update(decoder=model.decoder) ckpt = tf.train.Checkpoint(**ckpt_items) status = ckpt.read(ckpt_dir_or_file) status.expect_partial().assert_existing_objects_matched() logging.info('Finished loading pretrained checkpoint from %s', ckpt_dir_or_file) def build_inputs( self, params: exp_cfg.DataConfig, input_context: Optional[tf.distribute.InputContext] = None, dataset_fn: Optional[dataset_fn_lib.PossibleDatasetType] = None ) -> tf.data.Dataset: """Builds input dataset.""" decoder_cfg = params.decoder.get() if params.decoder.type == 'simple_decoder': decoder = tf_example_decoder.TfExampleDecoder( include_mask=self._task_config.model.include_mask, regenerate_source_id=decoder_cfg.regenerate_source_id, mask_binarize_threshold=decoder_cfg.mask_binarize_threshold) elif params.decoder.type == 'label_map_decoder': decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap( label_map=decoder_cfg.label_map, include_mask=self._task_config.model.include_mask, regenerate_source_id=decoder_cfg.regenerate_source_id, mask_binarize_threshold=decoder_cfg.mask_binarize_threshold) else: raise ValueError('Unknown decoder type: {}!'.format(params.decoder.type)) parser = maskrcnn_input.Parser( output_size=self.task_config.model.input_size[:2], min_level=self.task_config.model.min_level, max_level=self.task_config.model.max_level, num_scales=self.task_config.model.anchor.num_scales, aspect_ratios=self.task_config.model.anchor.aspect_ratios, anchor_size=self.task_config.model.anchor.anchor_size, rpn_match_threshold=params.parser.rpn_match_threshold, rpn_unmatched_threshold=params.parser.rpn_unmatched_threshold, rpn_batch_size_per_im=params.parser.rpn_batch_size_per_im, rpn_fg_fraction=params.parser.rpn_fg_fraction, aug_rand_hflip=params.parser.aug_rand_hflip, aug_rand_vflip=params.parser.aug_rand_vflip, aug_scale_min=params.parser.aug_scale_min, aug_scale_max=params.parser.aug_scale_max, aug_type=params.parser.aug_type, skip_crowd_during_training=params.parser.skip_crowd_during_training, max_num_instances=params.parser.max_num_instances, include_mask=self.task_config.model.include_mask, outer_boxes_scale=self.task_config.model.outer_boxes_scale, mask_crop_size=params.parser.mask_crop_size, dtype=params.dtype, ) if not dataset_fn: dataset_fn = dataset_fn_lib.pick_dataset_fn(params.file_type) reader = input_reader_factory.input_reader_generator( params, dataset_fn=dataset_fn, decoder_fn=decoder.decode, combine_fn=input_reader.create_combine_fn(params), parser_fn=parser.parse_fn(params.is_training)) dataset = reader.read(input_context=input_context) return dataset def _build_rpn_losses( self, outputs: Mapping[str, Any], labels: Mapping[str, Any]) 
-> Tuple[tf.Tensor, tf.Tensor]: """Builds losses for Region Proposal Network (RPN).""" rpn_score_loss_fn = maskrcnn_losses.RpnScoreLoss( tf.shape(outputs['box_outputs'])[1]) rpn_box_loss_fn = maskrcnn_losses.RpnBoxLoss( self.task_config.losses.rpn_huber_loss_delta) rpn_score_loss = tf.reduce_mean( rpn_score_loss_fn(outputs['rpn_scores'], labels['rpn_score_targets'])) rpn_box_loss = tf.reduce_mean( rpn_box_loss_fn(outputs['rpn_boxes'], labels['rpn_box_targets'])) return rpn_score_loss, rpn_box_loss def _build_frcnn_losses( self, outputs: Mapping[str, Any], labels: Mapping[str, Any]) -> Tuple[tf.Tensor, tf.Tensor]: """Builds losses for Fast R-CNN.""" cascade_ious = self.task_config.model.roi_sampler.cascade_iou_thresholds frcnn_cls_loss_fn = maskrcnn_losses.FastrcnnClassLoss( use_binary_cross_entropy=self.task_config.losses .frcnn_class_use_binary_cross_entropy, top_k_percent=self.task_config.losses.frcnn_class_loss_top_k_percent) frcnn_box_loss_fn = maskrcnn_losses.FastrcnnBoxLoss( self.task_config.losses.frcnn_huber_loss_delta, self.task_config.model.detection_head.class_agnostic_bbox_pred) # Final cls/box losses are computed as an average of all detection heads. frcnn_cls_loss = 0.0 frcnn_box_loss = 0.0 num_det_heads = 1 if cascade_ious is None else 1 + len(cascade_ious) for cas_num in range(num_det_heads): frcnn_cls_loss_i = tf.reduce_mean( frcnn_cls_loss_fn( outputs['class_outputs_{}' .format(cas_num) if cas_num else 'class_outputs'], outputs['class_targets_{}' .format(cas_num) if cas_num else 'class_targets'])) frcnn_box_loss_i = tf.reduce_mean( frcnn_box_loss_fn( outputs['box_outputs_{}'.format(cas_num ) if cas_num else 'box_outputs'], outputs['class_targets_{}' .format(cas_num) if cas_num else 'class_targets'], outputs['box_targets_{}'.format(cas_num ) if cas_num else 'box_targets'])) frcnn_cls_loss += frcnn_cls_loss_i frcnn_box_loss += frcnn_box_loss_i frcnn_cls_loss /= num_det_heads frcnn_box_loss /= num_det_heads return frcnn_cls_loss, frcnn_box_loss def _build_mask_loss(self, outputs: Mapping[str, Any]) -> tf.Tensor: """Builds losses for the masks.""" mask_loss_fn = maskrcnn_losses.MaskrcnnLoss() mask_class_targets = outputs['mask_class_targets'] if self.task_config.allowed_mask_class_ids is not None: # Classes with ID=0 are ignored by mask_loss_fn in loss computation. 
mask_class_targets = zero_out_disallowed_class_ids( mask_class_targets, self.task_config.allowed_mask_class_ids) return tf.reduce_mean( mask_loss_fn(outputs['mask_outputs'], outputs['mask_targets'], mask_class_targets)) def build_losses(self, outputs: Mapping[str, Any], labels: Mapping[str, Any], aux_losses: Optional[Any] = None) -> Dict[str, tf.Tensor]: """Builds Mask R-CNN losses.""" rpn_score_loss, rpn_box_loss = self._build_rpn_losses(outputs, labels) frcnn_cls_loss, frcnn_box_loss = self._build_frcnn_losses(outputs, labels) if self.task_config.model.include_mask: mask_loss = self._build_mask_loss(outputs) else: mask_loss = tf.constant(0.0, dtype=tf.float32) params = self.task_config model_loss = ( params.losses.rpn_score_weight * rpn_score_loss + params.losses.rpn_box_weight * rpn_box_loss + params.losses.frcnn_class_weight * frcnn_cls_loss + params.losses.frcnn_box_weight * frcnn_box_loss + params.losses.mask_weight * mask_loss) total_loss = model_loss if aux_losses: reg_loss = tf.reduce_sum(aux_losses) total_loss = model_loss + reg_loss total_loss = params.losses.loss_weight * total_loss losses = { 'total_loss': total_loss, 'rpn_score_loss': rpn_score_loss, 'rpn_box_loss': rpn_box_loss, 'frcnn_cls_loss': frcnn_cls_loss, 'frcnn_box_loss': frcnn_box_loss, 'mask_loss': mask_loss, 'model_loss': model_loss, } return losses def _build_coco_metrics(self): """Builds COCO metrics evaluator.""" if (not self._task_config.model.include_mask ) or self._task_config.annotation_file: self.coco_metric = coco_evaluator.COCOEvaluator( annotation_file=self._task_config.annotation_file, include_mask=self._task_config.model.include_mask, per_category_metrics=self._task_config.per_category_metrics) else: # Builds COCO-style annotation file if include_mask is True, and # annotation_file isn't provided. 
annotation_path = os.path.join(self._logging_dir, 'annotation.json') if tf.io.gfile.exists(annotation_path): logging.info( 'annotation.json file exists, skipping creating the annotation' ' file.') else: if self._task_config.validation_data.num_examples <= 0: logging.info('validation_data.num_examples needs to be > 0') if not self._task_config.validation_data.input_path: logging.info('Can not create annotation file for tfds.') logging.info( 'Creating coco-style annotation file: %s', annotation_path) coco_utils.scan_and_generator_annotation_file( self._task_config.validation_data.input_path, self._task_config.validation_data.file_type, self._task_config.validation_data.num_examples, self.task_config.model.include_mask, annotation_path, regenerate_source_id=self._task_config.validation_data.decoder .simple_decoder.regenerate_source_id) self.coco_metric = coco_evaluator.COCOEvaluator( annotation_file=annotation_path, include_mask=self._task_config.model.include_mask, per_category_metrics=self._task_config.per_category_metrics) def build_metrics(self, training: bool = True): """Builds detection metrics.""" self.instance_box_perclass_metrics = None self.instance_mask_perclass_metrics = None if training: metric_names = [ 'total_loss', 'rpn_score_loss', 'rpn_box_loss', 'frcnn_cls_loss', 'frcnn_box_loss', 'mask_loss', 'model_loss', ] return [ tf.keras.metrics.Mean(name, dtype=tf.float32) for name in metric_names ] else: if self._task_config.use_coco_metrics: self._build_coco_metrics() if self._task_config.use_wod_metrics: # To use Waymo open dataset metrics, please install one of the pip # package `waymo-open-dataset-tf-*` from # https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md#use-pre-compiled-pippip3-packages-for-linux # Note that the package is built with specific tensorflow version and # will produce error if it does not match the tf version that is # currently used. try: from official.vision.evaluation import wod_detection_evaluator # pylint: disable=g-import-not-at-top except ModuleNotFoundError: logging.error('waymo-open-dataset should be installed to enable Waymo' ' evaluator.') raise self.wod_metric = wod_detection_evaluator.WOD2dDetectionEvaluator() if self.task_config.use_approx_instance_metrics: self.instance_box_perclass_metrics = metrics_lib.InstanceMetrics( name='instance_box_perclass', num_classes=self.task_config.model.num_classes, iou_thresholds=np.arange(0.5, 1.0, step=0.05), ) if self.task_config.model.include_mask: self.instance_mask_perclass_metrics = metrics_lib.InstanceMetrics( name='instance_mask_perclass', use_masks=True, num_classes=self.task_config.model.num_classes, iou_thresholds=np.arange(0.5, 1.0, step=0.05), ) return [] def train_step(self, inputs: Tuple[Any, Any], model: tf.keras.Model, optimizer: tf.keras.optimizers.Optimizer, metrics: Optional[List[Any]] = None): """Does forward and backward. Args: inputs: a dictionary of input tensors. model: the model, forward pass definition. optimizer: the optimizer for this training step. metrics: a nested structure of metrics objects. Returns: A dictionary of logs. 
""" images, labels = inputs num_replicas = tf.distribute.get_strategy().num_replicas_in_sync with tf.GradientTape() as tape: model_kwargs = { 'image_shape': labels['image_info'][:, 1, :], 'anchor_boxes': labels['anchor_boxes'], 'gt_boxes': labels['gt_boxes'], 'gt_classes': labels['gt_classes'], 'training': True, } if self.task_config.model.include_mask: model_kwargs['gt_masks'] = labels['gt_masks'] if self.task_config.model.outer_boxes_scale > 1.0: model_kwargs['gt_outer_boxes'] = labels['gt_outer_boxes'] outputs = model( images, **model_kwargs) outputs = tf.nest.map_structure( lambda x: tf.cast(x, tf.float32), outputs) # Computes per-replica loss. losses = self.build_losses( outputs=outputs, labels=labels, aux_losses=model.losses) scaled_loss = losses['total_loss'] / num_replicas # For mixed_precision policy, when LossScaleOptimizer is used, loss is # scaled for numerical stability. if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer): scaled_loss = optimizer.get_scaled_loss(scaled_loss) tvars = model.trainable_variables grads = tape.gradient(scaled_loss, tvars) # Scales back gradient when LossScaleOptimizer is used. if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer): grads = optimizer.get_unscaled_gradients(grads) optimizer.apply_gradients(list(zip(grads, tvars))) logs = {self.loss: losses['total_loss']} if metrics: for m in metrics: m.update_state(losses[m.name]) return logs def _update_metrics(self, labels, outputs, logs): instance_predictions = { 'detection_boxes': outputs['detection_boxes'], 'detection_scores': outputs['detection_scores'], 'detection_classes': outputs['detection_classes'], 'num_detections': outputs['num_detections'], 'source_id': labels['groundtruths']['source_id'], 'image_info': labels['image_info'], } if 'detection_outer_boxes' in outputs: instance_predictions['detection_outer_boxes'] = outputs[ 'detection_outer_boxes' ] if 'detection_masks' in outputs: instance_predictions['detection_masks'] = outputs['detection_masks'] if self._task_config.use_coco_metrics: logs[self.coco_metric.name] = ( labels['groundtruths'], instance_predictions, ) if self.task_config.use_wod_metrics: logs[self.wod_metric.name] = ( labels['groundtruths'], instance_predictions, ) instance_labels = { 'boxes': labels['groundtruths']['boxes'], 'classes': labels['groundtruths']['classes'], 'is_crowds': labels['groundtruths']['is_crowds'], 'image_info': labels['image_info'], } if self.instance_box_perclass_metrics is not None: self.instance_box_perclass_metrics.update_state( y_true=instance_labels, y_pred=instance_predictions ) if self.instance_mask_perclass_metrics is not None: instance_labels['masks'] = labels['groundtruths']['masks'] self.instance_mask_perclass_metrics.update_state( y_true=instance_labels, y_pred=instance_predictions ) def validation_step(self, inputs: Tuple[Any, Any], model: tf.keras.Model, metrics: Optional[List[Any]] = None): """Validatation step. Args: inputs: a dictionary of input tensors. model: the keras.Model. metrics: a nested structure of metrics objects. Returns: A dictionary of logs. 
""" images, labels = inputs outputs = model( images, anchor_boxes=labels['anchor_boxes'], image_shape=labels['image_info'][:, 1, :], training=False, ) logs = {self.loss: 0} self._update_metrics(labels, outputs, logs) if ( hasattr(self.task_config, 'allow_image_summary') and self.task_config.allow_image_summary ): logs.update( {'visualization': (tf.cast(images, dtype=tf.float32), outputs)} ) return logs def aggregate_logs( self, state: Optional[Any] = None, step_outputs: Optional[Dict[str, Any]] = None, ) -> Optional[Any]: """Optional aggregation over logs returned from a validation step.""" if not state: # The metrics which update state on CPU. if self.task_config.use_coco_metrics: self.coco_metric.reset_states() if self.task_config.use_wod_metrics: self.wod_metric.reset_states() if self.task_config.use_coco_metrics: self.coco_metric.update_state( step_outputs[self.coco_metric.name][0], step_outputs[self.coco_metric.name][1], ) if self.task_config.use_wod_metrics: self.wod_metric.update_state( step_outputs[self.wod_metric.name][0], step_outputs[self.wod_metric.name][1], ) if 'visualization' in step_outputs: # Update detection state for writing summary if there are artifacts for # visualization. if state is None: state = {} state.update(visualization_utils.update_detection_state(step_outputs)) # TODO(allenyan): Mapping `detection_masks` (w.r.t. the `gt_boxes`) back # to full masks (w.r.t. the image). Disable mask visualization fow now. state.pop('detection_masks', None) if not state: # Create an arbitrary state to indicate it's not the first step in the # following calls to this function. state = True return state def _reduce_instance_metrics( self, logs: Dict[str, Any], use_masks: bool = False ): """Updates the per class and mean instance metrics in the logs.""" if use_masks: instance_metrics = self.instance_mask_perclass_metrics prefix = 'mask_' else: instance_metrics = self.instance_box_perclass_metrics prefix = '' result = instance_metrics.result() iou_thresholds = instance_metrics.get_config()['iou_thresholds'] for ap_key in instance_metrics.get_average_precision_metrics_keys(): # (num_iou_thresholds, num_classes) per_class_ap = tf.where( result['valid_classes'], result[ap_key], tf.zeros_like(result[ap_key]) ) # (num_iou_thresholds,) mean_ap_by_iou = tf.math.divide_no_nan( tf.reduce_sum(per_class_ap, axis=-1), tf.reduce_sum( tf.cast(result['valid_classes'], dtype=per_class_ap.dtype), axis=-1, ), ) logs[f'{prefix}{ap_key}'] = tf.reduce_mean(mean_ap_by_iou) for j, iou in enumerate(iou_thresholds): if int(iou * 100) in {50, 75}: logs[f'{prefix}{ap_key}{int(iou * 100)}'] = mean_ap_by_iou[j] if self.task_config.per_category_metrics: # (num_classes,) per_class_mean_ap = tf.reduce_mean(per_class_ap, axis=0) valid_classes = result['valid_classes'].numpy() for k in range(self.task_config.model.num_classes): if valid_classes[k]: logs[f'{prefix}{ap_key} ByCategory/{k}'] = per_class_mean_ap[k] for j, iou in enumerate(iou_thresholds): if int(iou * 100) in {50, 75}: logs[f'{prefix}{ap_key}{int(iou * 100)} ByCategory/{k}'] = ( per_class_ap[j][k] ) def reduce_aggregated_logs( self, aggregated_logs: Dict[str, Any], global_step: Optional[tf.Tensor] = None, ) -> Dict[str, tf.Tensor]: """Optional reduce of aggregated logs over validation steps.""" logs = {} # The metrics which update state on device. 
if self.instance_box_perclass_metrics is not None: self._reduce_instance_metrics(logs, use_masks=False) self.instance_box_perclass_metrics.reset_state() if self.instance_mask_perclass_metrics is not None: self._reduce_instance_metrics(logs, use_masks=True) self.instance_mask_perclass_metrics.reset_state() # The metrics which update state on CPU. if self.task_config.use_coco_metrics: logs.update(self.coco_metric.result()) if self.task_config.use_wod_metrics: logs.update(self.wod_metric.result()) # Add visualization for summary. if isinstance(aggregated_logs, dict) and 'image' in aggregated_logs: validation_outputs = visualization_utils.visualize_outputs( logs=aggregated_logs, task_config=self.task_config ) logs.update(validation_outputs) return logs
25,240
39.192675
138
py
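A small worked example of the zero_out_disallowed_class_ids helper defined in maskrcnn.py above, showing how class IDs outside the allowed set are zeroed so the mask loss ignores them. The input tensors are toy values.

import tensorflow as tf

batch_class_ids = tf.constant([[1, 2, 3], [4, 2, 0]])
allowed = tf.constant([2, 3])
match = tf.reduce_any(
    batch_class_ids[:, :, tf.newaxis] == allowed[tf.newaxis, tf.newaxis, :],
    axis=2)
filtered = tf.where(match, batch_class_ids, tf.zeros_like(batch_class_ids))
# filtered -> [[0, 2, 3], [0, 2, 0]]; IDs not in `allowed` become 0 and are
# then ignored in the mask loss computation.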
models
models-master/official/vision/tasks/image_classification.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image classification task definition.""" from typing import Any, List, Optional, Tuple from absl import logging import tensorflow as tf from official.common import dataset_fn from official.core import base_task from official.core import task_factory from official.modeling import tf_utils from official.vision.configs import image_classification as exp_cfg from official.vision.dataloaders import classification_input from official.vision.dataloaders import input_reader from official.vision.dataloaders import input_reader_factory from official.vision.dataloaders import tfds_factory from official.vision.modeling import factory from official.vision.ops import augment _EPSILON = 1e-6 @task_factory.register_task_cls(exp_cfg.ImageClassificationTask) class ImageClassificationTask(base_task.Task): """A task for image classification.""" def build_model(self): """Builds classification model.""" input_specs = tf.keras.layers.InputSpec( shape=[None] + self.task_config.model.input_size) l2_weight_decay = self.task_config.losses.l2_weight_decay # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss. # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2) # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss) l2_regularizer = (tf.keras.regularizers.l2( l2_weight_decay / 2.0) if l2_weight_decay else None) model = factory.build_classification_model( input_specs=input_specs, model_config=self.task_config.model, l2_regularizer=l2_regularizer) if self.task_config.freeze_backbone: model.backbone.trainable = False # Builds the model dummy_inputs = tf.keras.Input(self.task_config.model.input_size) _ = model(dummy_inputs, training=False) return model def initialize(self, model: tf.keras.Model): """Loads pretrained checkpoint.""" if not self.task_config.init_checkpoint: return ckpt_dir_or_file = self.task_config.init_checkpoint if tf.io.gfile.isdir(ckpt_dir_or_file): ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) # Restoring checkpoint. 
if self.task_config.init_checkpoint_modules == 'all': ckpt = tf.train.Checkpoint(model=model) status = ckpt.read(ckpt_dir_or_file) status.expect_partial().assert_existing_objects_matched() elif self.task_config.init_checkpoint_modules == 'backbone': ckpt = tf.train.Checkpoint(backbone=model.backbone) status = ckpt.read(ckpt_dir_or_file) status.expect_partial().assert_existing_objects_matched() else: raise ValueError( "Only 'all' or 'backbone' can be used to initialize the model.") logging.info('Finished loading pretrained checkpoint from %s', ckpt_dir_or_file) def build_inputs( self, params: exp_cfg.DataConfig, input_context: Optional[tf.distribute.InputContext] = None ) -> tf.data.Dataset: """Builds classification input.""" num_classes = self.task_config.model.num_classes input_size = self.task_config.model.input_size image_field_key = self.task_config.train_data.image_field_key label_field_key = self.task_config.train_data.label_field_key is_multilabel = self.task_config.train_data.is_multilabel if params.tfds_name: decoder = tfds_factory.get_classification_decoder(params.tfds_name) else: decoder = classification_input.Decoder( image_field_key=image_field_key, label_field_key=label_field_key, is_multilabel=is_multilabel) parser = classification_input.Parser( output_size=input_size[:2], num_classes=num_classes, image_field_key=image_field_key, label_field_key=label_field_key, decode_jpeg_only=params.decode_jpeg_only, aug_rand_hflip=params.aug_rand_hflip, aug_crop=params.aug_crop, aug_type=params.aug_type, color_jitter=params.color_jitter, random_erasing=params.random_erasing, is_multilabel=is_multilabel, dtype=params.dtype, center_crop_fraction=params.center_crop_fraction, tf_resize_method=params.tf_resize_method, three_augment=params.three_augment) postprocess_fn = None if params.mixup_and_cutmix: postprocess_fn = augment.MixupAndCutmix( mixup_alpha=params.mixup_and_cutmix.mixup_alpha, cutmix_alpha=params.mixup_and_cutmix.cutmix_alpha, prob=params.mixup_and_cutmix.prob, label_smoothing=params.mixup_and_cutmix.label_smoothing, num_classes=num_classes) def sample_fn(repeated_augment, dataset): weights = [1 / repeated_augment] * repeated_augment dataset = tf.data.Dataset.sample_from_datasets( datasets=[dataset] * repeated_augment, weights=weights, seed=None, stop_on_empty_dataset=True, ) return dataset is_repeated_augment = ( params.is_training and params.repeated_augment is not None ) reader = input_reader_factory.input_reader_generator( params, dataset_fn=dataset_fn.pick_dataset_fn(params.file_type), decoder_fn=decoder.decode, combine_fn=input_reader.create_combine_fn(params), parser_fn=parser.parse_fn(params.is_training), postprocess_fn=postprocess_fn, sample_fn=(lambda ds: sample_fn(params.repeated_augment, ds)) if is_repeated_augment else None, ) dataset = reader.read(input_context=input_context) return dataset def build_losses(self, labels: tf.Tensor, model_outputs: tf.Tensor, aux_losses: Optional[Any] = None) -> tf.Tensor: """Builds sparse categorical cross entropy loss. Args: labels: Input groundtruth labels. model_outputs: Output logits of the classifier. aux_losses: The auxiliarly loss tensors, i.e. `losses` in tf.keras.Model. Returns: The total loss tensor. """ losses_config = self.task_config.losses is_multilabel = self.task_config.train_data.is_multilabel if not is_multilabel: if losses_config.use_binary_cross_entropy: total_loss = tf.nn.sigmoid_cross_entropy_with_logits( labels=labels, logits=model_outputs ) # Average over all object classes inside an image. 
total_loss = tf.reduce_mean(total_loss, axis=-1) elif losses_config.one_hot: total_loss = tf.keras.losses.categorical_crossentropy( labels, model_outputs, from_logits=True, label_smoothing=losses_config.label_smoothing) elif losses_config.soft_labels: total_loss = tf.nn.softmax_cross_entropy_with_logits( labels, model_outputs) else: total_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, model_outputs, from_logits=True) else: # Multi-label binary cross entropy loss. This will apply `reduce_mean`. total_loss = tf.keras.losses.binary_crossentropy( labels, model_outputs, from_logits=True, label_smoothing=losses_config.label_smoothing, axis=-1) # Multiple num_classes to behave like `reduce_sum`. total_loss = total_loss * self.task_config.model.num_classes total_loss = tf_utils.safe_mean(total_loss) if aux_losses: total_loss += tf.add_n(aux_losses) total_loss = losses_config.loss_weight * total_loss return total_loss def build_metrics(self, training: bool = True) -> List[tf.keras.metrics.Metric]: """Gets streaming metrics for training/validation.""" is_multilabel = self.task_config.train_data.is_multilabel if not is_multilabel: k = self.task_config.evaluation.top_k if (self.task_config.losses.one_hot or self.task_config.losses.soft_labels): metrics = [ tf.keras.metrics.CategoricalAccuracy(name='accuracy'), tf.keras.metrics.TopKCategoricalAccuracy( k=k, name='top_{}_accuracy'.format(k))] if hasattr( self.task_config.evaluation, 'precision_and_recall_thresholds' ) and self.task_config.evaluation.precision_and_recall_thresholds: thresholds = self.task_config.evaluation.precision_and_recall_thresholds # pylint: disable=line-too-long # pylint:disable=g-complex-comprehension metrics += [ tf.keras.metrics.Precision( thresholds=th, name='precision_at_threshold_{}'.format(th), top_k=1) for th in thresholds ] metrics += [ tf.keras.metrics.Recall( thresholds=th, name='recall_at_threshold_{}'.format(th), top_k=1) for th in thresholds ] # Add per-class precision and recall. if hasattr( self.task_config.evaluation, 'report_per_class_precision_and_recall' ) and self.task_config.evaluation.report_per_class_precision_and_recall: for class_id in range(self.task_config.model.num_classes): metrics += [ tf.keras.metrics.Precision( thresholds=th, class_id=class_id, name=f'precision_at_threshold_{th}/{class_id}', top_k=1) for th in thresholds ] metrics += [ tf.keras.metrics.Recall( thresholds=th, class_id=class_id, name=f'recall_at_threshold_{th}/{class_id}', top_k=1) for th in thresholds ] # pylint:enable=g-complex-comprehension else: metrics = [ tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'), tf.keras.metrics.SparseTopKCategoricalAccuracy( k=k, name='top_{}_accuracy'.format(k))] else: metrics = [] # These metrics destablize the training if included in training. The jobs # fail due to OOM. # TODO(arashwan): Investigate adding following metric to train. if not training: metrics = [ tf.keras.metrics.AUC( name='globalPR-AUC', curve='PR', multi_label=False, from_logits=True), tf.keras.metrics.AUC( name='meanPR-AUC', curve='PR', multi_label=True, num_labels=self.task_config.model.num_classes, from_logits=True), ] return metrics def train_step(self, inputs: Tuple[Any, Any], model: tf.keras.Model, optimizer: tf.keras.optimizers.Optimizer, metrics: Optional[List[Any]] = None): """Does forward and backward. Args: inputs: A tuple of input tensors of (features, labels). model: A tf.keras.Model instance. optimizer: The optimizer for this training step. metrics: A nested structure of metrics objects. 
Returns: A dictionary of logs. """ features, labels = inputs is_multilabel = self.task_config.train_data.is_multilabel if self.task_config.losses.one_hot and not is_multilabel: labels = tf.one_hot(labels, self.task_config.model.num_classes) if self.task_config.losses.use_binary_cross_entropy: # BCE loss converts the multiclass classification to multilabel. The # corresponding label value of objects present in the image would be one. if self.task_config.train_data.mixup_and_cutmix is not None: # label values below off_value_threshold would be mapped to zero and # above that would be mapped to one. Negative labels are guaranteed to # have value less than or equal value of the off_value from mixup. off_value_threshold = ( self.task_config.train_data.mixup_and_cutmix.label_smoothing / self.task_config.model.num_classes ) labels = tf.where( tf.less(labels, off_value_threshold + _EPSILON), 0.0, 1.0) elif tf.rank(labels) == 1: labels = tf.one_hot(labels, self.task_config.model.num_classes) num_replicas = tf.distribute.get_strategy().num_replicas_in_sync with tf.GradientTape() as tape: outputs = model(features, training=True) # Casting output layer as float32 is necessary when mixed_precision is # mixed_float16 or mixed_bfloat16 to ensure output is casted as float32. outputs = tf.nest.map_structure( lambda x: tf.cast(x, tf.float32), outputs) # Computes per-replica loss. loss = self.build_losses( model_outputs=outputs, labels=labels, aux_losses=model.losses) # Scales loss as the default gradients allreduce performs sum inside the # optimizer. scaled_loss = loss / num_replicas # For mixed_precision policy, when LossScaleOptimizer is used, loss is # scaled for numerical stability. if isinstance( optimizer, tf.keras.mixed_precision.LossScaleOptimizer): scaled_loss = optimizer.get_scaled_loss(scaled_loss) tvars = model.trainable_variables grads = tape.gradient(scaled_loss, tvars) # Scales back gradient before apply_gradients when LossScaleOptimizer is # used. if isinstance( optimizer, tf.keras.mixed_precision.LossScaleOptimizer): grads = optimizer.get_unscaled_gradients(grads) optimizer.apply_gradients(list(zip(grads, tvars))) logs = {self.loss: loss} # Convert logits to softmax for metric computation if needed. if hasattr(self.task_config.model, 'output_softmax') and self.task_config.model.output_softmax: outputs = tf.nn.softmax(outputs, axis=-1) if metrics: self.process_metrics(metrics, labels, outputs) elif model.compiled_metrics: self.process_compiled_metrics(model.compiled_metrics, labels, outputs) logs.update({m.name: m.result() for m in model.metrics}) return logs def validation_step(self, inputs: Tuple[Any, Any], model: tf.keras.Model, metrics: Optional[List[Any]] = None): """Runs validatation step. Args: inputs: A tuple of input tensors of (features, labels). model: A tf.keras.Model instance. metrics: A nested structure of metrics objects. Returns: A dictionary of logs. """ features, labels = inputs one_hot = self.task_config.losses.one_hot soft_labels = self.task_config.losses.soft_labels is_multilabel = self.task_config.train_data.is_multilabel # Note: `soft_labels`` only apply to the training phrase. In the validation # phrase, labels should still be integer ids and need to be converted to # one hot format. 
if (one_hot or soft_labels) and not is_multilabel: labels = tf.one_hot(labels, self.task_config.model.num_classes) outputs = self.inference_step(features, model) outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs) loss = self.build_losses( model_outputs=outputs, labels=labels, aux_losses=model.losses) logs = {self.loss: loss} # Convert logits to softmax for metric computation if needed. if hasattr(self.task_config.model, 'output_softmax') and self.task_config.model.output_softmax: outputs = tf.nn.softmax(outputs, axis=-1) if metrics: self.process_metrics(metrics, labels, outputs) elif model.compiled_metrics: self.process_compiled_metrics(model.compiled_metrics, labels, outputs) logs.update({m.name: m.result() for m in model.metrics}) return logs def inference_step(self, inputs: tf.Tensor, model: tf.keras.Model): """Performs the forward step.""" return model(inputs, training=False)
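

# --------------------------------------------------------------------------
# Hedged illustration (not part of the original task implementation): a small
# numeric check of the multi-label loss path in `build_losses` above.
# `tf.keras.losses.binary_crossentropy` averages over the class axis, so
# multiplying by `num_classes` turns that mean back into a per-example sum of
# per-class binary cross-entropy terms. The label and logit values below are
# made up purely for illustration; the helper is wrapped in a function so it
# is not executed at import time.
def _check_multilabel_bce_scaling():
  num_classes = 4
  labels = tf.constant([[1.0, 0.0, 1.0, 0.0]])
  logits = tf.constant([[2.0, -1.0, 0.5, -2.0]])

  mean_bce = tf.keras.losses.binary_crossentropy(
      labels, logits, from_logits=True)
  per_class_sum = tf.reduce_sum(
      tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits),
      axis=-1)
  # Scaling the per-class mean by the number of classes recovers the sum.
  tf.debugging.assert_near(mean_bce * num_classes, per_class_sum)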
16,689
38.086651
115
py
models
models-master/official/vision/ops/preprocess_ops.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Preprocessing ops.""" import math from typing import Optional, Sequence, Tuple, Union from six.moves import range import tensorflow as tf from official.vision.ops import augment from official.vision.ops import box_ops CENTER_CROP_FRACTION = 0.875 # Calculated from the ImageNet training set MEAN_NORM = (0.485, 0.456, 0.406) STDDEV_NORM = (0.229, 0.224, 0.225) MEAN_RGB = tuple(255 * i for i in MEAN_NORM) STDDEV_RGB = tuple(255 * i for i in STDDEV_NORM) # Alias for convenience. PLEASE use `box_ops.horizontal_flip_boxes` directly. horizontal_flip_boxes = box_ops.horizontal_flip_boxes vertical_flip_boxes = box_ops.vertical_flip_boxes def clip_or_pad_to_fixed_size(input_tensor, size, constant_values=0): """Pads data to a fixed length at the first dimension. Args: input_tensor: `Tensor` with any dimension. size: `int` number for the first dimension of output Tensor. constant_values: `int` value assigned to the paddings. Returns: `Tensor` with the first dimension padded to `size`. """ input_shape = input_tensor.get_shape().as_list() padding_shape = [] # Computes the padding length on the first dimension, clip input tensor if it # is longer than `size`. input_length = tf.shape(input_tensor)[0] input_length = tf.clip_by_value(input_length, 0, size) input_tensor = input_tensor[:input_length] padding_length = tf.maximum(0, size - input_length) padding_shape.append(padding_length) # Copies shapes of the rest of input shape dimensions. for i in range(1, len(input_shape)): padding_shape.append(tf.shape(input_tensor)[i]) # Pads input tensor to the fixed first dimension. paddings = tf.cast(constant_values * tf.ones(padding_shape), input_tensor.dtype) padded_tensor = tf.concat([input_tensor, paddings], axis=0) output_shape = input_shape output_shape[0] = size padded_tensor.set_shape(output_shape) return padded_tensor def normalize_image(image: tf.Tensor, offset: Sequence[float] = MEAN_NORM, scale: Sequence[float] = STDDEV_NORM): """Normalizes the image to zero mean and unit variance.""" with tf.name_scope('normalize_image'): image = tf.image.convert_image_dtype(image, dtype=tf.float32) return normalize_scaled_float_image(image, offset, scale) def normalize_scaled_float_image(image: tf.Tensor, offset: Sequence[float] = MEAN_NORM, scale: Sequence[float] = STDDEV_NORM): """Normalizes a scaled float image to zero mean and unit variance. It assumes the input image is float dtype with values in [0, 1). Args: image: A tf.Tensor in float32 dtype with values in range [0, 1). offset: A tuple of mean values to be subtracted from the image. scale: A tuple of normalization factors. Returns: A normalized image tensor. 
""" offset = tf.constant(offset) offset = tf.expand_dims(offset, axis=0) offset = tf.expand_dims(offset, axis=0) image -= offset scale = tf.constant(scale) scale = tf.expand_dims(scale, axis=0) scale = tf.expand_dims(scale, axis=0) image /= scale return image def compute_padded_size(desired_size, stride): """Compute the padded size given the desired size and the stride. The padded size will be the smallest rectangle, such that each dimension is the smallest multiple of the stride which is larger than the desired dimension. For example, if desired_size = (100, 200) and stride = 32, the output padded_size = (128, 224). Args: desired_size: a `Tensor` or `int` list/tuple of two elements representing [height, width] of the target output image size. stride: an integer, the stride of the backbone network. Returns: padded_size: a `Tensor` or `int` list/tuple of two elements representing [height, width] of the padded output image size. """ if isinstance(desired_size, list) or isinstance(desired_size, tuple): padded_size = [int(math.ceil(d * 1.0 / stride) * stride) for d in desired_size] else: padded_size = tf.cast( tf.math.ceil( tf.cast(desired_size, dtype=tf.float32) / stride) * stride, tf.int32) return padded_size def resize_and_crop_image(image, desired_size, padded_size, aug_scale_min=1.0, aug_scale_max=1.0, seed=1, method=tf.image.ResizeMethod.BILINEAR): """Resizes the input image to output size (RetinaNet style). Resize and pad images given the desired output size of the image and stride size. Here are the preprocessing steps. 1. For a given image, keep its aspect ratio and rescale the image to make it the largest rectangle to be bounded by the rectangle specified by the `desired_size`. 2. Pad the rescaled image to the padded_size. Args: image: a `Tensor` of shape [height, width, 3] representing an image. desired_size: a `Tensor` or `int` list/tuple of two elements representing [height, width] of the desired actual output image size. padded_size: a `Tensor` or `int` list/tuple of two elements representing [height, width] of the padded output image size. Padding will be applied after scaling the image to the desired_size. Can be None to disable padding. aug_scale_min: a `float` with range between [0, 1.0] representing minimum random scale applied to desired_size for training scale jittering. aug_scale_max: a `float` with range between [1.0, inf] representing maximum random scale applied to desired_size for training scale jittering. seed: seed for random scale jittering. method: function to resize input image to scaled image. Returns: output_image: `Tensor` of shape [height, width, 3] where [height, width] equals to `output_size`. image_info: a 2D `Tensor` that encodes the information of the image and the applied preprocessing. It is in the format of [[original_height, original_width], [desired_height, desired_width], [y_scale, x_scale], [y_offset, x_offset]], where [desired_height, desired_width] is the actual scaled image size, and [y_scale, x_scale] is the scaling factor, which is the ratio of scaled dimension / original dimension. 
""" with tf.name_scope('resize_and_crop_image'): image_size = tf.cast(tf.shape(image)[0:2], tf.float32) random_jittering = ( isinstance(aug_scale_min, tf.Tensor) or isinstance(aug_scale_max, tf.Tensor) or not math.isclose(aug_scale_min, 1.0) or not math.isclose(aug_scale_max, 1.0) ) if random_jittering: random_scale = tf.random.uniform( [], aug_scale_min, aug_scale_max, seed=seed) scaled_size = tf.round(random_scale * tf.cast(desired_size, tf.float32)) else: scaled_size = tf.cast(desired_size, tf.float32) scale = tf.minimum( scaled_size[0] / image_size[0], scaled_size[1] / image_size[1]) scaled_size = tf.round(image_size * scale) # Computes 2D image_scale. image_scale = scaled_size / image_size # Selects non-zero random offset (x, y) if scaled image is larger than # desired_size. if random_jittering: max_offset = scaled_size - tf.cast(desired_size, tf.float32) max_offset = tf.where( tf.less(max_offset, 0), tf.zeros_like(max_offset), max_offset) offset = max_offset * tf.random.uniform([2,], 0, 1, seed=seed) offset = tf.cast(offset, tf.int32) else: offset = tf.zeros((2,), tf.int32) scaled_image = tf.image.resize( image, tf.cast(scaled_size, tf.int32), method=method) if random_jittering: scaled_image = scaled_image[ offset[0]:offset[0] + desired_size[0], offset[1]:offset[1] + desired_size[1], :] output_image = scaled_image if padded_size is not None: output_image = tf.image.pad_to_bounding_box( scaled_image, 0, 0, padded_size[0], padded_size[1]) image_info = tf.stack([ image_size, tf.cast(desired_size, dtype=tf.float32), image_scale, tf.cast(offset, tf.float32)]) return output_image, image_info def resize_and_crop_image_v2(image, short_side, long_side, padded_size, aug_scale_min=1.0, aug_scale_max=1.0, seed=1, method=tf.image.ResizeMethod.BILINEAR): """Resizes the input image to output size (Faster R-CNN style). Resize and pad images given the specified short / long side length and the stride size. Here are the preprocessing steps. 1. For a given image, keep its aspect ratio and first try to rescale the short side of the original image to `short_side`. 2. If the scaled image after 1 has a long side that exceeds `long_side`, keep the aspect ratio and rescale the long side of the image to `long_side`. 3. (Optional) Apply random jittering according to `aug_scale_min` and `aug_scale_max`. By default this step is skipped. 4. Pad the rescaled image to the padded_size. Args: image: a `Tensor` of shape [height, width, 3] representing an image. short_side: a scalar `Tensor` or `int` representing the desired short side to be rescaled to. long_side: a scalar `Tensor` or `int` representing the desired long side to be rescaled to. padded_size: a `Tensor` or `int` list/tuple of two elements representing [height, width] of the padded output image size. aug_scale_min: a `float` with range between [0, 1.0] representing minimum random scale applied for training scale jittering. aug_scale_max: a `float` with range between [1.0, inf] representing maximum random scale applied for training scale jittering. seed: seed for random scale jittering. method: function to resize input image to scaled image. Returns: output_image: `Tensor` of shape [height, width, 3] where [height, width] equals to `output_size`. image_info: a 2D `Tensor` that encodes the information of the image and the applied preprocessing. 
It is in the format of [[original_height, original_width], [desired_height, desired_width], [y_scale, x_scale], [y_offset, x_offset]], where [desired_height, desired_width] is the actual scaled image size, and [y_scale, x_scale] is the scaling factor, which is the ratio of scaled dimension / original dimension. """ with tf.name_scope('resize_and_crop_image_v2'): image_size = tf.cast(tf.shape(image)[0:2], tf.float32) scale_using_short_side = ( short_side / tf.math.minimum(image_size[0], image_size[1])) scale_using_long_side = ( long_side / tf.math.maximum(image_size[0], image_size[1])) scaled_size = tf.math.round(image_size * scale_using_short_side) scaled_size = tf.where( tf.math.greater( tf.math.maximum(scaled_size[0], scaled_size[1]), long_side), tf.math.round(image_size * scale_using_long_side), scaled_size) desired_size = scaled_size random_jittering = ( isinstance(aug_scale_min, tf.Tensor) or isinstance(aug_scale_max, tf.Tensor) or not math.isclose(aug_scale_min, 1.0) or not math.isclose(aug_scale_max, 1.0) ) if random_jittering: random_scale = tf.random.uniform( [], aug_scale_min, aug_scale_max, seed=seed) scaled_size = tf.math.round(random_scale * scaled_size) # Computes 2D image_scale. image_scale = scaled_size / image_size # Selects non-zero random offset (x, y) if scaled image is larger than # desired_size. if random_jittering: max_offset = scaled_size - desired_size max_offset = tf.where( tf.math.less(max_offset, 0), tf.zeros_like(max_offset), max_offset) offset = max_offset * tf.random.uniform([2,], 0, 1, seed=seed) offset = tf.cast(offset, tf.int32) else: offset = tf.zeros((2,), tf.int32) scaled_image = tf.image.resize( image, tf.cast(scaled_size, tf.int32), method=method) if random_jittering: scaled_image = scaled_image[ offset[0]:offset[0] + desired_size[0], offset[1]:offset[1] + desired_size[1], :] output_image = tf.image.pad_to_bounding_box( scaled_image, 0, 0, padded_size[0], padded_size[1]) image_info = tf.stack([ image_size, tf.cast(desired_size, dtype=tf.float32), image_scale, tf.cast(offset, tf.float32)]) return output_image, image_info def resize_image( image: tf.Tensor, size: Union[Tuple[int, int], int], max_size: Optional[int] = None, method: tf.image.ResizeMethod = tf.image.ResizeMethod.BILINEAR): """Resize image with size and max_size. Args: image: the image to be resized. size: if list to tuple, resize to it. If scalar, we keep the same aspect ratio and resize the short side to the value. max_size: only used when size is a scalar. When the larger side is larger than max_size after resized with size we used max_size to keep the aspect ratio instead. method: the method argument passed to tf.image.resize. Returns: the resized image and image_info to be used for downstream processing. image_info: a 2D `Tensor` that encodes the information of the image and the applied preprocessing. It is in the format of [[original_height, original_width], [resized_height, resized_width], [y_scale, x_scale], [0, 0]], where [resized_height, resized_width] is the actual scaled image size, and [y_scale, x_scale] is the scaling factor, which is the ratio of scaled dimension / original dimension. 
""" def get_size_with_aspect_ratio(image_size, size, max_size=None): h = image_size[0] w = image_size[1] if max_size is not None: min_original_size = tf.cast(tf.math.minimum(w, h), dtype=tf.float32) max_original_size = tf.cast(tf.math.maximum(w, h), dtype=tf.float32) if max_original_size / min_original_size * size > max_size: size = tf.cast( tf.math.floor(max_size * min_original_size / max_original_size), dtype=tf.int32) else: size = tf.cast(size, tf.int32) else: size = tf.cast(size, tf.int32) if (w <= h and w == size) or (h <= w and h == size): return tf.stack([h, w]) if w < h: ow = size oh = tf.cast( (tf.cast(size, dtype=tf.float32) * tf.cast(h, dtype=tf.float32) / tf.cast(w, dtype=tf.float32)), dtype=tf.int32) else: oh = size ow = tf.cast( (tf.cast(size, dtype=tf.float32) * tf.cast(w, dtype=tf.float32) / tf.cast(h, dtype=tf.float32)), dtype=tf.int32) return tf.stack([oh, ow]) def get_size(image_size, size, max_size=None): if isinstance(size, (list, tuple)): return size[::-1] else: return get_size_with_aspect_ratio(image_size, size, max_size) orignal_size = tf.shape(image)[0:2] size = get_size(orignal_size, size, max_size) rescaled_image = tf.image.resize( image, tf.cast(size, tf.int32), method=method) image_scale = size / orignal_size image_info = tf.stack([ tf.cast(orignal_size, dtype=tf.float32), tf.cast(size, dtype=tf.float32), tf.cast(image_scale, tf.float32), tf.constant([0.0, 0.0], dtype=tf.float32) ]) return rescaled_image, image_info def center_crop_image( image, center_crop_fraction: float = CENTER_CROP_FRACTION): """Center crop a square shape slice from the input image. It crops a square shape slice from the image. The side of the actual crop is 224 / 256 = 0.875 of the short side of the original image. References: [1] Very Deep Convolutional Networks for Large-Scale Image Recognition https://arxiv.org/abs/1409.1556 [2] Deep Residual Learning for Image Recognition https://arxiv.org/abs/1512.03385 Args: image: a Tensor of shape [height, width, 3] representing the input image. center_crop_fraction: a float of ratio between the side of the cropped image and the short side of the original image Returns: cropped_image: a Tensor representing the center cropped image. """ with tf.name_scope('center_crop_image'): image_size = tf.cast(tf.shape(image)[:2], dtype=tf.float32) crop_size = ( center_crop_fraction * tf.math.minimum(image_size[0], image_size[1])) crop_offset = tf.cast((image_size - crop_size) / 2.0, dtype=tf.int32) crop_size = tf.cast(crop_size, dtype=tf.int32) cropped_image = image[ crop_offset[0]:crop_offset[0] + crop_size, crop_offset[1]:crop_offset[1] + crop_size, :] return cropped_image def center_crop_image_v2( image_bytes, image_shape, center_crop_fraction: float = CENTER_CROP_FRACTION ): """Center crop a square shape slice from the input image. It crops a square shape slice from the image. The side of the actual crop is 224 / 256 = 0.875 of the short side of the original image. References: [1] Very Deep Convolutional Networks for Large-Scale Image Recognition https://arxiv.org/abs/1409.1556 [2] Deep Residual Learning for Image Recognition https://arxiv.org/abs/1512.03385 This is a faster version of `center_crop_image` which takes the original image bytes and image size as the inputs, and partially decode the JPEG bytes according to the center crop. Args: image_bytes: a Tensor of type string representing the raw image bytes. image_shape: a Tensor specifying the shape of the raw image. 
center_crop_fraction: a float of ratio between the side of the cropped image and the short side of the original image Returns: cropped_image: a Tensor representing the center cropped image. """ with tf.name_scope('center_image_crop_v2'): image_shape = tf.cast(image_shape, tf.float32) crop_size = center_crop_fraction * tf.math.minimum( image_shape[0], image_shape[1] ) crop_offset = tf.cast((image_shape - crop_size) / 2.0, dtype=tf.int32) crop_size = tf.cast(crop_size, dtype=tf.int32) crop_window = tf.stack( [crop_offset[0], crop_offset[1], crop_size, crop_size]) cropped_image = tf.image.decode_and_crop_jpeg( image_bytes, crop_window, channels=3) return cropped_image def random_crop_image(image, aspect_ratio_range=(3. / 4., 4. / 3.), area_range=(0.08, 1.0), max_attempts=10, seed=1): """Randomly crop an arbitrary shaped slice from the input image. Args: image: a Tensor of shape [height, width, 3] representing the input image. aspect_ratio_range: a list of floats. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: a list of floats. The cropped reas of the image must contain a fraction of the input image within this range. max_attempts: the number of attempts at generating a cropped region of the image of the specified constraints. After max_attempts failures, return the entire image. seed: the seed of the random generator. Returns: cropped_image: a Tensor representing the random cropped image. Can be the original image if max_attempts is exhausted. """ with tf.name_scope('random_crop_image'): crop_offset, crop_size, _ = tf.image.sample_distorted_bounding_box( tf.shape(image), tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]), seed=seed, min_object_covered=area_range[0], aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts) cropped_image = tf.slice(image, crop_offset, crop_size) return cropped_image def random_crop_image_v2(image_bytes, image_shape, aspect_ratio_range=(3. / 4., 4. / 3.), area_range=(0.08, 1.0), max_attempts=10, seed=1): """Randomly crop an arbitrary shaped slice from the input image. This is a faster version of `random_crop_image` which takes the original image bytes and image size as the inputs, and partially decode the JPEG bytes according to the generated crop. Args: image_bytes: a Tensor of type string representing the raw image bytes. image_shape: a Tensor specifying the shape of the raw image. aspect_ratio_range: a list of floats. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: a list of floats. The cropped reas of the image must contain a fraction of the input image within this range. max_attempts: the number of attempts at generating a cropped region of the image of the specified constraints. After max_attempts failures, return the entire image. seed: the seed of the random generator. Returns: cropped_image: a Tensor representing the random cropped image. Can be the original image if max_attempts is exhausted. 
""" with tf.name_scope('random_crop_image_v2'): crop_offset, crop_size, _ = tf.image.sample_distorted_bounding_box( image_shape, tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]), seed=seed, min_object_covered=area_range[0], aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts) offset_y, offset_x, _ = tf.unstack(crop_offset) crop_height, crop_width, _ = tf.unstack(crop_size) crop_window = tf.stack([offset_y, offset_x, crop_height, crop_width]) cropped_image = tf.image.decode_and_crop_jpeg( image_bytes, crop_window, channels=3) return cropped_image def resize_and_crop_boxes(boxes, image_scale, output_size, offset): """Resizes boxes to output size with scale and offset. Args: boxes: `Tensor` of shape [N, 4] representing ground truth boxes. image_scale: 2D float `Tensor` representing scale factors that apply to [height, width] of input image. output_size: 2D `Tensor` or `int` representing [height, width] of target output image size. offset: 2D `Tensor` representing top-left corner [y0, x0] to crop scaled boxes. Returns: boxes: `Tensor` of shape [N, 4] representing the scaled boxes. """ with tf.name_scope('resize_and_crop_boxes'): # Adjusts box coordinates based on image_scale and offset. boxes *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2]) boxes -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2]) # Clips the boxes. boxes = box_ops.clip_boxes(boxes, output_size) return boxes def resize_and_crop_masks(masks, image_scale, output_size, offset): """Resizes boxes to output size with scale and offset. Args: masks: `Tensor` of shape [N, H, W, C] representing ground truth masks. image_scale: 2D float `Tensor` representing scale factors that apply to [height, width] of input image. output_size: 2D `Tensor` or `int` representing [height, width] of target output image size. offset: 2D `Tensor` representing top-left corner [y0, x0] to crop scaled boxes. Returns: masks: `Tensor` of shape [N, H, W, C] representing the scaled masks. """ with tf.name_scope('resize_and_crop_masks'): mask_size = tf.cast(tf.shape(masks)[1:3], tf.float32) num_channels = tf.shape(masks)[3] # Pad masks to avoid empty mask annotations. masks = tf.concat([ tf.zeros([1, mask_size[0], mask_size[1], num_channels], dtype=masks.dtype), masks ], axis=0) scaled_size = tf.cast(image_scale * mask_size, tf.int32) scaled_masks = tf.image.resize( masks, scaled_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) offset = tf.cast(offset, tf.int32) scaled_masks = scaled_masks[ :, offset[0]:offset[0] + output_size[0], offset[1]:offset[1] + output_size[1], :] output_masks = tf.image.pad_to_bounding_box( scaled_masks, 0, 0, output_size[0], output_size[1]) # Remove padding. 
output_masks = output_masks[1::] return output_masks def horizontal_flip_image(image): """Flips image horizontally.""" return tf.image.flip_left_right(image) def horizontal_flip_masks(masks): """Flips masks horizontally.""" return masks[:, :, ::-1] def random_horizontal_flip( image, normalized_boxes=None, masks=None, seed=1, prob=0.5 ): """Randomly flips input image and bounding boxes horizontally.""" with tf.name_scope('random_horizontal_flip'): do_flip = tf.less(tf.random.uniform([], seed=seed), prob) image = tf.cond( do_flip, lambda: horizontal_flip_image(image), lambda: image) if normalized_boxes is not None: normalized_boxes = tf.cond( do_flip, lambda: horizontal_flip_boxes(normalized_boxes), lambda: normalized_boxes) if masks is not None: masks = tf.cond( do_flip, lambda: horizontal_flip_masks(masks), lambda: masks) return image, normalized_boxes, masks def random_horizontal_flip_with_roi( image: tf.Tensor, boxes: Optional[tf.Tensor] = None, masks: Optional[tf.Tensor] = None, roi_boxes: Optional[tf.Tensor] = None, seed: int = 1 ) -> Tuple[tf.Tensor, Optional[tf.Tensor], Optional[tf.Tensor], Optional[tf.Tensor]]: """Randomly flips input image and bounding boxes horizontally. Extends preprocess_ops.random_horizontal_flip to also flip roi_boxes used by ViLD. Args: image: `tf.Tensor`, the image to apply the random flip. boxes: `tf.Tensor` or `None`, boxes corresponding to the image. masks: `tf.Tensor` or `None`, masks corresponding to the image. roi_boxes: `tf.Tensor` or `None`, RoIs corresponding to the image. seed: Seed for Tensorflow's random number generator. Returns: image: `tf.Tensor`, flipped image. boxes: `tf.Tensor` or `None`, flipped boxes corresponding to the image. masks: `tf.Tensor` or `None`, flipped masks corresponding to the image. roi_boxes: `tf.Tensor` or `None`, flipped RoIs corresponding to the image. """ with tf.name_scope('random_horizontal_flip'): do_flip = tf.greater(tf.random.uniform([], seed=seed), 0.5) image = tf.cond(do_flip, lambda: horizontal_flip_image(image), lambda: image) if boxes is not None: boxes = tf.cond(do_flip, lambda: horizontal_flip_boxes(boxes), lambda: boxes) if masks is not None: masks = tf.cond(do_flip, lambda: horizontal_flip_masks(masks), lambda: masks) if roi_boxes is not None: roi_boxes = tf.cond(do_flip, lambda: horizontal_flip_boxes(roi_boxes), lambda: roi_boxes) return image, boxes, masks, roi_boxes def random_vertical_flip( image, normalized_boxes=None, masks=None, seed=1, prob=0.5 ): """Randomly flips input image and bounding boxes vertically.""" with tf.name_scope('random_vertical_flip'): do_flip = tf.less(tf.random.uniform([], seed=seed), prob) image = tf.cond( do_flip, lambda: tf.image.flip_up_down(image), lambda: image) if normalized_boxes is not None: normalized_boxes = tf.cond( do_flip, lambda: vertical_flip_boxes(normalized_boxes), lambda: normalized_boxes) if masks is not None: masks = tf.cond( do_flip, lambda: tf.image.flip_up_down(masks[..., None])[..., 0], lambda: masks) return image, normalized_boxes, masks def color_jitter(image: tf.Tensor, brightness: Optional[float] = 0., contrast: Optional[float] = 0., saturation: Optional[float] = 0., seed: Optional[int] = None) -> tf.Tensor: """Applies color jitter to an image, similarly to torchvision`s ColorJitter. Args: image (tf.Tensor): Of shape [height, width, 3] and type uint8. brightness (float, optional): Magnitude for brightness jitter. Defaults to 0. contrast (float, optional): Magnitude for contrast jitter. Defaults to 0. 
saturation (float, optional): Magnitude for saturation jitter. Defaults to 0. seed (int, optional): Random seed. Defaults to None. Returns: tf.Tensor: The augmented `image` of type uint8. """ image = tf.cast(image, dtype=tf.uint8) image = random_brightness(image, brightness, seed=seed) image = random_contrast(image, contrast, seed=seed) image = random_saturation(image, saturation, seed=seed) return image def random_brightness(image: tf.Tensor, brightness: float = 0., seed: Optional[int] = None) -> tf.Tensor: """Jitters brightness of an image. Args: image (tf.Tensor): Of shape [height, width, 3] and type uint8. brightness (float, optional): Magnitude for brightness jitter. Defaults to 0. seed (int, optional): Random seed. Defaults to None. Returns: tf.Tensor: The augmented `image` of type uint8. """ assert brightness >= 0, '`brightness` must be positive' brightness = tf.random.uniform([], max(0, 1 - brightness), 1 + brightness, seed=seed, dtype=tf.float32) return augment.brightness(image, brightness) def random_contrast(image: tf.Tensor, contrast: float = 0., seed: Optional[int] = None) -> tf.Tensor: """Jitters contrast of an image, similarly to torchvision`s ColorJitter. Args: image (tf.Tensor): Of shape [height, width, 3] and type uint8. contrast (float, optional): Magnitude for contrast jitter. Defaults to 0. seed (int, optional): Random seed. Defaults to None. Returns: tf.Tensor: The augmented `image` of type uint8. """ assert contrast >= 0, '`contrast` must be positive' contrast = tf.random.uniform([], max(0, 1 - contrast), 1 + contrast, seed=seed, dtype=tf.float32) return augment.contrast(image, contrast) def random_saturation(image: tf.Tensor, saturation: float = 0., seed: Optional[int] = None) -> tf.Tensor: """Jitters saturation of an image, similarly to torchvision`s ColorJitter. Args: image (tf.Tensor): Of shape [height, width, 3] and type uint8. saturation (float, optional): Magnitude for saturation jitter. Defaults to 0. seed (int, optional): Random seed. Defaults to None. Returns: tf.Tensor: The augmented `image` of type uint8. """ assert saturation >= 0, '`saturation` must be positive' saturation = tf.random.uniform([], max(0, 1 - saturation), 1 + saturation, seed=seed, dtype=tf.float32) return _saturation(image, saturation) def _saturation(image: tf.Tensor, saturation: Optional[float] = 0.) -> tf.Tensor: return augment.blend( tf.repeat(tf.image.rgb_to_grayscale(image), 3, axis=-1), image, saturation) def random_crop_image_with_boxes_and_labels(img, boxes, labels, min_scale, aspect_ratio_range, min_overlap_params, max_retry): """Crops a random slice from the input image. The function will correspondingly recompute the bounding boxes and filter out outside boxes and their labels. References: [1] End-to-End Object Detection with Transformers https://arxiv.org/abs/2005.12872 The preprocessing steps: 1. Sample a minimum IoU overlap. 2. For each trial, sample the new image width, height, and top-left corner. 3. Compute the IoUs of bounding boxes with the cropped image and retry if the maximum IoU is below the sampled threshold. 4. Find boxes whose centers are in the cropped image. 5. Compute new bounding boxes in the cropped region and only select those boxes' labels. Args: img: a 'Tensor' of shape [height, width, 3] representing the input image. boxes: a 'Tensor' of shape [N, 4] representing the ground-truth bounding boxes with (ymin, xmin, ymax, xmax). labels: a 'Tensor' of shape [N,] representing the class labels of the boxes. 
min_scale: a 'float' in [0.0, 1.0) indicating the lower bound of the random scale variable. aspect_ratio_range: a list of two 'float' that specifies the lower and upper bound of the random aspect ratio. min_overlap_params: a list of four 'float' representing the min value, max value, step size, and offset for the minimum overlap sample. max_retry: an 'int' representing the number of trials for cropping. If it is exhausted, no cropping will be performed. Returns: img: a Tensor representing the random cropped image. Can be the original image if max_retry is exhausted. boxes: a Tensor representing the bounding boxes in the cropped image. labels: a Tensor representing the new bounding boxes' labels. """ shape = tf.shape(img) original_h = shape[0] original_w = shape[1] minval, maxval, step, offset = min_overlap_params min_overlap = tf.math.floordiv( tf.random.uniform([], minval=minval, maxval=maxval), step) * step - offset min_overlap = tf.clip_by_value(min_overlap, 0.0, 1.1) if min_overlap > 1.0: return img, boxes, labels aspect_ratio_low = aspect_ratio_range[0] aspect_ratio_high = aspect_ratio_range[1] for _ in tf.range(max_retry): scale_h = tf.random.uniform([], min_scale, 1.0) scale_w = tf.random.uniform([], min_scale, 1.0) new_h = tf.cast( scale_h * tf.cast(original_h, dtype=tf.float32), dtype=tf.int32) new_w = tf.cast( scale_w * tf.cast(original_w, dtype=tf.float32), dtype=tf.int32) # Aspect ratio has to be in the prespecified range aspect_ratio = new_h / new_w if aspect_ratio_low > aspect_ratio or aspect_ratio > aspect_ratio_high: continue left = tf.random.uniform([], 0, original_w - new_w, dtype=tf.int32) right = left + new_w top = tf.random.uniform([], 0, original_h - new_h, dtype=tf.int32) bottom = top + new_h normalized_left = tf.cast( left, dtype=tf.float32) / tf.cast( original_w, dtype=tf.float32) normalized_right = tf.cast( right, dtype=tf.float32) / tf.cast( original_w, dtype=tf.float32) normalized_top = tf.cast( top, dtype=tf.float32) / tf.cast( original_h, dtype=tf.float32) normalized_bottom = tf.cast( bottom, dtype=tf.float32) / tf.cast( original_h, dtype=tf.float32) cropped_box = tf.expand_dims( tf.stack([ normalized_top, normalized_left, normalized_bottom, normalized_right, ]), axis=0) iou = box_ops.bbox_overlap( tf.expand_dims(cropped_box, axis=0), tf.expand_dims(boxes, axis=0)) # (1, 1, n_ground_truth) iou = tf.squeeze(iou, axis=[0, 1]) # If not a single bounding box has a Jaccard overlap of greater than # the minimum, try again if tf.reduce_max(iou) < min_overlap: continue centroids = box_ops.yxyx_to_cycxhw(boxes) mask = tf.math.logical_and( tf.math.logical_and(centroids[:, 0] > normalized_top, centroids[:, 0] < normalized_bottom), tf.math.logical_and(centroids[:, 1] > normalized_left, centroids[:, 1] < normalized_right)) # If not a single bounding box has its center in the crop, try again. 
if tf.reduce_sum(tf.cast(mask, dtype=tf.int32)) > 0: indices = tf.squeeze(tf.where(mask), axis=1) filtered_boxes = tf.gather(boxes, indices) boxes = tf.clip_by_value( (filtered_boxes[..., :] * tf.cast( tf.stack([original_h, original_w, original_h, original_w]), dtype=tf.float32) - tf.cast(tf.stack([top, left, top, left]), dtype=tf.float32)) / tf.cast(tf.stack([new_h, new_w, new_h, new_w]), dtype=tf.float32), 0.0, 1.0) img = tf.image.crop_to_bounding_box(img, top, left, bottom - top, right - left) labels = tf.gather(labels, indices) break return img, boxes, labels def random_crop(image, boxes, labels, min_scale=0.3, aspect_ratio_range=(0.5, 2.0), min_overlap_params=(0.0, 1.4, 0.2, 0.1), max_retry=50, seed=None): """Randomly crop the image and boxes, filtering labels. Args: image: a 'Tensor' of shape [height, width, 3] representing the input image. boxes: a 'Tensor' of shape [N, 4] representing the ground-truth bounding boxes with (ymin, xmin, ymax, xmax). labels: a 'Tensor' of shape [N,] representing the class labels of the boxes. min_scale: a 'float' in [0.0, 1.0) indicating the lower bound of the random scale variable. aspect_ratio_range: a list of two 'float' that specifies the lower and upper bound of the random aspect ratio. min_overlap_params: a list of four 'float' representing the min value, max value, step size, and offset for the minimum overlap sample. max_retry: an 'int' representing the number of trials for cropping. If it is exhausted, no cropping will be performed. seed: the random number seed of int, but could be None. Returns: image: a Tensor representing the random cropped image. Can be the original image if max_retry is exhausted. boxes: a Tensor representing the bounding boxes in the cropped image. labels: a Tensor representing the new bounding boxes' labels. """ with tf.name_scope('random_crop'): do_crop = tf.greater(tf.random.uniform([], seed=seed), 0.5) if do_crop: return random_crop_image_with_boxes_and_labels(image, boxes, labels, min_scale, aspect_ratio_range, min_overlap_params, max_retry) else: return image, boxes, labels
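

# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how the ops above are
# commonly chained for detection-style preprocessing. The image and box values
# are made-up placeholders; only functions defined in this module are used,
# with their documented signatures, and the helper is wrapped in a function so
# nothing runs at import time.
def _example_resize_and_crop_pipeline():
  image = tf.cast(
      tf.random.uniform([480, 640, 3], maxval=256, dtype=tf.int32), tf.uint8)
  # Pixel-space boxes in [ymin, xmin, ymax, xmax] order.
  boxes = tf.constant([[100.0, 120.0, 300.0, 400.0]])

  image = normalize_image(image)
  desired_size = [512, 512]
  padded_size = compute_padded_size(desired_size, stride=32)
  image, image_info = resize_and_crop_image(image, desired_size, padded_size)

  # image_info rows: [original_size, desired_size, scale, offset].
  image_scale = image_info[2, :]
  offset = image_info[3, :]
  boxes = resize_and_crop_boxes(boxes, image_scale, image_info[1, :], offset)
  return image, boxes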
39,642
37.228544
80
py
models
models-master/official/vision/ops/augment.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Augmentation policies for enhanced image/video preprocessing. AutoAugment Reference: - AutoAugment Reference: https://arxiv.org/abs/1805.09501 - AutoAugment for Object Detection Reference: https://arxiv.org/abs/1906.11172 RandAugment Reference: https://arxiv.org/abs/1909.13719 RandomErasing Reference: https://arxiv.org/abs/1708.04896 MixupAndCutmix: - Mixup: https://arxiv.org/abs/1710.09412 - Cutmix: https://arxiv.org/abs/1905.04899 RandomErasing, Mixup and Cutmix are inspired by https://github.com/rwightman/pytorch-image-models """ import inspect import math from typing import Any, List, Iterable, Optional, Tuple, Union import numpy as np import tensorflow as tf # This signifies the max integer that the controller RNN could predict for the # augmentation scheme. _MAX_LEVEL = 10. def to_4d(image: tf.Tensor) -> tf.Tensor: """Converts an input Tensor to 4 dimensions. 4D image => [N, H, W, C] or [N, C, H, W] 3D image => [1, H, W, C] or [1, C, H, W] 2D image => [1, H, W, 1] Args: image: The 2/3/4D input tensor. Returns: A 4D image tensor. Raises: `TypeError` if `image` is not a 2/3/4D tensor. """ shape = tf.shape(image) original_rank = tf.rank(image) left_pad = tf.cast(tf.less_equal(original_rank, 3), dtype=tf.int32) right_pad = tf.cast(tf.equal(original_rank, 2), dtype=tf.int32) new_shape = tf.concat( [ tf.ones(shape=left_pad, dtype=tf.int32), shape, tf.ones(shape=right_pad, dtype=tf.int32), ], axis=0, ) return tf.reshape(image, new_shape) def from_4d(image: tf.Tensor, ndims: tf.Tensor) -> tf.Tensor: """Converts a 4D image back to `ndims` rank.""" shape = tf.shape(image) begin = tf.cast(tf.less_equal(ndims, 3), dtype=tf.int32) end = 4 - tf.cast(tf.equal(ndims, 2), dtype=tf.int32) new_shape = shape[begin:end] return tf.reshape(image, new_shape) def _pad( image: tf.Tensor, filter_shape: Union[List[int], Tuple[int, ...]], mode: str = 'CONSTANT', constant_values: Union[int, tf.Tensor] = 0, ) -> tf.Tensor: """Explicitly pads a 4-D image. Equivalent to the implicit padding method offered in `tf.nn.conv2d` and `tf.nn.depthwise_conv2d`, but supports non-zero, reflect and symmetric padding mode. For the even-sized filter, it pads one more value to the right or the bottom side. Args: image: A 4-D `Tensor` of shape `[batch_size, height, width, channels]`. filter_shape: A `tuple`/`list` of 2 integers, specifying the height and width of the 2-D filter. mode: A `string`, one of "REFLECT", "CONSTANT", or "SYMMETRIC". The type of padding algorithm to use, which is compatible with `mode` argument in `tf.pad`. For more details, please refer to https://www.tensorflow.org/api_docs/python/tf/pad. constant_values: A `scalar`, the pad value to use in "CONSTANT" padding mode. Returns: A padded image. """ if mode.upper() not in {'REFLECT', 'CONSTANT', 'SYMMETRIC'}: raise ValueError( 'padding should be one of "REFLECT", "CONSTANT", or "SYMMETRIC".' 
) constant_values = tf.convert_to_tensor(constant_values, image.dtype) filter_height, filter_width = filter_shape pad_top = (filter_height - 1) // 2 pad_bottom = filter_height - 1 - pad_top pad_left = (filter_width - 1) // 2 pad_right = filter_width - 1 - pad_left paddings = [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]] return tf.pad(image, paddings, mode=mode, constant_values=constant_values) def _get_gaussian_kernel(sigma, filter_shape): """Computes 1D Gaussian kernel.""" sigma = tf.convert_to_tensor(sigma) x = tf.range(-filter_shape // 2 + 1, filter_shape // 2 + 1) x = tf.cast(x**2, sigma.dtype) x = tf.nn.softmax(-x / (2.0 * (sigma**2))) return x def _get_gaussian_kernel_2d(gaussian_filter_x, gaussian_filter_y): """Computes 2D Gaussian kernel given 1D kernels.""" gaussian_kernel = tf.matmul(gaussian_filter_x, gaussian_filter_y) return gaussian_kernel def _normalize_tuple(value, n, name): """Transforms an integer or iterable of integers into an integer tuple. Args: value: The value to validate and convert. Could an int, or any iterable of ints. n: The size of the tuple to be returned. name: The name of the argument being validated, e.g. "strides" or "kernel_size". This is only used to format error messages. Returns: A tuple of n integers. Raises: ValueError: If something else than an int/long or iterable thereof was passed. """ if isinstance(value, int): return (value,) * n else: try: value_tuple = tuple(value) except TypeError as exc: raise TypeError( f'The {name} argument must be a tuple of {n} integers. ' f'Received: {value}' ) from exc if len(value_tuple) != n: raise ValueError( f'The {name} argument must be a tuple of {n} integers. ' f'Received: {value}' ) for single_value in value_tuple: try: int(single_value) except (ValueError, TypeError) as exc: raise ValueError( f'The {name} argument must be a tuple of {n} integers. Received:' f' {value} including element {single_value} of type' f' {type(single_value)}.' ) from exc return value_tuple def gaussian_filter2d( image: tf.Tensor, filter_shape: Union[List[int], Tuple[int, ...], int], sigma: Union[List[float], Tuple[float], float] = 1.0, padding: str = 'REFLECT', constant_values: Union[int, tf.Tensor] = 0, name: Optional[str] = None, ) -> tf.Tensor: """Performs Gaussian blur on image(s). Args: image: Either a 2-D `Tensor` of shape `[height, width]`, a 3-D `Tensor` of shape `[height, width, channels]`, or a 4-D `Tensor` of shape `[batch_size, height, width, channels]`. filter_shape: An `integer` or `tuple`/`list` of 2 integers, specifying the height and width of the 2-D gaussian filter. Can be a single integer to specify the same value for all spatial dimensions. sigma: A `float` or `tuple`/`list` of 2 floats, specifying the standard deviation in x and y direction the 2-D gaussian filter. Can be a single float to specify the same value for all spatial dimensions. padding: A `string`, one of "REFLECT", "CONSTANT", or "SYMMETRIC". The type of padding algorithm to use, which is compatible with `mode` argument in `tf.pad`. For more details, please refer to https://www.tensorflow.org/api_docs/python/tf/pad. constant_values: A `scalar`, the pad value to use in "CONSTANT" padding mode. name: A name for this operation (optional). Returns: 2-D, 3-D or 4-D `Tensor` of the same dtype as input. Raises: ValueError: If `image` is not 2, 3 or 4-dimensional, if `padding` is other than "REFLECT", "CONSTANT" or "SYMMETRIC", if `filter_shape` is invalid, or if `sigma` is invalid. 
""" with tf.name_scope(name or 'gaussian_filter2d'): if isinstance(sigma, (list, tuple)): if len(sigma) != 2: raise ValueError('sigma should be a float or a tuple/list of 2 floats') else: sigma = (sigma,) * 2 if any(s < 0 for s in sigma): raise ValueError('sigma should be greater than or equal to 0.') image = tf.convert_to_tensor(image, name='image') sigma = tf.convert_to_tensor(sigma, name='sigma') original_ndims = tf.rank(image) image = to_4d(image) # Keep the precision if it's float; # otherwise, convert to float32 for computing. orig_dtype = image.dtype if not image.dtype.is_floating: image = tf.cast(image, tf.float32) channels = tf.shape(image)[3] filter_shape = _normalize_tuple(filter_shape, 2, 'filter_shape') sigma = tf.cast(sigma, image.dtype) gaussian_kernel_x = _get_gaussian_kernel(sigma[1], filter_shape[1]) gaussian_kernel_x = gaussian_kernel_x[tf.newaxis, :] gaussian_kernel_y = _get_gaussian_kernel(sigma[0], filter_shape[0]) gaussian_kernel_y = gaussian_kernel_y[:, tf.newaxis] gaussian_kernel_2d = _get_gaussian_kernel_2d( gaussian_kernel_y, gaussian_kernel_x ) gaussian_kernel_2d = gaussian_kernel_2d[:, :, tf.newaxis, tf.newaxis] gaussian_kernel_2d = tf.tile(gaussian_kernel_2d, [1, 1, channels, 1]) image = _pad( image, filter_shape, mode=padding, constant_values=constant_values ) output = tf.nn.depthwise_conv2d( input=image, filter=gaussian_kernel_2d, strides=(1, 1, 1, 1), padding='VALID', ) output = from_4d(output, original_ndims) return tf.cast(output, orig_dtype) def _convert_translation_to_transform(translations: tf.Tensor) -> tf.Tensor: """Converts translations to a projective transform. The translation matrix looks like this: [[1 0 -dx] [0 1 -dy] [0 0 1]] Args: translations: The 2-element list representing [dx, dy], or a matrix of 2-element lists representing [dx dy] to translate for each image. The shape must be static. Returns: The transformation matrix of shape (num_images, 8). Raises: `TypeError` if - the shape of `translations` is not known or - the shape of `translations` is not rank 1 or 2. """ translations = tf.convert_to_tensor(translations, dtype=tf.float32) if translations.get_shape().ndims is None: raise TypeError('translations rank must be statically known') elif len(translations.get_shape()) == 1: translations = translations[None] elif len(translations.get_shape()) != 2: raise TypeError('translations should have rank 1 or 2.') num_translations = tf.shape(translations)[0] return tf.concat( values=[ tf.ones((num_translations, 1), tf.dtypes.float32), tf.zeros((num_translations, 1), tf.dtypes.float32), -translations[:, 0, None], tf.zeros((num_translations, 1), tf.dtypes.float32), tf.ones((num_translations, 1), tf.dtypes.float32), -translations[:, 1, None], tf.zeros((num_translations, 2), tf.dtypes.float32), ], axis=1, ) def _convert_angles_to_transform(angles: tf.Tensor, image_width: tf.Tensor, image_height: tf.Tensor) -> tf.Tensor: """Converts an angle or angles to a projective transform. Args: angles: A scalar to rotate all images, or a vector to rotate a batch of images. This must be a scalar. image_width: The width of the image(s) to be transformed. image_height: The height of the image(s) to be transformed. Returns: A tensor of shape (num_images, 8). Raises: `TypeError` if `angles` is not rank 0 or 1. 
""" angles = tf.convert_to_tensor(angles, dtype=tf.float32) if len(angles.get_shape()) == 0: # pylint:disable=g-explicit-length-test angles = angles[None] elif len(angles.get_shape()) != 1: raise TypeError('Angles should have a rank 0 or 1.') x_offset = ((image_width - 1) - (tf.math.cos(angles) * (image_width - 1) - tf.math.sin(angles) * (image_height - 1))) / 2.0 y_offset = ((image_height - 1) - (tf.math.sin(angles) * (image_width - 1) + tf.math.cos(angles) * (image_height - 1))) / 2.0 num_angles = tf.shape(angles)[0] return tf.concat( values=[ tf.math.cos(angles)[:, None], -tf.math.sin(angles)[:, None], x_offset[:, None], tf.math.sin(angles)[:, None], tf.math.cos(angles)[:, None], y_offset[:, None], tf.zeros((num_angles, 2), tf.dtypes.float32), ], axis=1, ) def _apply_transform_to_images( images, transforms, fill_mode='reflect', fill_value=0.0, interpolation='bilinear', output_shape=None, name=None, ): """Applies the given transform(s) to the image(s). Args: images: A tensor of shape `(num_images, num_rows, num_columns, num_channels)` (NHWC). The rank must be statically known (the shape is not `TensorShape(None)`). transforms: Projective transform matrix/matrices. A vector of length 8 or tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point `(x, y)` to a transformed *input* point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where `k = c0 x + c1 y + 1`. The transforms are *inverted* compared to the transform mapping input points to output points. Note that gradients are not backpropagated into transformation parameters. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`). fill_value: a float represents the value to be filled outside the boundaries when `fill_mode="constant"`. interpolation: Interpolation mode. Supported values: `"nearest"`, `"bilinear"`. output_shape: Output dimension after the transform, `[height, width]`. If `None`, output is the same size as input image. name: The name of the op. Fill mode behavior for each valid value is as follows - `"reflect"`: `(d c b a | a b c d | d c b a)` The input is extended by reflecting about the edge of the last pixel. - `"constant"`: `(k k k k | a b c d | k k k k)` The input is extended by filling all values beyond the edge with the same constant value k = 0. - `"wrap"`: `(a b c d | a b c d | a b c d)` The input is extended by wrapping around to the opposite edge. - `"nearest"`: `(a a a a | a b c d | d d d d)` The input is extended by the nearest pixel. Input shape: 4D tensor with shape: `(samples, height, width, channels)`, in `"channels_last"` format. Output shape: 4D tensor with shape: `(samples, height, width, channels)`, in `"channels_last"` format. Returns: Image(s) with the same type and shape as `images`, with the given transform(s) applied. Transformed coordinates outside of the input image will be filled with zeros. 
""" with tf.name_scope(name or 'transform'): if output_shape is None: output_shape = tf.shape(images)[1:3] if not tf.executing_eagerly(): output_shape_value = tf.get_static_value(output_shape) if output_shape_value is not None: output_shape = output_shape_value output_shape = tf.convert_to_tensor( output_shape, tf.int32, name='output_shape' ) if not output_shape.get_shape().is_compatible_with([2]): raise ValueError( 'output_shape must be a 1-D Tensor of 2 elements: ' 'new_height, new_width, instead got ' f'output_shape={output_shape}' ) fill_value = tf.convert_to_tensor(fill_value, tf.float32, name='fill_value') return tf.raw_ops.ImageProjectiveTransformV3( images=images, output_shape=output_shape, fill_value=fill_value, transforms=transforms, fill_mode=fill_mode.upper(), interpolation=interpolation.upper(), ) def transform( image: tf.Tensor, transforms: Any, interpolation: str = 'nearest', output_shape=None, fill_mode: str = 'reflect', fill_value: float = 0.0, ) -> tf.Tensor: """Transforms an image.""" original_ndims = tf.rank(image) transforms = tf.convert_to_tensor(transforms, dtype=tf.float32) if transforms.shape.rank == 1: transforms = transforms[None] image = to_4d(image) image = _apply_transform_to_images( images=image, transforms=transforms, interpolation=interpolation, fill_mode=fill_mode, fill_value=fill_value, output_shape=output_shape, ) return from_4d(image, original_ndims) def translate( image: tf.Tensor, translations, fill_value: float = 0.0, fill_mode: str = 'reflect', interpolation: str = 'nearest', ) -> tf.Tensor: """Translates image(s) by provided vectors. Args: image: An image Tensor of type uint8. translations: A vector or matrix representing [dx dy]. fill_value: a float represents the value to be filled outside the boundaries when `fill_mode="constant"`. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`). interpolation: Interpolation mode. Supported values: `"nearest"`, `"bilinear"`. Returns: The translated version of the image. """ transforms = _convert_translation_to_transform(translations) # pytype: disable=wrong-arg-types # always-use-return-annotations return transform( image, transforms=transforms, interpolation=interpolation, fill_value=fill_value, fill_mode=fill_mode, ) def rotate(image: tf.Tensor, degrees: float) -> tf.Tensor: """Rotates the image by degrees either clockwise or counterclockwise. Args: image: An image Tensor of type uint8. degrees: Float, a scalar angle in degrees to rotate all images by. If degrees is positive the image will be rotated clockwise otherwise it will be rotated counterclockwise. Returns: The rotated version of image. """ # Convert from degrees to radians. degrees_to_radians = math.pi / 180.0 radians = tf.cast(degrees * degrees_to_radians, tf.float32) original_ndims = tf.rank(image) image = to_4d(image) image_height = tf.cast(tf.shape(image)[1], tf.float32) image_width = tf.cast(tf.shape(image)[2], tf.float32) transforms = _convert_angles_to_transform( angles=radians, image_width=image_width, image_height=image_height) # In practice, we should randomize the rotation degrees by flipping # it negatively half the time, but that's done on 'degrees' outside # of the function. image = transform(image, transforms=transforms) return from_4d(image, original_ndims) def blend(image1: tf.Tensor, image2: tf.Tensor, factor: float) -> tf.Tensor: """Blend image1 and image2 using 'factor'. Factor can be above 0.0. 
A value of 0.0 means only image1 is used. A value of 1.0 means only image2 is used. A value between 0.0 and 1.0 means we linearly interpolate the pixel values between the two images. A value greater than 1.0 "extrapolates" the difference between the two pixel values, and we clip the results to values between 0 and 255. Args: image1: An image Tensor of type uint8. image2: An image Tensor of type uint8. factor: A floating point value above 0.0. Returns: A blended image Tensor of type uint8. """ if factor == 0.0: return tf.convert_to_tensor(image1) if factor == 1.0: return tf.convert_to_tensor(image2) image1 = tf.cast(image1, tf.float32) image2 = tf.cast(image2, tf.float32) difference = image2 - image1 scaled = factor * difference # Do addition in float. temp = tf.cast(image1, tf.float32) + scaled # Interpolate if factor > 0.0 and factor < 1.0: # Interpolation means we always stay within 0 and 255. return tf.cast(temp, tf.uint8) # Extrapolate: # # We need to clip and then cast. return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8) def cutout(image: tf.Tensor, pad_size: int, replace: int = 0) -> tf.Tensor: """Apply cutout (https://arxiv.org/abs/1708.04552) to image. This operation applies a (2*pad_size x 2*pad_size) mask of zeros to a random location within `image`. The pixel values filled in will be of the value `replace`. The location where the mask will be applied is randomly chosen uniformly over the whole image. Args: image: An image Tensor of type uint8. pad_size: Specifies how big the zero mask that will be generated is that is applied to the image. The mask will be of size (2*pad_size x 2*pad_size). replace: What pixel value to fill in the image in the area that has the cutout mask applied to it. Returns: An image Tensor that is of type uint8. """ if image.shape.rank not in [3, 4]: raise ValueError('Bad image rank: {}'.format(image.shape.rank)) if image.shape.rank == 4: return cutout_video(image, replace=replace) image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] # Sample the center location in the image where the zero mask will be applied. 
cutout_center_height = tf.random.uniform( shape=[], minval=0, maxval=image_height, dtype=tf.int32) cutout_center_width = tf.random.uniform( shape=[], minval=0, maxval=image_width, dtype=tf.int32) image = _fill_rectangle(image, cutout_center_width, cutout_center_height, pad_size, pad_size, replace) return image def _fill_rectangle(image, center_width, center_height, half_width, half_height, replace=None): """Fills blank area.""" image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] lower_pad = tf.maximum(0, center_height - half_height) upper_pad = tf.maximum(0, image_height - center_height - half_height) left_pad = tf.maximum(0, center_width - half_width) right_pad = tf.maximum(0, image_width - center_width - half_width) cutout_shape = [ image_height - (lower_pad + upper_pad), image_width - (left_pad + right_pad) ] padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]] mask = tf.pad( tf.zeros(cutout_shape, dtype=image.dtype), padding_dims, constant_values=1) mask = tf.expand_dims(mask, -1) mask = tf.tile(mask, [1, 1, 3]) if replace is None: fill = tf.random.normal(tf.shape(image), dtype=image.dtype) elif isinstance(replace, tf.Tensor): fill = replace else: fill = tf.ones_like(image, dtype=image.dtype) * replace image = tf.where(tf.equal(mask, 0), fill, image) return image def _fill_rectangle_video(image, center_width, center_height, half_width, half_height, replace=None): """Fills blank area for video.""" image_time = tf.shape(image)[0] image_height = tf.shape(image)[1] image_width = tf.shape(image)[2] lower_pad = tf.maximum(0, center_height - half_height) upper_pad = tf.maximum(0, image_height - center_height - half_height) left_pad = tf.maximum(0, center_width - half_width) right_pad = tf.maximum(0, image_width - center_width - half_width) cutout_shape = [ image_time, image_height - (lower_pad + upper_pad), image_width - (left_pad + right_pad) ] padding_dims = [[0, 0], [lower_pad, upper_pad], [left_pad, right_pad]] mask = tf.pad( tf.zeros(cutout_shape, dtype=image.dtype), padding_dims, constant_values=1) mask = tf.expand_dims(mask, -1) mask = tf.tile(mask, [1, 1, 1, 3]) if replace is None: fill = tf.random.normal(tf.shape(image), dtype=image.dtype) elif isinstance(replace, tf.Tensor): fill = replace else: fill = tf.ones_like(image, dtype=image.dtype) * replace image = tf.where(tf.equal(mask, 0), fill, image) return image def cutout_video( video: tf.Tensor, mask_shape: Optional[tf.Tensor] = None, replace: int = 0, ) -> tf.Tensor: """Apply cutout (https://arxiv.org/abs/1708.04552) to a video. This operation applies a random size 3D mask of zeros to a random location within `video`. The mask is padded The pixel values filled in will be of the value `replace`. The location where the mask will be applied is randomly chosen uniformly over the whole video. If the size of the mask is not set, then, it is randomly sampled uniformly from [0.25*height, 0.5*height], [0.25*width, 0.5*width], and [1, 0.25*depth], which represent the height, width, and number of frames of the input video tensor respectively. Args: video: A video Tensor of shape [T, H, W, C]. mask_shape: An optional integer tensor that specifies the depth, height and width of the mask to cut. If it is not set, the shape is randomly sampled as described above. The shape dimensions should be divisible by 2 otherwise they will rounded down. replace: What pixel value to fill in the image in the area that has the cutout mask applied to it. Returns: A video Tensor with cutout applied. 
""" tf.debugging.assert_shapes([ (video, ('T', 'H', 'W', 'C')), ]) video_depth = tf.shape(video)[0] video_height = tf.shape(video)[1] video_width = tf.shape(video)[2] # Sample the center location in the image where the zero mask will be applied. cutout_center_height = tf.random.uniform( shape=[], minval=0, maxval=video_height, dtype=tf.int32 ) cutout_center_width = tf.random.uniform( shape=[], minval=0, maxval=video_width, dtype=tf.int32 ) cutout_center_depth = tf.random.uniform( shape=[], minval=0, maxval=video_depth, dtype=tf.int32 ) if mask_shape is not None: pad_shape = tf.maximum(1, mask_shape // 2) pad_size_depth, pad_size_height, pad_size_width = ( pad_shape[0], pad_shape[1], pad_shape[2], ) else: pad_size_height = tf.random.uniform( shape=[], minval=tf.maximum(1, tf.cast(video_height / 4, tf.int32)), maxval=tf.maximum(2, tf.cast(video_height / 2, tf.int32)), dtype=tf.int32, ) pad_size_width = tf.random.uniform( shape=[], minval=tf.maximum(1, tf.cast(video_width / 4, tf.int32)), maxval=tf.maximum(2, tf.cast(video_width / 2, tf.int32)), dtype=tf.int32, ) pad_size_depth = tf.random.uniform( shape=[], minval=1, maxval=tf.maximum(2, tf.cast(video_depth / 4, tf.int32)), dtype=tf.int32, ) lower_pad = tf.maximum(0, cutout_center_height - pad_size_height) upper_pad = tf.maximum( 0, video_height - cutout_center_height - pad_size_height ) left_pad = tf.maximum(0, cutout_center_width - pad_size_width) right_pad = tf.maximum(0, video_width - cutout_center_width - pad_size_width) back_pad = tf.maximum(0, cutout_center_depth - pad_size_depth) forward_pad = tf.maximum( 0, video_depth - cutout_center_depth - pad_size_depth ) cutout_shape = [ video_depth - (back_pad + forward_pad), video_height - (lower_pad + upper_pad), video_width - (left_pad + right_pad), ] padding_dims = [[back_pad, forward_pad], [lower_pad, upper_pad], [left_pad, right_pad]] mask = tf.pad( tf.zeros(cutout_shape, dtype=video.dtype), padding_dims, constant_values=1 ) mask = tf.expand_dims(mask, -1) num_channels = tf.shape(video)[-1] mask = tf.tile(mask, [1, 1, 1, num_channels]) video = tf.where( tf.equal(mask, 0), tf.ones_like(video, dtype=video.dtype) * replace, video ) return video def gaussian_noise( image: tf.Tensor, low: float = 0.1, high: float = 2.0) -> tf.Tensor: """Add Gaussian noise to image(s).""" augmented_image = gaussian_filter2d( # pylint: disable=g-long-lambda image, filter_shape=[3, 3], sigma=np.random.uniform(low=low, high=high) ) return augmented_image def solarize(image: tf.Tensor, threshold: int = 128) -> tf.Tensor: """Solarize the input image(s).""" # For each pixel in the image, select the pixel # if the value is less than the threshold. # Otherwise, subtract 255 from the pixel. return tf.where(image < threshold, image, 255 - image) def solarize_add(image: tf.Tensor, addition: int = 0, threshold: int = 128) -> tf.Tensor: """Additive solarize the input image(s).""" # For each pixel in the image less than threshold # we add 'addition' amount to it and then clip the # pixel value to be between 0 and 255. The value # of 'addition' is between -128 and 128. 
added_image = tf.cast(image, tf.int64) + addition added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8) return tf.where(image < threshold, added_image, image) def grayscale(image: tf.Tensor) -> tf.Tensor: """Convert image to grayscale.""" return tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image)) def color(image: tf.Tensor, factor: float) -> tf.Tensor: """Equivalent of PIL Color.""" degenerate = grayscale(image) return blend(degenerate, image, factor) def contrast(image: tf.Tensor, factor: float) -> tf.Tensor: """Equivalent of PIL Contrast.""" degenerate = tf.image.rgb_to_grayscale(image) # Cast before calling tf.histogram. degenerate = tf.cast(degenerate, tf.int32) # Compute the grayscale histogram, then compute the mean pixel value, # and create a constant image size of that value. Use that as the # blending degenerate target of the original image. hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256) mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0 degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean degenerate = tf.clip_by_value(degenerate, 0.0, 255.0) degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8)) return blend(degenerate, image, factor) def brightness(image: tf.Tensor, factor: float) -> tf.Tensor: """Equivalent of PIL Brightness.""" degenerate = tf.zeros_like(image) return blend(degenerate, image, factor) def posterize(image: tf.Tensor, bits: int) -> tf.Tensor: """Equivalent of PIL Posterize.""" shift = 8 - bits return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift) def wrapped_rotate(image: tf.Tensor, degrees: float, replace: int) -> tf.Tensor: """Applies rotation with wrap/unwrap.""" image = rotate(wrap(image), degrees=degrees) return unwrap(image, replace) def translate_x(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor: """Equivalent of PIL Translate in X dimension.""" image = translate(wrap(image), [-pixels, 0]) return unwrap(image, replace) def translate_y(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor: """Equivalent of PIL Translate in Y dimension.""" image = translate(wrap(image), [0, -pixels]) return unwrap(image, replace) def shear_x(image: tf.Tensor, level: float, replace: int) -> tf.Tensor: """Equivalent of PIL Shearing in X dimension.""" # Shear parallel to x axis is a projective transform # with a matrix form of: # [1 level # 0 1]. image = transform( image=wrap(image), transforms=[1., level, 0., 0., 1., 0., 0., 0.]) return unwrap(image, replace) def shear_y(image: tf.Tensor, level: float, replace: int) -> tf.Tensor: """Equivalent of PIL Shearing in Y dimension.""" # Shear parallel to y axis is a projective transform # with a matrix form of: # [1 0 # level 1]. image = transform( image=wrap(image), transforms=[1., 0., 0., level, 1., 0., 0., 0.]) return unwrap(image, replace) def autocontrast(image: tf.Tensor) -> tf.Tensor: """Implements Autocontrast function from PIL using TF ops. Args: image: A 3D uint8 tensor. Returns: The image after it has had autocontrast applied to it and will be of type uint8. """ def scale_channel(image: tf.Tensor) -> tf.Tensor: """Scale the 2D image using the autocontrast rule.""" # A possibly cheaper version can be done using cumsum/unique_with_counts # over the histogram values, rather than iterating over the entire image. # to compute mins and maxes. lo = tf.cast(tf.reduce_min(image), tf.float32) hi = tf.cast(tf.reduce_max(image), tf.float32) # Scale the image, making the lowest value 0 and the highest value 255. 
def scale_values(im): scale = 255.0 / (hi - lo) offset = -lo * scale im = tf.cast(im, tf.float32) * scale + offset im = tf.clip_by_value(im, 0.0, 255.0) return tf.cast(im, tf.uint8) result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image) return result # Assumes RGB for now. Scales each channel independently # and then stacks the result. s1 = scale_channel(image[..., 0]) s2 = scale_channel(image[..., 1]) s3 = scale_channel(image[..., 2]) image = tf.stack([s1, s2, s3], -1) return image def sharpness(image: tf.Tensor, factor: float) -> tf.Tensor: """Implements Sharpness function from PIL using TF ops.""" orig_image = image image = tf.cast(image, tf.float32) # Make image 4D for conv operation. image = tf.expand_dims(image, 0) # SMOOTH PIL Kernel. if orig_image.shape.rank == 3: kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, shape=[3, 3, 1, 1]) / 13. # Tile across channel dimension. kernel = tf.tile(kernel, [1, 1, 3, 1]) strides = [1, 1, 1, 1] degenerate = tf.nn.depthwise_conv2d( image, kernel, strides, padding='VALID', dilations=[1, 1]) elif orig_image.shape.rank == 4: kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, shape=[1, 3, 3, 1, 1]) / 13. strides = [1, 1, 1, 1, 1] # Run the kernel across each channel channels = tf.split(image, 3, axis=-1) degenerates = [ tf.nn.conv3d(channel, kernel, strides, padding='VALID', dilations=[1, 1, 1, 1, 1]) for channel in channels ] degenerate = tf.concat(degenerates, -1) else: raise ValueError('Bad image rank: {}'.format(image.shape.rank)) degenerate = tf.clip_by_value(degenerate, 0.0, 255.0) degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0]) # For the borders of the resulting image, fill in the values of the # original image. mask = tf.ones_like(degenerate) paddings = [[0, 0]] * (orig_image.shape.rank - 3) padded_mask = tf.pad(mask, paddings + [[1, 1], [1, 1], [0, 0]]) padded_degenerate = tf.pad(degenerate, paddings + [[1, 1], [1, 1], [0, 0]]) result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image) # Blend the final result. return blend(result, orig_image, factor) def equalize(image: tf.Tensor) -> tf.Tensor: """Implements Equalize function from PIL using TF ops.""" def scale_channel(im, c): """Scale the data in the channel to implement equalize.""" im = tf.cast(im[..., c], tf.int32) # Compute the histogram of the image channel. histo = tf.histogram_fixed_width(im, [0, 255], nbins=256) # For the purposes of computing the step, filter out the nonzeros. nonzero = tf.where(tf.not_equal(histo, 0)) nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1]) step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255 def build_lut(histo, step): # Compute the cumulative sum, shifting by step // 2 # and then normalization by step. lut = (tf.cumsum(histo) + (step // 2)) // step # Shift lut, prepending with 0. lut = tf.concat([[0], lut[:-1]], 0) # Clip the counts to be in range. This is done # in the C code for image.point. return tf.clip_by_value(lut, 0, 255) # If step is zero, return the original image. Otherwise, build # lut from the full histogram and step and then index from it. result = tf.cond( tf.equal(step, 0), lambda: im, lambda: tf.gather(build_lut(histo, step), im)) return tf.cast(result, tf.uint8) # Assumes RGB for now. Scales each channel independently # and then stacks the result. 
s1 = scale_channel(image, 0) s2 = scale_channel(image, 1) s3 = scale_channel(image, 2) image = tf.stack([s1, s2, s3], -1) return image def invert(image: tf.Tensor) -> tf.Tensor: """Inverts the image pixels.""" image = tf.convert_to_tensor(image) return 255 - image def wrap(image: tf.Tensor) -> tf.Tensor: """Returns 'image' with an extra channel set to all 1s.""" shape = tf.shape(image) extended_channel = tf.expand_dims(tf.ones(shape[:-1], image.dtype), -1) extended = tf.concat([image, extended_channel], axis=-1) return extended def unwrap(image: tf.Tensor, replace: int) -> tf.Tensor: """Unwraps an image produced by wrap. Where there is a 0 in the last channel for every spatial position, the rest of the three channels in that spatial dimension are grayed (set to 128). Operations like translate and shear on a wrapped Tensor will leave 0s in empty locations. Some transformations look at the intensity of values to do preprocessing, and we want these empty pixels to assume the 'average' value, rather than pure black. Args: image: A 3D Image Tensor with 4 channels. replace: A one or three value 1D tensor to fill empty pixels. Returns: image: A 3D image Tensor with 3 channels. """ image_shape = tf.shape(image) # Flatten the spatial dimensions. flattened_image = tf.reshape(image, [-1, image_shape[-1]]) # Find all pixels where the last channel is zero. alpha_channel = tf.expand_dims(flattened_image[..., 3], axis=-1) replace = tf.concat([replace, tf.ones([1], image.dtype)], 0) # Where they are zero, fill them in with 'replace'. flattened_image = tf.where( tf.equal(alpha_channel, 0), tf.ones_like(flattened_image, dtype=image.dtype) * replace, flattened_image) image = tf.reshape(flattened_image, image_shape) image = tf.slice( image, [0] * image.shape.rank, tf.concat([image_shape[:-1], [3]], -1)) return image def _scale_bbox_only_op_probability(prob): """Reduce the probability of the bbox-only operation. Probability is reduced so that we do not distort the content of too many bounding boxes that are close to each other. The value of 3.0 was a chosen hyper parameter when designing the autoaugment algorithm that we found empirically to work well. Args: prob: Float that is the probability of applying the bbox-only operation. Returns: Reduced probability. """ return prob / 3.0 def _apply_bbox_augmentation(image, bbox, augmentation_func, *args): """Applies augmentation_func to the subsection of image indicated by bbox. Args: image: 3D uint8 Tensor. bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x) of type float that represents the normalized coordinates between 0 and 1. augmentation_func: Augmentation function that will be applied to the subsection of image. *args: Additional parameters that will be passed into augmentation_func when it is called. Returns: A modified version of image, where the bbox location in the image will have `ugmentation_func applied to it. """ image_height = tf.cast(tf.shape(image)[0], tf.float32) image_width = tf.cast(tf.shape(image)[1], tf.float32) min_y = tf.cast(image_height * bbox[0], tf.int32) min_x = tf.cast(image_width * bbox[1], tf.int32) max_y = tf.cast(image_height * bbox[2], tf.int32) max_x = tf.cast(image_width * bbox[3], tf.int32) image_height = tf.cast(image_height, tf.int32) image_width = tf.cast(image_width, tf.int32) # Clip to be sure the max values do not fall out of range. max_y = tf.minimum(max_y, image_height - 1) max_x = tf.minimum(max_x, image_width - 1) # Get the sub-tensor that is the image within the bounding box region. 
  bbox_content = image[min_y:max_y + 1, min_x:max_x + 1, :]

  # Apply the augmentation function to the bbox portion of the image.
  augmented_bbox_content = augmentation_func(bbox_content, *args)

  # Pad the augmented_bbox_content and the mask to match the shape of original
  # image.
  augmented_bbox_content = tf.pad(augmented_bbox_content,
                                  [[min_y, (image_height - 1) - max_y],
                                   [min_x, (image_width - 1) - max_x], [0, 0]])

  # Create a mask that will be used to zero out a part of the original image.
  mask_tensor = tf.zeros_like(bbox_content)

  mask_tensor = tf.pad(mask_tensor,
                       [[min_y, (image_height - 1) - max_y],
                        [min_x, (image_width - 1) - max_x], [0, 0]],
                       constant_values=1)
  # Replace the old bbox content with the new augmented content.
  image = image * mask_tensor + augmented_bbox_content
  return image


def _concat_bbox(bbox, bboxes):
  """Helper function that concatenates bbox to bboxes along the first axis."""

  # Note if all elements in bboxes are -1 (_INVALID_BOX), then this means
  # we discard bboxes and start the bboxes Tensor with the current bbox.
  bboxes_sum_check = tf.reduce_sum(bboxes)
  bbox = tf.expand_dims(bbox, 0)
  # This check will be true when it is an _INVALID_BOX
  bboxes = tf.cond(tf.equal(bboxes_sum_check, -4.0),
                   lambda: bbox,
                   lambda: tf.concat([bboxes, bbox], 0))
  return bboxes


def _apply_bbox_augmentation_wrapper(image, bbox, new_bboxes, prob,
                                     augmentation_func, func_changes_bbox,
                                     *args):
  """Applies _apply_bbox_augmentation with probability prob.

  Args:
    image: 3D uint8 Tensor.
    bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x) of type
      float that represents the normalized coordinates between 0 and 1.
    new_bboxes: 2D Tensor that is a list of the bboxes in the image after they
      have been altered by aug_func. These will only be changed when
      func_changes_bbox is set to true. Each bbox has 4 elements
      (min_y, min_x, max_y, max_x) of type float that are the normalized bbox
      coordinates between 0 and 1.
    prob: Float that is the probability of applying _apply_bbox_augmentation.
    augmentation_func: Augmentation function that will be applied to the
      subsection of image.
    func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
      to image.
    *args: Additional parameters that will be passed into augmentation_func
      when it is called.

  Returns:
    A tuple. The first element is a modified version of image, where the bbox
    location in the image will have augmentation_func applied to it if it is
    chosen to be called with probability `prob`. The second element is a
    Tensor of Tensors of length 4 that will contain the altered bbox after
    applying augmentation_func.
""" should_apply_op = tf.cast( tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool) if func_changes_bbox: augmented_image, bbox = tf.cond( should_apply_op, lambda: augmentation_func(image, bbox, *args), lambda: (image, bbox)) else: augmented_image = tf.cond( should_apply_op, lambda: _apply_bbox_augmentation(image, bbox, augmentation_func, *args), lambda: image) new_bboxes = _concat_bbox(bbox, new_bboxes) return augmented_image, new_bboxes def _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, aug_func, func_changes_bbox, *args): """Checks to be sure num bboxes > 0 before calling inner function.""" num_bboxes = tf.shape(bboxes)[0] image, bboxes = tf.cond( tf.equal(num_bboxes, 0), lambda: (image, bboxes), # pylint:disable=g-long-lambda lambda: _apply_multi_bbox_augmentation( image, bboxes, prob, aug_func, func_changes_bbox, *args)) # pylint:enable=g-long-lambda return image, bboxes # Represents an invalid bounding box that is used for checking for padding # lists of bounding box coordinates for a few augmentation operations _INVALID_BOX = [[-1.0, -1.0, -1.0, -1.0]] def _apply_multi_bbox_augmentation(image, bboxes, prob, aug_func, func_changes_bbox, *args): """Applies aug_func to the image for each bbox in bboxes. Args: image: 3D uint8 Tensor. bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox has 4 elements (min_y, min_x, max_y, max_x) of type float. prob: Float that is the probability of applying aug_func to a specific bounding box within the image. aug_func: Augmentation function that will be applied to the subsections of image indicated by the bbox values in bboxes. func_changes_bbox: Boolean. Does augmentation_func return bbox in addition to image. *args: Additional parameters that will be passed into augmentation_func when it is called. Returns: A modified version of image, where each bbox location in the image will have augmentation_func applied to it if it is chosen to be called with probability prob independently across all bboxes. Also the final bboxes are returned that will be unchanged if func_changes_bbox is set to false and if true, the new altered ones will be returned. Raises: ValueError if applied to video. """ if image.shape.rank == 4: raise ValueError('Image rank 4 is not supported') # Will keep track of the new altered bboxes after aug_func is repeatedly # applied. The -1 values are a dummy value and this first Tensor will be # removed upon appending the first real bbox. new_bboxes = tf.constant(_INVALID_BOX) # If the bboxes are empty, then just give it _INVALID_BOX. The result # will be thrown away. bboxes = tf.cond(tf.equal(tf.size(bboxes), 0), lambda: tf.constant(_INVALID_BOX), lambda: bboxes) bboxes = tf.ensure_shape(bboxes, (None, 4)) # pylint:disable=g-long-lambda wrapped_aug_func = ( lambda _image, bbox, _new_bboxes: _apply_bbox_augmentation_wrapper( _image, bbox, _new_bboxes, prob, aug_func, func_changes_bbox, *args)) # pylint:enable=g-long-lambda # Setup the while_loop. num_bboxes = tf.shape(bboxes)[0] # We loop until we go over all bboxes. idx = tf.constant(0) # Counter for the while loop. # Conditional function when to end the loop once we go over all bboxes # images_and_bboxes contain (_image, _new_bboxes) cond = lambda _idx, _images_and_bboxes: tf.less(_idx, num_bboxes) # Shuffle the bboxes so that the augmentation order is not deterministic if # we are not changing the bboxes with aug_func. 
if not func_changes_bbox: loop_bboxes = tf.random.shuffle(bboxes) else: loop_bboxes = bboxes # Main function of while_loop where we repeatedly apply augmentation on the # bboxes in the image. # pylint:disable=g-long-lambda body = lambda _idx, _images_and_bboxes: [ _idx + 1, wrapped_aug_func(_images_and_bboxes[0], loop_bboxes[_idx], _images_and_bboxes[1])] # pylint:enable=g-long-lambda _, (image, new_bboxes) = tf.while_loop( cond, body, [idx, (image, new_bboxes)], shape_invariants=[idx.get_shape(), (image.get_shape(), tf.TensorShape([None, 4]))]) # Either return the altered bboxes or the original ones depending on if # we altered them in anyway. if func_changes_bbox: final_bboxes = new_bboxes else: final_bboxes = bboxes return image, final_bboxes def _clip_bbox(min_y, min_x, max_y, max_x): """Clip bounding box coordinates between 0 and 1. Args: min_y: Normalized bbox coordinate of type float between 0 and 1. min_x: Normalized bbox coordinate of type float between 0 and 1. max_y: Normalized bbox coordinate of type float between 0 and 1. max_x: Normalized bbox coordinate of type float between 0 and 1. Returns: Clipped coordinate values between 0 and 1. """ min_y = tf.clip_by_value(min_y, 0.0, 1.0) min_x = tf.clip_by_value(min_x, 0.0, 1.0) max_y = tf.clip_by_value(max_y, 0.0, 1.0) max_x = tf.clip_by_value(max_x, 0.0, 1.0) return min_y, min_x, max_y, max_x def _check_bbox_area(min_y, min_x, max_y, max_x, delta=0.05): """Adjusts bbox coordinates to make sure the area is > 0. Args: min_y: Normalized bbox coordinate of type float between 0 and 1. min_x: Normalized bbox coordinate of type float between 0 and 1. max_y: Normalized bbox coordinate of type float between 0 and 1. max_x: Normalized bbox coordinate of type float between 0 and 1. delta: Float, this is used to create a gap of size 2 * delta between bbox min/max coordinates that are the same on the boundary. This prevents the bbox from having an area of zero. Returns: Tuple of new bbox coordinates between 0 and 1 that will now have a guaranteed area > 0. """ height = max_y - min_y width = max_x - min_x def _adjust_bbox_boundaries(min_coord, max_coord): # Make sure max is never 0 and min is never 1. max_coord = tf.maximum(max_coord, 0.0 + delta) min_coord = tf.minimum(min_coord, 1.0 - delta) return min_coord, max_coord min_y, max_y = tf.cond(tf.equal(height, 0.0), lambda: _adjust_bbox_boundaries(min_y, max_y), lambda: (min_y, max_y)) min_x, max_x = tf.cond(tf.equal(width, 0.0), lambda: _adjust_bbox_boundaries(min_x, max_x), lambda: (min_x, max_x)) return min_y, min_x, max_y, max_x def _rotate_bbox(bbox, image_height, image_width, degrees): """Rotates the bbox coordinated by degrees. Args: bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x) of type float that represents the normalized coordinates between 0 and 1. image_height: Int, height of the image. image_width: Int, height of the image. degrees: Float, a scalar angle in degrees to rotate all images by. If degrees is positive the image will be rotated clockwise otherwise it will be rotated counterclockwise. Returns: A tensor of the same shape as bbox, but now with the rotated coordinates. """ image_height, image_width = ( tf.cast(image_height, tf.float32), tf.cast(image_width, tf.float32)) # Convert from degrees to radians. degrees_to_radians = math.pi / 180.0 radians = degrees * degrees_to_radians # Translate the bbox to the center of the image and turn the normalized 0-1 # coordinates to absolute pixel locations. 
# Y coordinates are made negative as the y axis of images goes down with # increasing pixel values, so we negate to make sure x axis and y axis points # are in the traditionally positive direction. min_y = -tf.cast(image_height * (bbox[0] - 0.5), tf.int32) min_x = tf.cast(image_width * (bbox[1] - 0.5), tf.int32) max_y = -tf.cast(image_height * (bbox[2] - 0.5), tf.int32) max_x = tf.cast(image_width * (bbox[3] - 0.5), tf.int32) coordinates = tf.stack( [[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]]) coordinates = tf.cast(coordinates, tf.float32) # Rotate the coordinates according to the rotation matrix clockwise if # radians is positive, else negative rotation_matrix = tf.stack( [[tf.cos(radians), tf.sin(radians)], [-tf.sin(radians), tf.cos(radians)]]) new_coords = tf.cast( tf.matmul(rotation_matrix, tf.transpose(coordinates)), tf.int32) # Find min/max values and convert them back to normalized 0-1 floats. min_y = -( tf.cast(tf.reduce_max(new_coords[0, :]), tf.float32) / image_height - 0.5) min_x = tf.cast(tf.reduce_min(new_coords[1, :]), tf.float32) / image_width + 0.5 max_y = -( tf.cast(tf.reduce_min(new_coords[0, :]), tf.float32) / image_height - 0.5) max_x = tf.cast(tf.reduce_max(new_coords[1, :]), tf.float32) / image_width + 0.5 # Clip the bboxes to be sure the fall between [0, 1]. min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x) min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x) return tf.stack([min_y, min_x, max_y, max_x]) def rotate_with_bboxes(image, bboxes, degrees, replace): """Equivalent of PIL Rotate that rotates the image and bbox. Args: image: 3D uint8 Tensor. bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox has 4 elements (min_y, min_x, max_y, max_x) of type float. degrees: Float, a scalar angle in degrees to rotate all images by. If degrees is positive the image will be rotated clockwise otherwise it will be rotated counterclockwise. replace: A one or three value 1D tensor to fill empty pixels. Returns: A tuple containing a 3D uint8 Tensor that will be the result of rotating image by degrees. The second element of the tuple is bboxes, where now the coordinates will be shifted to reflect the rotated image. Raises: ValueError: If applied to video. """ if image.shape.rank == 4: raise ValueError('Image rank 4 is not supported') # Rotate the image. image = wrapped_rotate(image, degrees, replace) # Convert bbox coordinates to pixel values. image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] # pylint:disable=g-long-lambda wrapped_rotate_bbox = lambda bbox: _rotate_bbox( bbox, image_height, image_width, degrees) # pylint:enable=g-long-lambda bboxes = tf.map_fn(wrapped_rotate_bbox, bboxes) return image, bboxes def _shear_bbox(bbox, image_height, image_width, level, shear_horizontal): """Shifts the bbox according to how the image was sheared. Args: bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x) of type float that represents the normalized coordinates between 0 and 1. image_height: Int, height of the image. image_width: Int, height of the image. level: Float. How much to shear the image. shear_horizontal: If true then shear in X dimension else shear in the Y dimension. Returns: A tensor of the same shape as bbox, but now with the shifted coordinates. """ image_height, image_width = ( tf.cast(image_height, tf.float32), tf.cast(image_width, tf.float32)) # Change bbox coordinates to be pixels. 
min_y = tf.cast(image_height * bbox[0], tf.int32) min_x = tf.cast(image_width * bbox[1], tf.int32) max_y = tf.cast(image_height * bbox[2], tf.int32) max_x = tf.cast(image_width * bbox[3], tf.int32) coordinates = tf.stack( [[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]]) coordinates = tf.cast(coordinates, tf.float32) # Shear the coordinates according to the translation matrix. if shear_horizontal: translation_matrix = tf.stack( [[1, 0], [-level, 1]]) else: translation_matrix = tf.stack( [[1, -level], [0, 1]]) translation_matrix = tf.cast(translation_matrix, tf.float32) new_coords = tf.cast( tf.matmul(translation_matrix, tf.transpose(coordinates)), tf.int32) # Find min/max values and convert them back to floats. min_y = tf.cast(tf.reduce_min(new_coords[0, :]), tf.float32) / image_height min_x = tf.cast(tf.reduce_min(new_coords[1, :]), tf.float32) / image_width max_y = tf.cast(tf.reduce_max(new_coords[0, :]), tf.float32) / image_height max_x = tf.cast(tf.reduce_max(new_coords[1, :]), tf.float32) / image_width # Clip the bboxes to be sure the fall between [0, 1]. min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x) min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x) return tf.stack([min_y, min_x, max_y, max_x]) def shear_with_bboxes(image, bboxes, level, replace, shear_horizontal): """Applies Shear Transformation to the image and shifts the bboxes. Args: image: 3D uint8 Tensor. bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox has 4 elements (min_y, min_x, max_y, max_x) of type float with values between [0, 1]. level: Float. How much to shear the image. This value will be between -0.3 to 0.3. replace: A one or three value 1D tensor to fill empty pixels. shear_horizontal: Boolean. If true then shear in X dimension else shear in the Y dimension. Returns: A tuple containing a 3D uint8 Tensor that will be the result of shearing image by level. The second element of the tuple is bboxes, where now the coordinates will be shifted to reflect the sheared image. Raises: ValueError: If applied to video. """ if image.shape.rank == 4: raise ValueError('Image rank 4 is not supported') if shear_horizontal: image = shear_x(image, level, replace) else: image = shear_y(image, level, replace) # Convert bbox coordinates to pixel values. image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] # pylint:disable=g-long-lambda wrapped_shear_bbox = lambda bbox: _shear_bbox( bbox, image_height, image_width, level, shear_horizontal) # pylint:enable=g-long-lambda bboxes = tf.map_fn(wrapped_shear_bbox, bboxes) return image, bboxes def _shift_bbox(bbox, image_height, image_width, pixels, shift_horizontal): """Shifts the bbox coordinates by pixels. Args: bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x) of type float that represents the normalized coordinates between 0 and 1. image_height: Int, height of the image. image_width: Int, width of the image. pixels: An int. How many pixels to shift the bbox. shift_horizontal: Boolean. If true then shift in X dimension else shift in Y dimension. Returns: A tensor of the same shape as bbox, but now with the shifted coordinates. """ pixels = tf.cast(pixels, tf.int32) # Convert bbox to integer pixel locations. 
min_y = tf.cast(tf.cast(image_height, tf.float32) * bbox[0], tf.int32) min_x = tf.cast(tf.cast(image_width, tf.float32) * bbox[1], tf.int32) max_y = tf.cast(tf.cast(image_height, tf.float32) * bbox[2], tf.int32) max_x = tf.cast(tf.cast(image_width, tf.float32) * bbox[3], tf.int32) if shift_horizontal: min_x = tf.maximum(0, min_x - pixels) max_x = tf.minimum(image_width, max_x - pixels) else: min_y = tf.maximum(0, min_y - pixels) max_y = tf.minimum(image_height, max_y - pixels) # Convert bbox back to floats. min_y = tf.cast(min_y, tf.float32) / tf.cast(image_height, tf.float32) min_x = tf.cast(min_x, tf.float32) / tf.cast(image_width, tf.float32) max_y = tf.cast(max_y, tf.float32) / tf.cast(image_height, tf.float32) max_x = tf.cast(max_x, tf.float32) / tf.cast(image_width, tf.float32) # Clip the bboxes to be sure the fall between [0, 1]. min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x) min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x) return tf.stack([min_y, min_x, max_y, max_x]) def translate_bbox(image, bboxes, pixels, replace, shift_horizontal): """Equivalent of PIL Translate in X/Y dimension that shifts image and bbox. Args: image: 3D uint8 Tensor. bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox has 4 elements (min_y, min_x, max_y, max_x) of type float with values between [0, 1]. pixels: An int. How many pixels to shift the image and bboxes replace: A one or three value 1D tensor to fill empty pixels. shift_horizontal: Boolean. If true then shift in X dimension else shift in Y dimension. Returns: A tuple containing a 3D uint8 Tensor that will be the result of translating image by pixels. The second element of the tuple is bboxes, where now the coordinates will be shifted to reflect the shifted image. Raises: ValueError if applied to video. """ if image.shape.rank == 4: raise ValueError('Image rank 4 is not supported') if shift_horizontal: image = translate_x(image, pixels, replace) else: image = translate_y(image, pixels, replace) # Convert bbox coordinates to pixel values. image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] # pylint:disable=g-long-lambda wrapped_shift_bbox = lambda bbox: _shift_bbox( bbox, image_height, image_width, pixels, shift_horizontal) # pylint:enable=g-long-lambda bboxes = tf.map_fn(wrapped_shift_bbox, bboxes) return image, bboxes def translate_y_only_bboxes( image: tf.Tensor, bboxes: tf.Tensor, prob: float, pixels: int, replace): """Apply translate_y to each bbox in the image with probability prob.""" if bboxes.shape.rank == 4: raise ValueError('translate_y_only_bboxes does not support rank 4 boxes') func_changes_bbox = False prob = _scale_bbox_only_op_probability(prob) return _apply_multi_bbox_augmentation_wrapper( image, bboxes, prob, translate_y, func_changes_bbox, pixels, replace) def _randomly_negate_tensor(tensor): """With 50% prob turn the tensor negative.""" should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool) final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor) return final_tensor def _rotate_level_to_arg(level: float): level = (level / _MAX_LEVEL) * 30. level = _randomly_negate_tensor(level) return (level,) def _shrink_level_to_arg(level: float): """Converts level to ratio by which we shrink the image content.""" if level == 0: return (1.0,) # if level is zero, do not shrink the image # Maximum shrinking ratio is 2.9. level = 2. 
/ (_MAX_LEVEL / level) + 0.9 return (level,) def _enhance_level_to_arg(level: float): return ((level / _MAX_LEVEL) * 1.8 + 0.1,) def _shear_level_to_arg(level: float): level = (level / _MAX_LEVEL) * 0.3 # Flip level to negative with 50% chance. level = _randomly_negate_tensor(level) return (level,) def _translate_level_to_arg(level: float, translate_const: float): level = (level / _MAX_LEVEL) * float(translate_const) # Flip level to negative with 50% chance. level = _randomly_negate_tensor(level) return (level,) def _gaussian_noise_level_to_arg(level: float, translate_const: float): low_std = (level / _MAX_LEVEL) high_std = translate_const * low_std return low_std, high_std def _mult_to_arg(level: float, multiplier: float = 1.): return (int((level / _MAX_LEVEL) * multiplier),) def _apply_func_with_prob(func: Any, image: tf.Tensor, bboxes: Optional[tf.Tensor], args: Any, prob: float): """Apply `func` to image w/ `args` as input with probability `prob`.""" assert isinstance(args, tuple) assert inspect.getfullargspec(func)[0][1] == 'bboxes' # Apply the function with probability `prob`. should_apply_op = tf.cast( tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool) augmented_image, augmented_bboxes = tf.cond( should_apply_op, lambda: func(image, bboxes, *args), lambda: (image, bboxes)) return augmented_image, augmented_bboxes def select_and_apply_random_policy(policies: Any, image: tf.Tensor, bboxes: Optional[tf.Tensor] = None): """Select a random policy from `policies` and apply it to `image`.""" policy_to_select = tf.random.uniform([], maxval=len(policies), dtype=tf.int32) # Note that using tf.case instead of tf.conds would result in significantly # larger graphs and would even break export for some larger policies. for (i, policy) in enumerate(policies): image, bboxes = tf.cond( tf.equal(i, policy_to_select), lambda selected_policy=policy: selected_policy(image, bboxes), lambda: (image, bboxes)) return image, bboxes NAME_TO_FUNC = { 'AutoContrast': autocontrast, 'Equalize': equalize, 'Invert': invert, 'Rotate': wrapped_rotate, 'Posterize': posterize, 'Solarize': solarize, 'SolarizeAdd': solarize_add, 'Color': color, 'Contrast': contrast, 'Brightness': brightness, 'Sharpness': sharpness, 'ShearX': shear_x, 'ShearY': shear_y, 'TranslateX': translate_x, 'TranslateY': translate_y, 'Cutout': cutout, 'Rotate_BBox': rotate_with_bboxes, 'Grayscale': grayscale, 'Gaussian_Noise': gaussian_noise, # pylint:disable=g-long-lambda 'ShearX_BBox': lambda image, bboxes, level, replace: shear_with_bboxes( image, bboxes, level, replace, shear_horizontal=True), 'ShearY_BBox': lambda image, bboxes, level, replace: shear_with_bboxes( image, bboxes, level, replace, shear_horizontal=False), 'TranslateX_BBox': lambda image, bboxes, pixels, replace: translate_bbox( image, bboxes, pixels, replace, shift_horizontal=True), 'TranslateY_BBox': lambda image, bboxes, pixels, replace: translate_bbox( image, bboxes, pixels, replace, shift_horizontal=False), # pylint:enable=g-long-lambda 'TranslateY_Only_BBoxes': translate_y_only_bboxes, } # Functions that require a `bboxes` parameter. 
REQUIRE_BOXES_FUNCS = frozenset({ 'Rotate_BBox', 'ShearX_BBox', 'ShearY_BBox', 'TranslateX_BBox', 'TranslateY_BBox', 'TranslateY_Only_BBoxes', }) # Functions that have a 'prob' parameter PROB_FUNCS = frozenset({ 'TranslateY_Only_BBoxes', }) # Functions that have a 'replace' parameter REPLACE_FUNCS = frozenset({ 'Rotate', 'TranslateX', 'ShearX', 'ShearY', 'TranslateY', 'Cutout', 'Rotate_BBox', 'ShearX_BBox', 'ShearY_BBox', 'TranslateX_BBox', 'TranslateY_BBox', 'TranslateY_Only_BBoxes', }) def level_to_arg(cutout_const: float, translate_const: float): """Creates a dict mapping image operation names to their arguments.""" no_arg = lambda level: () posterize_arg = lambda level: _mult_to_arg(level, 4) solarize_arg = lambda level: _mult_to_arg(level, 256) solarize_add_arg = lambda level: _mult_to_arg(level, 110) cutout_arg = lambda level: _mult_to_arg(level, cutout_const) translate_arg = lambda level: _translate_level_to_arg(level, translate_const) translate_bbox_arg = lambda level: _translate_level_to_arg(level, 120) args = { 'AutoContrast': no_arg, 'Equalize': no_arg, 'Invert': no_arg, 'Rotate': _rotate_level_to_arg, 'Posterize': posterize_arg, 'Solarize': solarize_arg, 'SolarizeAdd': solarize_add_arg, 'Color': _enhance_level_to_arg, 'Contrast': _enhance_level_to_arg, 'Brightness': _enhance_level_to_arg, 'Sharpness': _enhance_level_to_arg, 'ShearX': _shear_level_to_arg, 'ShearY': _shear_level_to_arg, 'Cutout': cutout_arg, 'TranslateX': translate_arg, 'TranslateY': translate_arg, 'Rotate_BBox': _rotate_level_to_arg, 'ShearX_BBox': _shear_level_to_arg, 'ShearY_BBox': _shear_level_to_arg, 'Grayscale': no_arg, # pylint:disable=g-long-lambda 'Gaussian_Noise': lambda level: _gaussian_noise_level_to_arg( level, translate_const), # pylint:disable=g-long-lambda 'TranslateX_BBox': lambda level: _translate_level_to_arg( level, translate_const), 'TranslateY_BBox': lambda level: _translate_level_to_arg( level, translate_const), # pylint:enable=g-long-lambda 'TranslateY_Only_BBoxes': translate_bbox_arg, } return args def bbox_wrapper(func): """Adds a bboxes function argument to func and returns unchanged bboxes.""" def wrapper(images, bboxes, *args, **kwargs): return (func(images, *args, **kwargs), bboxes) return wrapper def _parse_policy_info(name: str, prob: float, level: float, replace_value: List[int], cutout_const: float, translate_const: float, level_std: float = 0.) -> Tuple[Any, float, Any]: """Return the function that corresponds to `name` and update `level` param.""" func = NAME_TO_FUNC[name] if level_std > 0: level += tf.random.normal([], dtype=tf.float32) level = tf.clip_by_value(level, 0., _MAX_LEVEL) args = level_to_arg(cutout_const, translate_const)[name](level) if name in PROB_FUNCS: # Add in the prob arg if it is required for the function that is called. args = tuple([prob] + list(args)) if name in REPLACE_FUNCS: # Add in replace arg if it is required for the function that is called. args = tuple(list(args) + [replace_value]) # Add bboxes as the second positional argument for the function if it does # not already exist. if 'bboxes' not in inspect.getfullargspec(func)[0]: func = bbox_wrapper(func) return func, prob, args class ImageAugment(object): """Image augmentation class for applying image distortions.""" def distort( self, image: tf.Tensor ) -> tf.Tensor: """Given an image tensor, returns a distorted image with the same shape. Args: image: `Tensor` of shape [height, width, 3] or [num_frames, height, width, 3] representing an image or image sequence. 
Returns: The augmented version of `image`. """ raise NotImplementedError() def distort_with_boxes( self, image: tf.Tensor, bboxes: tf.Tensor ) -> Tuple[tf.Tensor, tf.Tensor]: """Distorts the image and bounding boxes. Args: image: `Tensor` of shape [height, width, 3] or [num_frames, height, width, 3] representing an image or image sequence. bboxes: `Tensor` of shape [num_boxes, 4] or [num_frames, num_boxes, 4] representing bounding boxes for an image or image sequence. Returns: The augmented version of `image` and `bboxes`. """ raise NotImplementedError class AutoAugment(ImageAugment): """Applies the AutoAugment policy to images. AutoAugment is from the paper: https://arxiv.org/abs/1805.09501. """ def __init__(self, augmentation_name: str = 'v0', policies: Optional[Iterable[Iterable[Tuple[str, float, float]]]] = None, cutout_const: float = 100, translate_const: float = 250): """Applies the AutoAugment policy to images. Args: augmentation_name: The name of the AutoAugment policy to use. The available options are `v0`, `test`, `reduced_cifar10`, `svhn` and `reduced_imagenet`. `v0` is the policy used for all of the results in the paper and was found to achieve the best results on the COCO dataset. `v1`, `v2` and `v3` are additional good policies found on the COCO dataset that have slight variation in what operations were used during the search procedure along with how many operations are applied in parallel to a single image (2 vs 3). Make sure to set `policies` to `None` (the default) if you want to set options using `augmentation_name`. policies: list of lists of tuples in the form `(func, prob, level)`, `func` is a string name of the augmentation function, `prob` is the probability of applying the `func` operation, `level` (or magnitude) is the input argument for `func`. For example: ``` [[('Equalize', 0.9, 3), ('Color', 0.7, 8)], [('Invert', 0.6, 5), ('Rotate', 0.2, 9), ('ShearX', 0.1, 2)], ...] ``` The outer-most list must be 3-d. The number of operations in a sub-policy can vary from one sub-policy to another. If you provide `policies` as input, any option set with `augmentation_name` will get overriden as they are mutually exclusive. cutout_const: multiplier for applying cutout. translate_const: multiplier for applying translation. Raises: ValueError if `augmentation_name` is unsupported. """ super(AutoAugment, self).__init__() self.augmentation_name = augmentation_name self.cutout_const = float(cutout_const) self.translate_const = float(translate_const) self.available_policies = { 'detection_v0': self.detection_policy_v0(), 'v0': self.policy_v0(), 'test': self.policy_test(), 'simple': self.policy_simple(), 'reduced_cifar10': self.policy_reduced_cifar10(), 'svhn': self.policy_svhn(), 'reduced_imagenet': self.policy_reduced_imagenet(), 'panoptic_deeplab_policy': self.panoptic_deeplab_policy(), 'vit': self.vit(), 'deit3_three_augment': self.deit3_three_augment(), } if not policies: if augmentation_name not in self.available_policies: raise ValueError( 'Invalid augmentation_name: {}'.format(augmentation_name)) self.policies = self.available_policies[augmentation_name] else: self._check_policy_shape(policies) self.policies = policies def _check_policy_shape(self, policies): """Checks dimension and shape of the custom policy. Args: policies: List of list of tuples in the form `(func, prob, level)`. Must have shape of `(:, :, 3)`. Raises: ValueError if the shape of `policies` is unexpected. 
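
    For example, `np.array(policies).shape` is `(2, 2, 3)` for this custom
    policy (sub-policies borrowed from `policy_v0` purely for illustration):

    ```
    [[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
     [('Color', 0.4, 9), ('Equalize', 0.6, 3)]]
    ```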
""" in_shape = np.array(policies).shape if len(in_shape) != 3 or in_shape[-1:] != (3,): raise ValueError('Wrong shape detected for custom policy. Expected ' '(:, :, 3) but got {}.'.format(in_shape)) def _make_tf_policies(self): """Prepares the TF functions for augmentations based on the policies.""" replace_value = [128] * 3 # func is the string name of the augmentation function, prob is the # probability of applying the operation and level is the parameter # associated with the tf op. # tf_policies are functions that take in an image and return an augmented # image. tf_policies = [] for policy in self.policies: tf_policy = [] assert_ranges = [] # Link string name to the correct python function and make sure the # correct argument is passed into that function. for policy_info in policy: _, prob, level = policy_info assert_ranges.append(tf.Assert(tf.less_equal(prob, 1.), [prob])) assert_ranges.append( tf.Assert(tf.less_equal(level, int(_MAX_LEVEL)), [level])) policy_info = list(policy_info) + [ replace_value, self.cutout_const, self.translate_const ] tf_policy.append(_parse_policy_info(*policy_info)) # Now build the tf policy that will apply the augmentation procedue # on image. def make_final_policy(tf_policy_): def final_policy(image_, bboxes_): for func, prob, args in tf_policy_: image_, bboxes_ = _apply_func_with_prob(func, image_, bboxes_, args, prob) return image_, bboxes_ return final_policy with tf.control_dependencies(assert_ranges): tf_policies.append(make_final_policy(tf_policy)) return tf_policies def distort(self, image: tf.Tensor) -> tf.Tensor: """See base class.""" input_image_type = image.dtype if input_image_type != tf.uint8: image = tf.clip_by_value(image, 0.0, 255.0) image = tf.cast(image, dtype=tf.uint8) tf_policies = self._make_tf_policies() image, _ = select_and_apply_random_policy(tf_policies, image, bboxes=None) image = tf.cast(image, dtype=input_image_type) return image def distort_with_boxes(self, image: tf.Tensor, bboxes: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: """See base class.""" input_image_type = image.dtype if input_image_type != tf.uint8: image = tf.clip_by_value(image, 0.0, 255.0) image = tf.cast(image, dtype=tf.uint8) tf_policies = self._make_tf_policies() image, bboxes = select_and_apply_random_policy(tf_policies, image, bboxes) return image, bboxes @staticmethod def detection_policy_v0(): """Autoaugment policy that was used in AutoAugment Paper for Detection. https://arxiv.org/pdf/1906.11172 Each tuple is an augmentation operation of the form (operation, probability, magnitude). Each element in policy is a sub-policy that will be applied sequentially on the image. Returns: the policy. """ policy = [ [('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)], [('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)], [('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)], [('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)], [('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)], ] return policy @staticmethod def policy_v0(): """Autoaugment policy that was used in AutoAugment Paper. Each tuple is an augmentation operation of the form (operation, probability, magnitude). Each element in policy is a sub-policy that will be applied sequentially on the image. Returns: the policy. 
""" policy = [ [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], [('Color', 0.4, 9), ('Equalize', 0.6, 3)], [('Color', 0.4, 1), ('Rotate', 0.6, 8)], [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], [('Color', 0.2, 0), ('Equalize', 0.8, 8)], [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], [('Color', 0.6, 1), ('Equalize', 1.0, 2)], [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], [('Color', 0.4, 7), ('Equalize', 0.6, 0)], [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], [('Solarize', 0.6, 8), ('Color', 0.6, 9)], [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], [('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)], [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], [('ShearY', 0.8, 0), ('Color', 0.6, 4)], [('Color', 1.0, 0), ('Rotate', 0.6, 2)], [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], [('Color', 0.8, 6), ('Rotate', 0.4, 5)], ] return policy @staticmethod def policy_reduced_cifar10(): """Autoaugment policy for reduced CIFAR-10 dataset. Result is from the AutoAugment paper: https://arxiv.org/abs/1805.09501. Each tuple is an augmentation operation of the form (operation, probability, magnitude). Each element in policy is a sub-policy that will be applied sequentially on the image. Returns: the policy. """ policy = [ [('Invert', 0.1, 7), ('Contrast', 0.2, 6)], [('Rotate', 0.7, 2), ('TranslateX', 0.3, 9)], [('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)], [('ShearY', 0.5, 8), ('TranslateY', 0.7, 9)], [('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)], [('ShearY', 0.2, 7), ('Posterize', 0.3, 7)], [('Color', 0.4, 3), ('Brightness', 0.6, 7)], [('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)], [('Equalize', 0.6, 5), ('Equalize', 0.5, 1)], [('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)], [('Color', 0.7, 7), ('TranslateX', 0.5, 8)], [('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)], [('TranslateY', 0.4, 3), ('Sharpness', 0.2, 6)], [('Brightness', 0.9, 6), ('Color', 0.2, 8)], [('Solarize', 0.5, 2), ('Invert', 0.0, 3)], [('Equalize', 0.2, 0), ('AutoContrast', 0.6, 0)], [('Equalize', 0.2, 8), ('Equalize', 0.6, 4)], [('Color', 0.9, 9), ('Equalize', 0.6, 6)], [('AutoContrast', 0.8, 4), ('Solarize', 0.2, 8)], [('Brightness', 0.1, 3), ('Color', 0.7, 0)], [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)], [('TranslateY', 0.9, 9), ('TranslateY', 0.7, 9)], [('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)], [('Equalize', 0.8, 8), ('Invert', 0.1, 3)], [('TranslateY', 0.7, 9), ('AutoContrast', 0.9, 1)], ] return policy @staticmethod def policy_svhn(): """Autoaugment policy for SVHN dataset. Result is from the AutoAugment paper: https://arxiv.org/abs/1805.09501. Each tuple is an augmentation operation of the form (operation, probability, magnitude). Each element in policy is a sub-policy that will be applied sequentially on the image. Returns: the policy. 
""" policy = [ [('ShearX', 0.9, 4), ('Invert', 0.2, 3)], [('ShearY', 0.9, 8), ('Invert', 0.7, 5)], [('Equalize', 0.6, 5), ('Solarize', 0.6, 6)], [('Invert', 0.9, 3), ('Equalize', 0.6, 3)], [('Equalize', 0.6, 1), ('Rotate', 0.9, 3)], [('ShearX', 0.9, 4), ('AutoContrast', 0.8, 3)], [('ShearY', 0.9, 8), ('Invert', 0.4, 5)], [('ShearY', 0.9, 5), ('Solarize', 0.2, 6)], [('Invert', 0.9, 6), ('AutoContrast', 0.8, 1)], [('Equalize', 0.6, 3), ('Rotate', 0.9, 3)], [('ShearX', 0.9, 4), ('Solarize', 0.3, 3)], [('ShearY', 0.8, 8), ('Invert', 0.7, 4)], [('Equalize', 0.9, 5), ('TranslateY', 0.6, 6)], [('Invert', 0.9, 4), ('Equalize', 0.6, 7)], [('Contrast', 0.3, 3), ('Rotate', 0.8, 4)], [('Invert', 0.8, 5), ('TranslateY', 0.0, 2)], [('ShearY', 0.7, 6), ('Solarize', 0.4, 8)], [('Invert', 0.6, 4), ('Rotate', 0.8, 4)], [('ShearY', 0.3, 7), ('TranslateX', 0.9, 3)], [('ShearX', 0.1, 6), ('Invert', 0.6, 5)], [('Solarize', 0.7, 2), ('TranslateY', 0.6, 7)], [('ShearY', 0.8, 4), ('Invert', 0.8, 8)], [('ShearX', 0.7, 9), ('TranslateY', 0.8, 3)], [('ShearY', 0.8, 5), ('AutoContrast', 0.7, 3)], [('ShearX', 0.7, 2), ('Invert', 0.1, 5)], ] return policy @staticmethod def policy_reduced_imagenet(): """Autoaugment policy for reduced ImageNet dataset. Result is from the AutoAugment paper: https://arxiv.org/abs/1805.09501. Each tuple is an augmentation operation of the form (operation, probability, magnitude). Each element in policy is a sub-policy that will be applied sequentially on the image. Returns: the policy. """ policy = [ [('Posterize', 0.4, 8), ('Rotate', 0.6, 9)], [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], [('Posterize', 0.6, 7), ('Posterize', 0.6, 6)], [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], [('Posterize', 0.8, 5), ('Equalize', 1.0, 2)], [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], [('Equalize', 0.6, 8), ('Posterize', 0.4, 6)], [('Rotate', 0.8, 8), ('Color', 0.4, 0)], [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], [('Color', 0.6, 4), ('Contrast', 1.0, 8)], [('Rotate', 0.8, 8), ('Color', 1.0, 2)], [('Color', 0.8, 8), ('Solarize', 0.8, 7)], [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], [('Color', 0.4, 0), ('Equalize', 0.6, 3)], [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], [('Color', 0.6, 4), ('Contrast', 1.0, 8)], [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)] ] return policy @staticmethod def policy_simple(): """Same as `policy_v0`, except with custom ops removed.""" policy = [ [('Color', 0.4, 9), ('Equalize', 0.6, 3)], [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], [('Color', 0.2, 0), ('Equalize', 0.8, 8)], [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], [('Color', 0.6, 1), ('Equalize', 1.0, 2)], [('Color', 0.4, 7), ('Equalize', 0.6, 0)], [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], [('Solarize', 0.6, 8), ('Color', 0.6, 9)], [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], ] return policy @staticmethod def panoptic_deeplab_policy(): policy = [ [('Sharpness', 0.4, 1.4), ('Brightness', 0.2, 2.0)], [('Equalize', 0.0, 1.8), ('Contrast', 0.2, 2.0)], 
[('Sharpness', 0.2, 1.8), ('Color', 0.2, 1.8)], [('Solarize', 0.2, 1.4), ('Equalize', 0.6, 1.8)], [('Sharpness', 0.2, 0.2), ('Equalize', 0.2, 1.4)]] return policy @staticmethod def vit(): """Autoaugment policy for a generic ViT.""" policy = [ [('Sharpness', 0.4, 1.4), ('Brightness', 0.2, 2.0), ('Cutout', 0.8, 8)], [('Equalize', 0.0, 1.8), ('Contrast', 0.2, 2.0), ('Cutout', 0.8, 8)], [('Sharpness', 0.2, 1.8), ('Color', 0.2, 1.8), ('Cutout', 0.8, 8)], [('Solarize', 0.2, 1.4), ('Equalize', 0.6, 1.8), ('Cutout', 0.8, 8)], [('Sharpness', 0.2, 0.2), ('Equalize', 0.2, 1.4), ('Cutout', 0.8, 8)], [('Sharpness', 0.4, 7), ('Invert', 0.6, 8), ('Cutout', 0.8, 8)], [('Invert', 0.6, 4), ('Equalize', 1.0, 8), ('Cutout', 0.8, 8)], [('Posterize', 0.6, 7), ('Posterize', 0.6, 6), ('Cutout', 0.8, 8)], [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5), ('Cutout', 0.8, 8)], ] return policy @staticmethod def deit3_three_augment(): """Autoaugment policy for three augmentations. Proposed in paper: https://arxiv.org/abs/2204.07118. Each tuple is an augmentation operation of the form (operation, probability, magnitude). Each element in policy is a sub-policy that will be applied on the image. Randomly chooses one of the three augmentation to apply on image. Returns: the policy. """ policy = [ [('Grayscale', 1.0, 0)], [('Solarize', 1.0, 5)], # to have threshold as 128 [('Gaussian_Noise', 1.0, 1)], # to have low_std as 0.1 ] return policy @staticmethod def policy_test(): """Autoaugment test policy for debugging.""" policy = [ [('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)], ] return policy def _maybe_identity(x: Optional[tf.Tensor]) -> Optional[tf.Tensor]: return tf.identity(x) if x is not None else None class RandAugment(ImageAugment): """Applies the RandAugment policy to images. RandAugment is from the paper https://arxiv.org/abs/1909.13719. """ def __init__(self, num_layers: int = 2, magnitude: float = 10., cutout_const: float = 40., translate_const: float = 100., magnitude_std: float = 0.0, prob_to_apply: Optional[float] = None, exclude_ops: Optional[List[str]] = None): """Applies the RandAugment policy to images. Args: num_layers: Integer, the number of augmentation transformations to apply sequentially to an image. Represented as (N) in the paper. Usually best values will be in the range [1, 3]. magnitude: Integer, shared magnitude across all augmentation operations. Represented as (M) in the paper. Usually best values are in the range [5, 10]. cutout_const: multiplier for applying cutout. translate_const: multiplier for applying translation. magnitude_std: randomness of the severity as proposed by the authors of the timm library. prob_to_apply: The probability to apply the selected augmentation at each layer. exclude_ops: exclude selected operations. 
""" super(RandAugment, self).__init__() self.num_layers = num_layers self.magnitude = float(magnitude) self.cutout_const = float(cutout_const) self.translate_const = float(translate_const) self.prob_to_apply = ( float(prob_to_apply) if prob_to_apply is not None else None) self.available_ops = [ 'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize', 'Solarize', 'Color', 'Contrast', 'Brightness', 'Sharpness', 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd' ] self.magnitude_std = magnitude_std if exclude_ops: self.available_ops = [ op for op in self.available_ops if op not in exclude_ops ] @classmethod def build_for_detection(cls, num_layers: int = 2, magnitude: float = 10., cutout_const: float = 40., translate_const: float = 100., magnitude_std: float = 0.0, prob_to_apply: Optional[float] = None, exclude_ops: Optional[List[str]] = None): """Builds a RandAugment that modifies bboxes for geometric transforms.""" augmenter = cls( num_layers=num_layers, magnitude=magnitude, cutout_const=cutout_const, translate_const=translate_const, magnitude_std=magnitude_std, prob_to_apply=prob_to_apply, exclude_ops=exclude_ops) box_aware_ops_by_base_name = { 'Rotate': 'Rotate_BBox', 'ShearX': 'ShearX_BBox', 'ShearY': 'ShearY_BBox', 'TranslateX': 'TranslateX_BBox', 'TranslateY': 'TranslateY_BBox', } augmenter.available_ops = [ box_aware_ops_by_base_name.get(op_name) or op_name for op_name in augmenter.available_ops ] return augmenter def _distort_common( self, image: tf.Tensor, bboxes: Optional[tf.Tensor] = None ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]: """Distorts the image and optionally bounding boxes.""" input_image_type = image.dtype if input_image_type != tf.uint8: image = tf.clip_by_value(image, 0.0, 255.0) image = tf.cast(image, dtype=tf.uint8) replace_value = [128] * 3 min_prob, max_prob = 0.2, 0.8 aug_image = image aug_bboxes = bboxes for _ in range(self.num_layers): op_to_select = tf.random.uniform([], maxval=len(self.available_ops) + 1, dtype=tf.int32) branch_fns = [] for (i, op_name) in enumerate(self.available_ops): prob = tf.random.uniform([], minval=min_prob, maxval=max_prob, dtype=tf.float32) func, _, args = _parse_policy_info(op_name, prob, self.magnitude, replace_value, self.cutout_const, self.translate_const, self.magnitude_std) branch_fns.append(( i, # pylint:disable=g-long-lambda lambda selected_func=func, selected_args=args: selected_func( image, bboxes, *selected_args))) # pylint:enable=g-long-lambda aug_image, aug_bboxes = tf.switch_case( branch_index=op_to_select, branch_fns=branch_fns, default=lambda: (tf.identity(image), _maybe_identity(bboxes))) # pylint: disable=cell-var-from-loop if self.prob_to_apply is not None: aug_image, aug_bboxes = tf.cond( tf.random.uniform(shape=[], dtype=tf.float32) < self.prob_to_apply, lambda: (tf.identity(aug_image), _maybe_identity(aug_bboxes)), lambda: (tf.identity(image), _maybe_identity(bboxes))) image = aug_image bboxes = aug_bboxes image = tf.cast(image, dtype=input_image_type) return image, bboxes def distort(self, image: tf.Tensor) -> tf.Tensor: """See base class.""" image, _ = self._distort_common(image) return image def distort_with_boxes(self, image: tf.Tensor, bboxes: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: """See base class.""" image, bboxes = self._distort_common(image, bboxes) return image, bboxes class RandomErasing(ImageAugment): """Applies RandomErasing to a single image. Reference: https://arxiv.org/abs/1708.04896 Implementation is inspired by https://github.com/rwightman/pytorch-image-models. 
""" def __init__(self, probability: float = 0.25, min_area: float = 0.02, max_area: float = 1 / 3, min_aspect: float = 0.3, max_aspect: Optional[float] = None, min_count=1, max_count=1, trials=10): """Applies RandomErasing to a single image. Args: probability: Probability of augmenting the image. Defaults to `0.25`. min_area: Minimum area of the random erasing rectangle. Defaults to `0.02`. max_area: Maximum area of the random erasing rectangle. Defaults to `1/3`. min_aspect: Minimum aspect rate of the random erasing rectangle. Defaults to `0.3`. max_aspect: Maximum aspect rate of the random erasing rectangle. Defaults to `None`. min_count: Minimum number of erased rectangles. Defaults to `1`. max_count: Maximum number of erased rectangles. Defaults to `1`. trials: Maximum number of trials to randomly sample a rectangle that fulfills constraint. Defaults to `10`. """ self._probability = probability self._min_area = float(min_area) self._max_area = float(max_area) self._min_log_aspect = math.log(min_aspect) self._max_log_aspect = math.log(max_aspect or 1 / min_aspect) self._min_count = min_count self._max_count = max_count self._trials = trials def distort(self, image: tf.Tensor) -> tf.Tensor: """Applies RandomErasing to single `image`. Args: image (tf.Tensor): Of shape [height, width, 3] representing an image. Returns: tf.Tensor: The augmented version of `image`. """ uniform_random = tf.random.uniform(shape=[], minval=0., maxval=1.0) mirror_cond = tf.less(uniform_random, self._probability) image = tf.cond(mirror_cond, lambda: self._erase(image), lambda: image) return image @tf.function def _erase(self, image: tf.Tensor) -> tf.Tensor: """Erase an area.""" if self._min_count == self._max_count: count = self._min_count else: count = tf.random.uniform( shape=[], minval=int(self._min_count), maxval=int(self._max_count - self._min_count + 1), dtype=tf.int32) image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] area = tf.cast(image_width * image_height, tf.float32) for _ in range(count): # Work around since break is not supported in tf.function is_trial_successfull = False for _ in range(self._trials): if not is_trial_successfull: erase_area = tf.random.uniform( shape=[], minval=area * self._min_area, maxval=area * self._max_area) aspect_ratio = tf.math.exp( tf.random.uniform( shape=[], minval=self._min_log_aspect, maxval=self._max_log_aspect)) half_height = tf.cast( tf.math.round(tf.math.sqrt(erase_area * aspect_ratio) / 2), dtype=tf.int32) half_width = tf.cast( tf.math.round(tf.math.sqrt(erase_area / aspect_ratio) / 2), dtype=tf.int32) if 2 * half_height < image_height and 2 * half_width < image_width: center_height = tf.random.uniform( shape=[], minval=0, maxval=int(image_height - 2 * half_height), dtype=tf.int32) center_width = tf.random.uniform( shape=[], minval=0, maxval=int(image_width - 2 * half_width), dtype=tf.int32) image = _fill_rectangle( image, center_width, center_height, half_width, half_height, replace=None) is_trial_successfull = True return image class MixupAndCutmix: """Applies Mixup and/or Cutmix to a batch of images. - Mixup: https://arxiv.org/abs/1710.09412 - Cutmix: https://arxiv.org/abs/1905.04899 Implementaion is inspired by https://github.com/rwightman/pytorch-image-models """ def __init__(self, mixup_alpha: float = .8, cutmix_alpha: float = 1., prob: float = 1.0, switch_prob: float = 0.5, label_smoothing: float = 0.1, num_classes: int = 1001): """Applies Mixup and/or Cutmix to a batch of images. 
Args: mixup_alpha (float, optional): For drawing a random lambda (`lam`) from a beta distribution (for each image). If zero Mixup is deactivated. Defaults to .8. cutmix_alpha (float, optional): For drawing a random lambda (`lam`) from a beta distribution (for each image). If zero Cutmix is deactivated. Defaults to 1.. prob (float, optional): Of augmenting the batch. Defaults to 1.0. switch_prob (float, optional): Probability of applying Cutmix for the batch. Defaults to 0.5. label_smoothing (float, optional): Constant for label smoothing. Defaults to 0.1. num_classes (int, optional): Number of classes. Defaults to 1001. """ self.mixup_alpha = mixup_alpha self.cutmix_alpha = cutmix_alpha self.mix_prob = prob self.switch_prob = switch_prob self.label_smoothing = label_smoothing self.num_classes = num_classes self.mode = 'batch' self.mixup_enabled = True if self.mixup_alpha and not self.cutmix_alpha: self.switch_prob = -1 elif not self.mixup_alpha and self.cutmix_alpha: self.switch_prob = 1 def __call__(self, images: tf.Tensor, labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: return self.distort(images, labels) def distort(self, images: tf.Tensor, labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: """Applies Mixup and/or Cutmix to batch of images and transforms labels. Args: images (tf.Tensor): Of shape [batch_size, height, width, 3] representing a batch of image, or [batch_size, time, height, width, 3] representing a batch of video. labels (tf.Tensor): Of shape [batch_size, ] representing the class id for each image of the batch. Returns: Tuple[tf.Tensor, tf.Tensor]: The augmented version of `image` and `labels`. """ labels = tf.reshape(labels, [-1]) augment_cond = tf.less( tf.random.uniform(shape=[], minval=0., maxval=1.0), self.mix_prob) # pylint: disable=g-long-lambda augment_a = lambda: self._update_labels(*tf.cond( tf.less( tf.random.uniform(shape=[], minval=0., maxval=1.0), self.switch_prob ), lambda: self._cutmix(images, labels), lambda: self._mixup( images, labels))) augment_b = lambda: (images, self._smooth_labels(labels)) # pylint: enable=g-long-lambda return tf.cond(augment_cond, augment_a, augment_b) @staticmethod def _sample_from_beta(alpha, beta, shape): sample_alpha = tf.random.gamma(shape, 1., beta=alpha) sample_beta = tf.random.gamma(shape, 1., beta=beta) return sample_alpha / (sample_alpha + sample_beta) def _cutmix(self, images: tf.Tensor, labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """Applies cutmix.""" lam = MixupAndCutmix._sample_from_beta(self.cutmix_alpha, self.cutmix_alpha, tf.shape(labels)) ratio = tf.math.sqrt(1 - lam) batch_size = tf.shape(images)[0] if images.shape.rank == 4: image_height, image_width = tf.shape(images)[1], tf.shape(images)[2] fill_fn = _fill_rectangle elif images.shape.rank == 5: image_height, image_width = tf.shape(images)[2], tf.shape(images)[3] fill_fn = _fill_rectangle_video else: raise ValueError('Bad image rank: {}'.format(images.shape.rank)) cut_height = tf.cast( ratio * tf.cast(image_height, dtype=tf.float32), dtype=tf.int32) cut_width = tf.cast( ratio * tf.cast(image_height, dtype=tf.float32), dtype=tf.int32) random_center_height = tf.random.uniform( shape=[batch_size], minval=0, maxval=image_height, dtype=tf.int32) random_center_width = tf.random.uniform( shape=[batch_size], minval=0, maxval=image_width, dtype=tf.int32) bbox_area = cut_height * cut_width lam = 1. 
- bbox_area / (image_height * image_width) lam = tf.cast(lam, dtype=tf.float32) images = tf.map_fn( lambda x: fill_fn(*x), (images, random_center_width, random_center_height, cut_width // 2, cut_height // 2, tf.reverse(images, [0])), dtype=( images.dtype, tf.int32, tf.int32, tf.int32, tf.int32, images.dtype), fn_output_signature=tf.TensorSpec(images.shape[1:], dtype=images.dtype)) return images, labels, lam def _mixup(self, images: tf.Tensor, labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: """Applies mixup.""" lam = MixupAndCutmix._sample_from_beta(self.mixup_alpha, self.mixup_alpha, tf.shape(labels)) if images.shape.rank == 4: lam = tf.reshape(lam, [-1, 1, 1, 1]) elif images.shape.rank == 5: lam = tf.reshape(lam, [-1, 1, 1, 1, 1]) else: raise ValueError('Bad image rank: {}'.format(images.shape.rank)) lam_cast = tf.cast(lam, dtype=images.dtype) images = lam_cast * images + (1. - lam_cast) * tf.reverse(images, [0]) return images, labels, tf.squeeze(lam) def _smooth_labels(self, labels: tf.Tensor) -> tf.Tensor: off_value = self.label_smoothing / self.num_classes on_value = 1. - self.label_smoothing + off_value smooth_labels = tf.one_hot( labels, self.num_classes, on_value=on_value, off_value=off_value) return smooth_labels def _update_labels(self, images: tf.Tensor, labels: tf.Tensor, lam: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: labels_1 = self._smooth_labels(labels) labels_2 = tf.reverse(labels_1, [0]) lam = tf.reshape(lam, [-1, 1]) labels = lam * labels_1 + (1. - lam) * labels_2 return images, labels
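# A minimal usage sketch, assuming a `tf.data.Dataset` of (image, label) pairs
# with uint8 images of shape [height, width, 3]; the batch size and number of
# classes below are placeholder values, not settings taken from this library.
def _example_augmentation_pipeline(dataset: tf.data.Dataset,
                                   batch_size: int = 32,
                                   num_classes: int = 1000) -> tf.data.Dataset:
  """Applies per-image RandAugment, then batch-level Mixup/Cutmix."""
  rand_augment = RandAugment(num_layers=2, magnitude=10.)
  mixup_and_cutmix = MixupAndCutmix(
      mixup_alpha=0.8, cutmix_alpha=1.0, num_classes=num_classes)

  def _augment_image(image, label):
    # RandAugment.distort operates on a single [height, width, 3] image.
    return rand_augment.distort(image), label

  def _augment_batch(images, labels):
    # MixupAndCutmix.distort mixes pairs within the batch and returns smoothed
    # soft labels of shape [batch_size, num_classes].
    return mixup_and_cutmix.distort(tf.cast(images, tf.float32), labels)

  dataset = dataset.map(_augment_image, num_parallel_calls=tf.data.AUTOTUNE)
  dataset = dataset.batch(batch_size, drop_remainder=True)
  return dataset.map(_augment_batch, num_parallel_calls=tf.data.AUTOTUNE)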
102,890
36.064481
130
py
models
models-master/official/vision/ops/spatial_transform_ops.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Spatial transform ops.""" from typing import Dict, Tuple import numpy as np import tensorflow as tf from official.vision.ops.box_ops import bbox2mask _EPSILON = 1e-8 def _feature_bilinear_interpolation(features: tf.Tensor, kernel_y: tf.Tensor, kernel_x: tf.Tensor) -> tf.Tensor: """Feature bilinear interpolation. The RoIAlign feature f can be computed by bilinear interpolation of four neighboring feature points f0, f1, f2, and f3. f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T [f10, f11]] f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11 f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11 kernel_y = [hy, ly] kernel_x = [hx, lx] Args: features: The features are in shape of [batch_size, num_boxes, output_size * 2, output_size * 2, num_filters]. kernel_y: Tensor of size [batch_size, boxes, output_size, 2, 1]. kernel_x: Tensor of size [batch_size, boxes, output_size, 2, 1]. Returns: A 5-D tensor representing feature crop of shape [batch_size, num_boxes, output_size, output_size, num_filters]. """ features_shape = tf.shape(features) batch_size, num_boxes, output_size, num_filters = ( features_shape[0], features_shape[1], features_shape[2], features_shape[4]) output_size = output_size // 2 kernel_y = tf.reshape(kernel_y, [batch_size, num_boxes, output_size * 2, 1]) kernel_x = tf.reshape(kernel_x, [batch_size, num_boxes, 1, output_size * 2]) # Use implicit broadcast to generate the interpolation kernel. The # multiplier `4` is for avg pooling. interpolation_kernel = kernel_y * kernel_x * 4 # Interpolate the gathered features with computed interpolation kernels. features *= tf.cast( tf.expand_dims(interpolation_kernel, axis=-1), dtype=features.dtype) features = tf.reshape( features, [batch_size * num_boxes, output_size * 2, output_size * 2, num_filters]) features = tf.nn.avg_pool(features, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID') features = tf.reshape( features, [batch_size, num_boxes, output_size, output_size, num_filters]) return features def _compute_grid_positions( boxes: tf.Tensor, boundaries: tf.Tensor, output_size: int, sample_offset: float) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]: """Computes the grid position w.r.t. the corresponding feature map. Args: boxes: a 3-D tensor of shape [batch_size, num_boxes, 4] encoding the information of each box w.r.t. the corresponding feature map. boxes[:, :, 0:2] are the grid position in (y, x) (float) of the top-left corner of each box. boxes[:, :, 2:4] are the box sizes in (h, w) (float) in terms of the number of pixels of the corresponding feature map size. boundaries: a 3-D tensor of shape [batch_size, num_boxes, 2] representing the boundary (in (y, x)) of the corresponding feature map for each box. Any resampled grid points that go beyond the bounary will be clipped. output_size: a scalar indicating the output crop size. sample_offset: a float number in [0, 1] indicates the subpixel sample offset from grid point. 
Returns: kernel_y: Tensor of size [batch_size, boxes, output_size, 2, 1]. kernel_x: Tensor of size [batch_size, boxes, output_size, 2, 1]. box_grid_y0y1: Tensor of size [batch_size, boxes, output_size, 2] box_grid_x0x1: Tensor of size [batch_size, boxes, output_size, 2] """ boxes_shape = tf.shape(boxes) batch_size, num_boxes = boxes_shape[0], boxes_shape[1] if batch_size is None: batch_size = tf.shape(boxes)[0] box_grid_x = [] box_grid_y = [] for i in range(output_size): box_grid_x.append(boxes[:, :, 1] + (i + sample_offset) * boxes[:, :, 3] / output_size) box_grid_y.append(boxes[:, :, 0] + (i + sample_offset) * boxes[:, :, 2] / output_size) box_grid_x = tf.stack(box_grid_x, axis=2) box_grid_y = tf.stack(box_grid_y, axis=2) box_grid_y0 = tf.floor(box_grid_y) box_grid_x0 = tf.floor(box_grid_x) box_grid_x0 = tf.maximum(tf.cast(0., dtype=box_grid_x0.dtype), box_grid_x0) box_grid_y0 = tf.maximum(tf.cast(0., dtype=box_grid_y0.dtype), box_grid_y0) box_grid_x0 = tf.minimum(box_grid_x0, tf.expand_dims(boundaries[:, :, 1], -1)) box_grid_x1 = tf.minimum(box_grid_x0 + 1, tf.expand_dims(boundaries[:, :, 1], -1)) box_grid_y0 = tf.minimum(box_grid_y0, tf.expand_dims(boundaries[:, :, 0], -1)) box_grid_y1 = tf.minimum(box_grid_y0 + 1, tf.expand_dims(boundaries[:, :, 0], -1)) box_gridx0x1 = tf.stack([box_grid_x0, box_grid_x1], axis=-1) box_gridy0y1 = tf.stack([box_grid_y0, box_grid_y1], axis=-1) # The RoIAlign feature f can be computed by bilinear interpolation of four # neighboring feature points f0, f1, f2, and f3. # f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T # [f10, f11]] # f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11 # f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11 ly = box_grid_y - box_grid_y0 lx = box_grid_x - box_grid_x0 hy = 1.0 - ly hx = 1.0 - lx kernel_y = tf.reshape( tf.stack([hy, ly], axis=3), [batch_size, num_boxes, output_size, 2, 1]) kernel_x = tf.reshape( tf.stack([hx, lx], axis=3), [batch_size, num_boxes, output_size, 2, 1]) return kernel_y, kernel_x, box_gridy0y1, box_gridx0x1 def multilevel_crop_and_resize(features: Dict[str, tf.Tensor], boxes: tf.Tensor, output_size: int = 7, sample_offset: float = 0.5) -> tf.Tensor: """Crop and resize on multilevel feature pyramid. Generate the (output_size, output_size) set of pixels for each input box by first locating the box into the correct feature level, and then cropping and resizing it using the correspoding feature map of that level. Args: features: A dictionary with key as pyramid level and value as features. The features are in shape of [batch_size, height_l, width_l, num_filters]. boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row represents a box with [y1, x1, y2, x2] in un-normalized coordinates. output_size: A scalar to indicate the output crop size. sample_offset: a float number in [0, 1] indicates the subpixel sample offset from grid point. Returns: A 5-D tensor representing feature crop of shape [batch_size, num_boxes, output_size, output_size, num_filters]. """ with tf.name_scope('multilevel_crop_and_resize'): levels = list(features.keys()) min_level = int(min(levels)) max_level = int(max(levels)) features_shape = tf.shape(features[str(min_level)]) batch_size, max_feature_height, max_feature_width, num_filters = ( features_shape[0], features_shape[1], features_shape[2], features_shape[3]) num_boxes = tf.shape(boxes)[1] # Stack feature pyramid into a features_all of shape # [batch_size, levels, height, width, num_filters]. 
features_all = [] feature_heights = [] feature_widths = [] for level in range(min_level, max_level + 1): shape = features[str(level)].get_shape().as_list() feature_heights.append(shape[1]) feature_widths.append(shape[2]) # Concat tensor of [batch_size, height_l * width_l, num_filters] for each # levels. features_all.append( tf.reshape(features[str(level)], [batch_size, -1, num_filters])) features_r2 = tf.reshape(tf.concat(features_all, 1), [-1, num_filters]) # Calculate height_l * width_l for each level. level_dim_sizes = [ feature_widths[i] * feature_heights[i] for i in range(len(feature_widths)) ] # level_dim_offsets is accumulated sum of level_dim_size. level_dim_offsets = [0] for i in range(len(feature_widths) - 1): level_dim_offsets.append(level_dim_offsets[i] + level_dim_sizes[i]) batch_dim_size = level_dim_offsets[-1] + level_dim_sizes[-1] level_dim_offsets = tf.constant(level_dim_offsets, tf.int32) height_dim_sizes = tf.constant(feature_widths, tf.int32) # Assigns boxes to the right level. box_width = boxes[:, :, 3] - boxes[:, :, 1] box_height = boxes[:, :, 2] - boxes[:, :, 0] areas_sqrt = tf.sqrt( tf.cast(box_height, tf.float32) * tf.cast(box_width, tf.float32)) levels = tf.cast( tf.math.floordiv( tf.math.log(tf.math.divide_no_nan(areas_sqrt, 224.0)), tf.math.log(2.0)) + 4.0, dtype=tf.int32) # Maps levels between [min_level, max_level]. levels = tf.minimum(max_level, tf.maximum(levels, min_level)) # Projects box location and sizes to corresponding feature levels. scale_to_level = tf.cast( tf.pow(tf.constant(2.0), tf.cast(levels, tf.float32)), dtype=boxes.dtype) boxes /= tf.expand_dims(scale_to_level, axis=2) box_width /= scale_to_level box_height /= scale_to_level boxes = tf.concat([boxes[:, :, 0:2], tf.expand_dims(box_height, -1), tf.expand_dims(box_width, -1)], axis=-1) # Maps levels to [0, max_level-min_level]. levels -= min_level level_strides = tf.pow([[2.0]], tf.cast(levels, tf.float32)) boundary = tf.cast( tf.concat([ tf.expand_dims( [[tf.cast(max_feature_height, tf.float32)]] / level_strides - 1, axis=-1), tf.expand_dims( [[tf.cast(max_feature_width, tf.float32)]] / level_strides - 1, axis=-1), ], axis=-1), boxes.dtype) # Compute grid positions. kernel_y, kernel_x, box_gridy0y1, box_gridx0x1 = _compute_grid_positions( boxes, boundary, output_size, sample_offset) x_indices = tf.cast( tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size * 2]), dtype=tf.int32) y_indices = tf.cast( tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size * 2]), dtype=tf.int32) batch_size_offset = tf.tile( tf.reshape( tf.range(batch_size) * batch_dim_size, [batch_size, 1, 1, 1]), [1, num_boxes, output_size * 2, output_size * 2]) # Get level offset for each box. Each box belongs to one level. levels_offset = tf.tile( tf.reshape( tf.gather(level_dim_offsets, levels), [batch_size, num_boxes, 1, 1]), [1, 1, output_size * 2, output_size * 2]) y_indices_offset = tf.tile( tf.reshape( y_indices * tf.expand_dims(tf.gather(height_dim_sizes, levels), -1), [batch_size, num_boxes, output_size * 2, 1]), [1, 1, 1, output_size * 2]) x_indices_offset = tf.tile( tf.reshape(x_indices, [batch_size, num_boxes, 1, output_size * 2]), [1, 1, output_size * 2, 1]) indices = tf.reshape( batch_size_offset + levels_offset + y_indices_offset + x_indices_offset, [-1]) # TODO(wangtao): replace tf.gather with tf.gather_nd and try to get similar # performance. 
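    # Gathers all 2x2 neighborhoods in a single pass: each entry of `indices`
    # is a flat row index into `features_r2`, combining the batch, level,
    # row (y) and column (x) offsets computed above.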
features_per_box = tf.reshape( tf.gather(features_r2, indices), [batch_size, num_boxes, output_size * 2, output_size * 2, num_filters]) # Bilinear interpolation. features_per_box = _feature_bilinear_interpolation( features_per_box, kernel_y, kernel_x) return features_per_box def _selective_crop_and_resize(features: tf.Tensor, boxes: tf.Tensor, box_levels: tf.Tensor, boundaries: tf.Tensor, output_size: int = 7, sample_offset: float = 0.5, use_einsum_gather: bool = False) -> tf.Tensor: """Crop and resize boxes on a set of feature maps. Given multiple features maps indexed by different levels, and a set of boxes where each box is mapped to a certain level, it selectively crops and resizes boxes from the corresponding feature maps to generate the box features. We follow the ROIAlign technique (see https://arxiv.org/pdf/1703.06870.pdf, figure 3 for reference). Specifically, for each feature map, we select an (output_size, output_size) set of pixels corresponding to the box location, and then use bilinear interpolation to select the feature value for each pixel. For performance, we perform the gather and interpolation on all layers as a single operation. In this op the multi-level features are first stacked and gathered into [2*output_size, 2*output_size] feature points. Then bilinear interpolation is performed on the gathered feature points to generate [output_size, output_size] RoIAlign feature map. Here is the step-by-step algorithm: 1. The multi-level features are gathered into a [batch_size, num_boxes, output_size*2, output_size*2, num_filters] Tensor. The Tensor contains four neighboring feature points for each vertex in the output grid. 2. Compute the interpolation kernel of shape [batch_size, num_boxes, output_size*2, output_size*2]. The last 2 axis can be seen as stacking 2x2 interpolation kernels for all vertices in the output grid. 3. Element-wise multiply the gathered features and interpolation kernel. Then apply 2x2 average pooling to reduce spatial dimension to output_size. Args: features: a 5-D tensor of shape [batch_size, num_levels, max_height, max_width, num_filters] where cropping and resizing are based. boxes: a 3-D tensor of shape [batch_size, num_boxes, 4] encoding the information of each box w.r.t. the corresponding feature map. boxes[:, :, 0:2] are the grid position in (y, x) (float) of the top-left corner of each box. boxes[:, :, 2:4] are the box sizes in (h, w) (float) in terms of the number of pixels of the corresponding feature map size. box_levels: a 3-D tensor of shape [batch_size, num_boxes, 1] representing the 0-based corresponding feature level index of each box. boundaries: a 3-D tensor of shape [batch_size, num_boxes, 2] representing the boundary (in (y, x)) of the corresponding feature map for each box. Any resampled grid points that go beyond the bounary will be clipped. output_size: a scalar indicating the output crop size. sample_offset: a float number in [0, 1] indicates the subpixel sample offset from grid point. use_einsum_gather: use einsum to replace gather or not. Replacing einsum with gather can improve performance when feature size is not large, einsum is friendly with model partition as well. Gather's performance is better when feature size is very large and there are multiple box levels. Returns: features_per_box: a 5-D tensor of shape [batch_size, num_boxes, output_size, output_size, num_filters] representing the cropped features. 
""" (batch_size, num_levels, max_feature_height, max_feature_width, num_filters) = features.get_shape().as_list() if batch_size is None: batch_size = tf.shape(features)[0] _, num_boxes, _ = boxes.get_shape().as_list() kernel_y, kernel_x, box_gridy0y1, box_gridx0x1 = _compute_grid_positions( boxes, boundaries, output_size, sample_offset) x_indices = tf.cast( tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size * 2]), dtype=tf.int32) y_indices = tf.cast( tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size * 2]), dtype=tf.int32) if use_einsum_gather: # Blinear interpolation is done during the last two gathers: # f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T # [f10, f11]] # [[f00, f01], # [f10, f11]] = tf.einsum(tf.einsum(features, y_one_hot), x_one_hot) # where [hy, ly] and [hx, lx] are the bilinear interpolation kernel. y_indices = tf.cast( tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size, 2]), dtype=tf.int32) x_indices = tf.cast( tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size, 2]), dtype=tf.int32) # shape is [batch_size, num_boxes, output_size, 2, height] grid_y_one_hot = tf.one_hot( tf.cast(y_indices, tf.int32), max_feature_height, dtype=kernel_y.dtype) # shape is [batch_size, num_boxes, output_size, 2, width] grid_x_one_hot = tf.one_hot( tf.cast(x_indices, tf.int32), max_feature_width, dtype=kernel_x.dtype) # shape is [batch_size, num_boxes, output_size, height] grid_y_weight = tf.reduce_sum( tf.multiply(grid_y_one_hot, kernel_y), axis=-2) # shape is [batch_size, num_boxes, output_size, width] grid_x_weight = tf.reduce_sum( tf.multiply(grid_x_one_hot, kernel_x), axis=-2) # Gather for y_axis. # shape is [batch_size, num_boxes, output_size, width, features] features_per_box = tf.einsum('bmhwf,bmoh->bmowf', features, tf.cast(grid_y_weight, features.dtype)) # Gather for x_axis. # shape is [batch_size, num_boxes, output_size, output_size, features] features_per_box = tf.einsum('bmhwf,bmow->bmhof', features_per_box, tf.cast(grid_x_weight, features.dtype)) else: height_dim_offset = max_feature_width level_dim_offset = max_feature_height * height_dim_offset batch_dim_offset = num_levels * level_dim_offset batch_size_offset = tf.tile( tf.reshape( tf.range(batch_size) * batch_dim_offset, [batch_size, 1, 1, 1]), [1, num_boxes, output_size * 2, output_size * 2]) box_levels_offset = tf.tile( tf.reshape(box_levels * level_dim_offset, [batch_size, num_boxes, 1, 1]), [1, 1, output_size * 2, output_size * 2]) y_indices_offset = tf.tile( tf.reshape(y_indices * height_dim_offset, [batch_size, num_boxes, output_size * 2, 1]), [1, 1, 1, output_size * 2]) x_indices_offset = tf.tile( tf.reshape(x_indices, [batch_size, num_boxes, 1, output_size * 2]), [1, 1, output_size * 2, 1]) indices = tf.reshape( batch_size_offset + box_levels_offset + y_indices_offset + x_indices_offset, [-1]) features = tf.reshape(features, [-1, num_filters]) # TODO(wangtao): replace tf.gather with tf.gather_nd and try to get similar # performance. features_per_box = tf.reshape( tf.gather(features, indices), [batch_size, num_boxes, output_size * 2, output_size * 2, num_filters]) features_per_box = _feature_bilinear_interpolation( features_per_box, kernel_y, kernel_x) return features_per_box def crop_mask_in_target_box(masks: tf.Tensor, boxes: tf.Tensor, target_boxes: tf.Tensor, output_size: int, sample_offset: float = 0.0, use_einsum: bool = True) -> tf.Tensor: """Crop masks in target boxes. Args: masks: A tensor with a shape of [batch_size, num_masks, height, width]. 
boxes: a float tensor representing box cooridnates that tightly enclose masks with a shape of [batch_size, num_masks, 4] in un-normalized coordinates. A box is represented by [ymin, xmin, ymax, xmax]. target_boxes: a float tensor representing target box cooridnates for masks with a shape of [batch_size, num_masks, 4] in un-normalized coordinates. A box is represented by [ymin, xmin, ymax, xmax]. output_size: A scalar to indicate the output crop size. It currently only supports to output a square shape outputs. sample_offset: a float number in [0, 1] indicates the subpixel sample offset from grid point. use_einsum: Use einsum to replace gather in selective_crop_and_resize. Returns: A 4-D tensor representing feature crop of shape [batch_size, num_boxes, output_size, output_size]. """ with tf.name_scope('crop_mask_in_target_box'): # Cast to float32, as the y_transform and other transform variables may # overflow in float16 masks = tf.cast(masks, tf.float32) boxes = tf.cast(boxes, tf.float32) target_boxes = tf.cast(target_boxes, tf.float32) batch_size, num_masks, height, width = masks.get_shape().as_list() if batch_size is None: batch_size = tf.shape(masks)[0] masks = tf.reshape(masks, [batch_size * num_masks, height, width, 1]) # Pad zeros on the boundary of masks. masks = tf.image.pad_to_bounding_box(masks, 2, 2, height + 4, width + 4) masks = tf.reshape(masks, [batch_size, num_masks, height+4, width+4, 1]) # Projects target box locations and sizes to corresponding cropped # mask coordinates. gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split( value=boxes, num_or_size_splits=4, axis=2) bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split( value=target_boxes, num_or_size_splits=4, axis=2) y_transform = (bb_y_min - gt_y_min) * height / ( gt_y_max - gt_y_min + _EPSILON) + 2 x_transform = (bb_x_min - gt_x_min) * height / ( gt_x_max - gt_x_min + _EPSILON) + 2 h_transform = (bb_y_max - bb_y_min) * width / ( gt_y_max - gt_y_min + _EPSILON) w_transform = (bb_x_max - bb_x_min) * width / ( gt_x_max - gt_x_min + _EPSILON) boundaries = tf.concat( [tf.ones_like(y_transform) * ((height + 4) - 1), tf.ones_like(x_transform) * ((width + 4) - 1)], axis=-1) boundaries = tf.cast(boundaries, dtype=y_transform.dtype) # Reshape tensors to have the right shape for selective_crop_and_resize. trasnformed_boxes = tf.concat( [y_transform, x_transform, h_transform, w_transform], -1) levels = tf.tile(tf.reshape(tf.range(num_masks), [1, num_masks]), [batch_size, 1]) cropped_masks = _selective_crop_and_resize( masks, trasnformed_boxes, levels, boundaries, output_size, sample_offset=sample_offset, use_einsum_gather=use_einsum) cropped_masks = tf.squeeze(cropped_masks, axis=-1) return cropped_masks def nearest_upsampling(data: tf.Tensor, scale: int, use_keras_layer: bool = False) -> tf.Tensor: """Nearest neighbor upsampling implementation. Args: data: A tensor with a shape of [batch, height_in, width_in, channels]. scale: An integer multiple to scale resolution of input data. use_keras_layer: If True, use keras Upsampling2D layer. Returns: data_up: A tensor with a shape of [batch, height_in*scale, width_in*scale, channels]. Same dtype as input data. """ if use_keras_layer: return tf.keras.layers.UpSampling2D(size=(scale, scale), interpolation='nearest')(data) with tf.name_scope('nearest_upsampling'): bs, _, _, c = data.get_shape().as_list() shape = tf.shape(input=data) h = shape[1] w = shape[2] bs = -1 if bs is None else bs # Uses reshape to quickly upsample the input. The nearest pixel is selected # via tiling. 
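    # Shape walk-through: [bs, h, w, c] -> [bs, h, 1, w, 1, c] -> tile ->
    # [bs, h, scale, w, scale, c] -> reshape -> [bs, h*scale, w*scale, c].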
data = tf.tile( tf.reshape(data, [bs, h, 1, w, 1, c]), [1, 1, scale, 1, scale, 1]) return tf.reshape(data, [bs, h * scale, w * scale, c]) def _gather_rows_from_matrix(input_matrix: tf.Tensor, row_indices: tf.Tensor) -> tf.Tensor: """Gather rows from the input matrix (2-D tensor). This operation is equivalent to tf.gather(input_matrix, row_indices), but is implemented in sparse matrix multiplication. Args: input_matrix: A 2-D tensor in shape (input_h, input_w) from which to gather values. The shape must be 2-D, since sparse matrix multiplication is currently only supported on 2-D matrices. row_indices: A 1-D int tensor in shape (output_h) which stored the row indices of the input. Returns: A tensor in shape (output_h, input_w) which stores the gathered rows. """ input_matrix_shape = input_matrix.get_shape().as_list() if len(input_matrix_shape) != 2: raise ValueError( 'Expected the input_matrix tensor (input_h, input_w) has rank == 2, ' 'was: %s' % input_matrix_shape) row_indices_shape = row_indices.get_shape().as_list() if len(row_indices_shape) != 1: raise ValueError( 'Expected the row_indices tensor (output_h) has rank == 1, was: %s' % row_indices_shape) # (output_h, input_h) indices_one_hot = tf.one_hot( row_indices, depth=input_matrix_shape[0], dtype=input_matrix.dtype) # Matrix multiplication: (output_h, input_h) x (input_h, input_w) # (output_h, input_w) return tf.linalg.matmul(indices_one_hot, input_matrix, a_is_sparse=True) def bilinear_resize_to_bbox( images: tf.Tensor, bbox: tf.Tensor, output_size: tf.Tensor ) -> tf.Tensor: """Bilinear resizes the images to fit into the bounding boxes in the output. Args: images: A tensor in shape (batch_size, input_h, input_w, ...) with arbitrary numbers of channel dimensions. bbox: A tensor in shape (batch_size, 4), representing the absolute coordinates (ymin, xmin, ymax, xmax) for each bounding box. output_size: The size of the output images in (output_h, output_w). Returns: A tensor in shape (batch_size, output_h, output_w, ...). The result has the same dtype as the input if it's float32, float16, bfloat16, otherwise the result is float32. """ images_shape = images.get_shape().as_list() images_rank = len(images_shape) if images_rank < 3: raise ValueError( 'Expected the input images (batch_size, height, width, ...) ' 'has rank >= 3, was: %s' % images_shape) bbox_shape = bbox.get_shape().as_list() if bbox_shape[-1] != 4: raise ValueError( 'Expected the last dimension of `bbox` has size == 4, but the shape ' 'of `bbox` was: %s' % bbox_shape) rank_range = list(range(images_rank)) extra_dims = images_shape[3:] extra_dims_perm = rank_range[3:] extra_dims_product = 1 for d in extra_dims: extra_dims_product *= d input_h = tf.cast(tf.shape(images)[1], tf.float32) input_w = tf.cast(tf.shape(images)[2], tf.float32) output_h = output_size[0] output_w = output_size[1] bbox = tf.cast(bbox, tf.float32) # (batch_size, 1) bbox_ymin = bbox[:, 0:1] bbox_xmin = bbox[:, 1:2] bbox_ymax = bbox[:, 2:3] bbox_xmax = bbox[:, 3:4] bbox_h = bbox_ymax - bbox_ymin bbox_w = bbox_xmax - bbox_xmin scale_h = tf.math.divide_no_nan(input_h, bbox_h) scale_w = tf.math.divide_no_nan(input_w, bbox_w) # Generates the output grids. # (output_h) output_y_grid = tf.range(output_h, dtype=bbox_ymin.dtype) # (output_w) output_x_grid = tf.range(output_w, dtype=bbox_xmin.dtype) # Computes the input source positions (float) which map to the output grids # (integer). # Applies half pixel offset here to ensure the output is center-aligned to the # input. 
# TODO(b/245614786): support align_corners=True. # (batch_size, output_h) input_y_pos = tf.clip_by_value( (output_y_grid - bbox_ymin + 0.5) * scale_h - 0.5, 0.0, input_h - 1.0) # (batch_size, output_w) input_x_pos = tf.clip_by_value( (output_x_grid - bbox_xmin + 0.5) * scale_w - 0.5, 0.0, input_w - 1.0) # Gets the positions (integer) of the four nearest neighbors of the input # source position (float). # (y0, x0): left-top # (y0, x1): right-top # (y1, x0): left-bottom # (y1, x1): right-bottom # (batch_size, output_h) input_y0 = tf.cast( tf.clip_by_value(tf.floor(input_y_pos), 0.0, input_h - 2.0), tf.int32) input_y1 = input_y0 + 1 # (batch_size, output_w) input_x0 = tf.cast( tf.clip_by_value(tf.floor(input_x_pos), 0.0, input_w - 2.0), tf.int32) input_x1 = input_x0 + 1 # (batch_size, output_h) output_y_mask = (bbox_ymin <= output_y_grid) & (output_y_grid < bbox_ymax) # (batch_size, output_w) output_x_mask = (bbox_xmin <= output_x_grid) & (output_x_grid < bbox_xmax) # Masks the output pixels outside the bounding box by setting their input # neighbors to -1. This makes `tf.one_hot` operation produce all zeros at # these pixels, so as to accelerate the sparse matrix multiplication in # `_gather_rows_from_matrix`. # (batch_size, output_h) input_y0 = tf.where(output_y_mask, input_y0, -tf.ones_like(input_y0)) input_y1 = tf.where(output_y_mask, input_y1, -tf.ones_like(input_y1)) # (batch_size, output_w) input_x0 = tf.where(output_x_mask, input_x0, -tf.ones_like(input_x0)) input_x1 = tf.where(output_x_mask, input_x1, -tf.ones_like(input_x1)) input_h = tf.cast(input_h, tf.int32) input_w = tf.cast(input_w, tf.int32) if images.dtype not in {tf.float32, tf.bfloat16, tf.float16}: images = tf.cast(images, tf.float32) if images_rank > 3: # Reshapes the images since _gather_rows_from_matrix only takes 2-D tensor. # (batch_size, input_h, input_w * extra_dims_product) images = tf.reshape(images, [-1, input_h, input_w * extra_dims_product]) # Fetches the rows from the input source images. # (batch_size, output_h, input_w * extra_dims_product) val_y0 = tf.map_fn( lambda x: _gather_rows_from_matrix(x[0], x[1]), elems=(images, input_y0), fn_output_signature=images.dtype, parallel_iterations=32, ) val_y1 = tf.map_fn( lambda x: _gather_rows_from_matrix(x[0], x[1]), elems=(images, input_y1), fn_output_signature=images.dtype, parallel_iterations=32, ) if images_rank > 3: new_shape = [-1, output_h, input_w] + extra_dims # (batch_size, output_h, input_w, ...) val_y0 = tf.reshape(val_y0, new_shape) val_y1 = tf.reshape(val_y1, new_shape) # Transposes the tensors for reusing _gather_rows_from_matrix later. new_perm = [0, 2, 1] + extra_dims_perm # (batch_size, input_w, output_h, ...) val_y0 = tf.transpose(val_y0, new_perm) val_y1 = tf.transpose(val_y1, new_perm) if images_rank > 3: new_shape = [-1, input_w, output_h * extra_dims_product] # (batch_size, input_w, output_h * extra_dims_product) val_y0 = tf.reshape(val_y0, new_shape) val_y1 = tf.reshape(val_y1, new_shape) # Fetches the pixels from the rows using the column indices. # val_00, val_01, val_10, val_11 store the pixels of the four nearest # neighbors of the input source position. 
# (batch_size, output_w, output_h * extra_dims_product) val_00 = tf.map_fn( lambda x: _gather_rows_from_matrix(x[0], x[1]), elems=(val_y0, input_x0), fn_output_signature=images.dtype, parallel_iterations=32, ) val_01 = tf.map_fn( lambda x: _gather_rows_from_matrix(x[0], x[1]), elems=(val_y0, input_x1), fn_output_signature=images.dtype, parallel_iterations=32, ) val_10 = tf.map_fn( lambda x: _gather_rows_from_matrix(x[0], x[1]), elems=(val_y1, input_x0), fn_output_signature=images.dtype, parallel_iterations=32, ) val_11 = tf.map_fn( lambda x: _gather_rows_from_matrix(x[0], x[1]), elems=(val_y1, input_x1), fn_output_signature=images.dtype, parallel_iterations=32, ) if images_rank > 3: new_shape = [-1, output_w, output_h] + extra_dims # (batch_size, output_w, output_h, ...) val_00 = tf.reshape(val_00, new_shape) val_01 = tf.reshape(val_01, new_shape) val_10 = tf.reshape(val_10, new_shape) val_11 = tf.reshape(val_11, new_shape) # (..., batch_size, output_h, output_w) new_perm = extra_dims_perm + [0, 2, 1] val_00 = tf.transpose(val_00, new_perm) val_01 = tf.transpose(val_01, new_perm) val_10 = tf.transpose(val_10, new_perm) val_11 = tf.transpose(val_11, new_perm) # (batch_size, output_height, 1) input_y_pos = tf.cast(input_y_pos[:, :, tf.newaxis], images.dtype) input_y0 = tf.cast(input_y0[:, :, tf.newaxis], images.dtype) input_y1 = tf.cast(input_y1[:, :, tf.newaxis], images.dtype) # (batch_size, 1, output_width) input_x_pos = tf.cast(input_x_pos[:, tf.newaxis, :], images.dtype) input_x0 = tf.cast(input_x0[:, tf.newaxis, :], images.dtype) input_x1 = tf.cast(input_x1[:, tf.newaxis, :], images.dtype) # Compute the weights of the four nearest neighbors for interpolation. # (batch_size, output_height, output_width) weight_00 = (input_y1 - input_y_pos) * (input_x1 - input_x_pos) weight_01 = (input_y1 - input_y_pos) * (input_x_pos - input_x0) weight_10 = (input_y_pos - input_y0) * (input_x1 - input_x_pos) weight_11 = (input_y_pos - input_y0) * (input_x_pos - input_x0) # (..., batch_size, output_height, output_width) output_images = ( val_00 * weight_00 + val_01 * weight_01 + val_10 * weight_10 + val_11 * weight_11) # (batch_size, output_height, output_width, ...) return tf.transpose(output_images, np.roll(rank_range, -len(extra_dims))) def bilinear_resize_with_crop_and_pad(images: tf.Tensor, rescale_size: tf.Tensor, crop_offset: tf.Tensor, crop_size: tf.Tensor, output_size: tf.Tensor) -> tf.Tensor: """Bilinear resizes the images, then crops and finally pads to output size. Args: images: A tensor in shape (batch_size, input_h, input_w, ...) with arbitrary numbers of channel dimensions. rescale_size: An int tensor in shape (batch_size, 2), representing the sizes of the rescaled images. crop_offset: An int tensor in shape (batch_size, 2), representing the left-top offset of the crop box. Applying negative offsets means adding extra margins at the left-top. crop_size: An int tensor in shape (batch_size, 2), representing the sizes of the cropped images. output_size: The size of the output image in (output_h, output_w). Returns: A tensor in shape (batch_size, output_h, output_w, ...). The result has the same dtype as the input if it's float32, float16, bfloat16, otherwise the result is float32. """ images_shape = images.get_shape().as_list() images_rank = len(images_shape) if images_rank < 3: raise ValueError( 'Expected the input images (batch_size, height, width, ...) 
' 'has rank >= 3, was: %s' % images_shape) num_extra_dims = images_rank - 3 # Rescales the images, applies the offset and pastes to the output canvas. # (batch_size, 2) ymin_xmin = -crop_offset # (batch_size, 2) ymax_xmax = ymin_xmin + tf.cast(rescale_size, ymin_xmin.dtype) # (batch_size, 4) rescale_bbox = tf.concat([ymin_xmin, ymax_xmax], axis=1) # (batch_size, output_height, output_width, ...) rescaled_padded_images = bilinear_resize_to_bbox(images, rescale_bbox, output_size) # Masks out the pixels outside of the crop box. # (batch_size, 2) y0_x0 = tf.broadcast_to( tf.constant([[0, 0]], dtype=crop_size.dtype), tf.shape(crop_size)) # (batch_size, 4) crop_bbox = tf.concat([y0_x0, crop_size], axis=1) # (batch_size, output_height, output_width, ...) crop_bbox_mask = bbox2mask( crop_bbox, image_height=output_size[0], image_width=output_size[1], dtype=rescaled_padded_images.dtype)[[...] + [tf.newaxis] * num_extra_dims] # (batch_size, output_height, output_width, ...) return rescaled_padded_images * crop_bbox_mask def bilinear_resize_with_pad( images: tf.Tensor, rescale_size: tf.Tensor, output_size: tf.Tensor ) -> tf.Tensor: """Bilinear resizes the images, then pads to output size. Args: images: A tensor in shape (batch_size, input_h, input_w, ...) with arbitrary numbers of channel dimensions. rescale_size: An int tensor in shape (2,) or (batch_size, 2), representing the sizes of the rescaled images. output_size: The size of the output image in (output_h, output_w). Returns: A tensor in shape (batch_size, output_h, output_w, ...). The result has the same dtype as the input if it's float32, float16, bfloat16, otherwise the result is float32. """ images_shape = images.get_shape().as_list() images_rank = len(images_shape) if images_rank < 3: raise ValueError( 'Expected the input images (batch_size, height, width, ...) ' 'has rank >= 3, was: %s' % images_shape ) batch_size = tf.shape(images)[0] rescale_size = tf.convert_to_tensor(rescale_size) if len(rescale_size.get_shape().as_list()) == 1: rescale_size = tf.broadcast_to(rescale_size, [batch_size, 2]) # Rescales the images, applies the offset and pastes to the output canvas. # (batch_size, 2) ymin_xmin = tf.broadcast_to([0, 0], [batch_size, 2]) # (batch_size, 2) ymax_xmax = tf.cast(ymin_xmin, rescale_size.dtype) + rescale_size # (batch_size, 4) rescale_bbox = tf.concat([ymin_xmin, ymax_xmax], axis=1) # (batch_size, output_height, output_width, ...) return bilinear_resize_to_bbox(images, rescale_bbox, output_size) def bilinear_resize(images: tf.Tensor, output_size: tf.Tensor) -> tf.Tensor: """Bilinear resizes the images. Args: images: A tensor in shape (batch_size, input_h, input_w, ...) with arbitrary numbers of channel dimensions. output_size: The size of the output image in (output_h, output_w). Returns: A tensor in shape (batch_size, output_h, output_w, ...). The result has the same dtype as the input if it's float32, float16, bfloat16, otherwise the result is float32. """ return bilinear_resize_with_pad( images, rescale_size=output_size, output_size=output_size )
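# A small self-contained sketch of multilevel_crop_and_resize; the pyramid
# levels, feature sizes, and box coordinates are made-up values chosen only to
# show the expected shapes, not values used elsewhere in this library.
def _example_multilevel_roi_align() -> tf.Tensor:
  """Runs RoIAlign over a random feature pyramid for a batch of two images."""
  batch_size, num_filters = 2, 16
  # Levels 2..5 of a pyramid built from a 256x256 image (stride 2**level).
  features = {
      str(level): tf.random.normal(
          [batch_size, 256 // 2**level, 256 // 2**level, num_filters])
      for level in range(2, 6)
  }
  # Boxes are [y1, x1, y2, x2] in un-normalized image coordinates.
  boxes = tf.constant([[[10., 10., 90., 120.], [0., 0., 256., 256.]],
                       [[30., 40., 60., 80.], [5., 5., 100., 200.]]])
  # Returns a tensor of shape [batch_size, num_boxes, 7, 7, num_filters].
  return multilevel_crop_and_resize(features, boxes, output_size=7)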
38,379
40.536797
80
py
models
models-master/official/vision/losses/maskrcnn_losses.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Losses for maskrcnn model.""" # Import libraries import tensorflow as tf class RpnScoreLoss(object): """Region Proposal Network score loss function.""" def __init__(self, rpn_batch_size_per_im): self._rpn_batch_size_per_im = rpn_batch_size_per_im self._binary_crossentropy = tf.keras.losses.BinaryCrossentropy( reduction=tf.keras.losses.Reduction.SUM, from_logits=True) def __call__(self, score_outputs, labels): """Computes total RPN detection loss. Computes total RPN detection loss including box and score from all levels. Args: score_outputs: an OrderDict with keys representing levels and values representing scores in [batch_size, height, width, num_anchors]. labels: the dictionary that returned from dataloader that includes ground-truth targets. Returns: rpn_score_loss: a scalar tensor representing total score loss. """ with tf.name_scope('rpn_loss'): levels = sorted(score_outputs.keys()) score_losses = [] for level in levels: score_losses.append( self._rpn_score_loss( score_outputs[level], labels[level], normalizer=tf.cast( tf.shape(score_outputs[level])[0] * self._rpn_batch_size_per_im, dtype=score_outputs[level].dtype))) # Sums per level losses to total loss. return tf.math.add_n(score_losses) def _rpn_score_loss(self, score_outputs, score_targets, normalizer=1.0): """Computes score loss.""" # score_targets has three values: # (1) score_targets[i]=1, the anchor is a positive sample. # (2) score_targets[i]=0, negative. # (3) score_targets[i]=-1, the anchor is don't care (ignore). with tf.name_scope('rpn_score_loss'): mask = tf.math.logical_or(tf.math.equal(score_targets, 1), tf.math.equal(score_targets, 0)) score_targets = tf.math.maximum(score_targets, tf.zeros_like(score_targets)) score_targets = tf.expand_dims(score_targets, axis=-1) score_outputs = tf.expand_dims(score_outputs, axis=-1) score_loss = self._binary_crossentropy( score_targets, score_outputs, sample_weight=mask) score_loss /= normalizer return score_loss class RpnBoxLoss(object): """Region Proposal Network box regression loss function.""" def __init__(self, huber_loss_delta: float): # The delta is typically around the mean value of regression target. # for instances, the regression targets of 512x512 input with 6 anchors on # P2-P6 pyramid is about [0.1, 0.1, 0.2, 0.2]. self._huber_loss = tf.keras.losses.Huber( delta=huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM) def __call__(self, box_outputs, labels): """Computes total RPN detection loss. Computes total RPN detection loss including box and score from all levels. Args: box_outputs: an OrderDict with keys representing levels and values representing box regression targets in [batch_size, height, width, num_anchors * 4]. labels: the dictionary that returned from dataloader that includes ground-truth targets. Returns: rpn_box_loss: a scalar tensor representing total box regression loss. 
""" with tf.name_scope('rpn_loss'): levels = sorted(box_outputs.keys()) box_losses = [] for level in levels: box_losses.append(self._rpn_box_loss(box_outputs[level], labels[level])) # Sum per level losses to total loss. return tf.add_n(box_losses) def _rpn_box_loss(self, box_outputs, box_targets, normalizer=1.0): """Computes box regression loss.""" with tf.name_scope('rpn_box_loss'): _, height, width, num_anchors_vertices = box_targets.get_shape().as_list() # (batch_size, height, width, num_anchors, 4) reshaped_box_targets = tf.reshape( box_targets, [-1, height, width, num_anchors_vertices // 4, 4]) # The box is valid if at least one of the ymin, xmin, ymax, ymax is not 0. # (batch_size, height, width, num_anchors) valid_mask = tf.reduce_any( tf.math.abs(reshaped_box_targets) > 1e-6, axis=-1) # (batch_size, height, width, num_anchors * 4) valid_mask = tf.cast( tf.repeat(valid_mask, 4, axis=-1), dtype=box_outputs.dtype) # (batch_size, height, width, num_anchors * 4, 1) box_targets = tf.expand_dims(box_targets, axis=-1) # (batch_size, height, width, num_anchors * 4, 1) box_outputs = tf.expand_dims(box_outputs, axis=-1) box_loss = self._huber_loss( box_targets, box_outputs, sample_weight=valid_mask) # The loss is normalized by the sum of non-zero weights and additional # normalizer provided by the function caller. Using + 0.01 here to avoid # division by zero. For each replica, get the sum of non-zero masks. Then # get the mean of sums from all replicas. Note there is an extra division # by `num_replicas` in train_step(). So it is equivalent to normalizing # the box loss by the global sum of non-zero masks. replica_context = tf.distribute.get_replica_context() valid_mask = tf.reduce_sum(valid_mask) valid_mask_mean = replica_context.all_reduce( tf.distribute.ReduceOp.MEAN, valid_mask ) box_loss /= normalizer * (valid_mask_mean + 0.01) return box_loss class FastrcnnClassLoss(object): """Fast R-CNN classification loss function.""" def __init__(self, use_binary_cross_entropy: bool = False, top_k_percent: float = 1.0): """Initializes loss computation. Args: use_binary_cross_entropy: If true, uses binary cross entropy loss, otherwise uses categorical cross entropy loss. top_k_percent: a float, the value lies in [0.0, 1.0]. When its value < 1., only aggregate the top k percent of losses. This is useful for hard example mining. """ self._use_binary_cross_entropy = use_binary_cross_entropy self._top_k_percent = top_k_percent def __call__(self, class_outputs, class_targets): """Computes the class loss (Fast-RCNN branch) of Mask-RCNN. This function implements the classification loss of the Fast-RCNN. The classification loss is categorical (or binary) cross entropy on all RoIs. Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/fast_rcnn_heads.py # pylint: disable=line-too-long Args: class_outputs: a float tensor representing the class prediction for each box with a shape of [batch_size, num_boxes, num_classes]. class_targets: a float tensor representing the class label for each box with a shape of [batch_size, num_boxes]. Returns: a scalar tensor representing total class loss. 
""" with tf.name_scope('fast_rcnn_loss'): num_classes = class_outputs.get_shape().as_list()[-1] class_targets_one_hot = tf.one_hot( tf.cast(class_targets, dtype=tf.int32), num_classes, dtype=class_outputs.dtype) if self._use_binary_cross_entropy: # (batch_size, num_boxes, num_classes) cross_entropy_loss = tf.nn.sigmoid_cross_entropy_with_logits( labels=class_targets_one_hot, logits=class_outputs) else: # (batch_size, num_boxes) cross_entropy_loss = tf.nn.softmax_cross_entropy_with_logits( labels=class_targets_one_hot, logits=class_outputs) if self._top_k_percent < 1.0: return self.aggregate_loss_top_k(cross_entropy_loss) else: return tf.reduce_mean(cross_entropy_loss) def aggregate_loss_top_k(self, loss, num_valid_values=None): """Aggregate the top-k the greatest loss values. Args: loss: a float tensor in shape (batch_size, num_boxes) or (batch_size, num_boxes, num_classes) which stores the loss values. num_valid_values: the number of loss values which are not ignored. The default value is None, which means all the loss values are valid. Returns: A 0-D float which stores the overall loss of the batch. """ loss = tf.reshape(loss, shape=[-1]) top_k_num = tf.cast( self._top_k_percent * tf.size(loss, out_type=tf.float32), tf.int32) top_k_losses, _ = tf.math.top_k(loss, k=top_k_num) normalizer = tf.cast(top_k_num, loss.dtype) if num_valid_values is not None: normalizer = tf.minimum(normalizer, tf.cast(num_valid_values, loss.dtype)) return tf.reduce_sum(top_k_losses) / (normalizer + 1e-5) class FastrcnnBoxLoss(object): """Fast R-CNN box regression loss function.""" def __init__(self, huber_loss_delta: float, class_agnostic_bbox_pred: bool = False): """Initiate Faster RCNN box loss. Args: huber_loss_delta: the delta is typically around the mean value of regression target. For instances, the regression targets of 512x512 input with 6 anchors on P2-P6 pyramid is about [0.1, 0.1, 0.2, 0.2]. class_agnostic_bbox_pred: if True, class agnostic bounding box prediction is performed. """ self._huber_loss = tf.keras.losses.Huber( delta=huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM) self._class_agnostic_bbox_pred = class_agnostic_bbox_pred def __call__(self, box_outputs, class_targets, box_targets): """Computes the box loss (Fast-RCNN branch) of Mask-RCNN. This function implements the box regression loss of the Fast-RCNN. As the `box_outputs` produces `num_classes` boxes for each RoI, the reference model expands `box_targets` to match the shape of `box_outputs` and selects only the target that the RoI has a maximum overlap. (Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py) # pylint: disable=line-too-long Instead, this function selects the `box_outputs` by the `class_targets` so that it doesn't expand `box_targets`. The box loss is smooth L1-loss on only positive samples of RoIs. Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/fast_rcnn_heads.py # pylint: disable=line-too-long Args: box_outputs: a float tensor representing the box prediction for each box with a shape of [batch_size, num_boxes, num_classes * 4]. class_targets: a float tensor representing the class label for each box with a shape of [batch_size, num_boxes]. box_targets: a float tensor representing the box label for each box with a shape of [batch_size, num_boxes, 4]. Returns: box_loss: a scalar tensor representing total box regression loss. 
""" with tf.name_scope('fast_rcnn_loss'): class_targets = tf.cast(class_targets, dtype=tf.int32) if not self._class_agnostic_bbox_pred: box_outputs = self._assign_class_targets(box_outputs, class_targets) return self._fast_rcnn_box_loss(box_outputs, box_targets, class_targets) def _assign_class_targets(self, box_outputs, class_targets): """Selects the box from `box_outputs` based on `class_targets`, with which the box has the maximum overlap.""" _, num_rois, num_class_specific_boxes = box_outputs.get_shape().as_list() num_classes = num_class_specific_boxes // 4 box_outputs = tf.reshape(box_outputs, [-1, num_rois, num_classes, 4]) class_targets_ont_hot = tf.one_hot( class_targets, num_classes, dtype=box_outputs.dtype ) return tf.einsum('bnij,bni->bnj', box_outputs, class_targets_ont_hot) def _fast_rcnn_box_loss(self, box_outputs, box_targets, class_targets, normalizer=1.0): """Computes box regression loss.""" with tf.name_scope('fast_rcnn_box_loss'): mask = tf.tile( tf.expand_dims(tf.greater(class_targets, 0), axis=2), [1, 1, 4]) mask = tf.cast(mask, dtype=box_outputs.dtype) box_targets = tf.expand_dims(box_targets, axis=-1) box_outputs = tf.expand_dims(box_outputs, axis=-1) box_loss = self._huber_loss(box_targets, box_outputs, sample_weight=mask) # The loss is normalized by the number of ones in mask, # additional normalizer provided by the user and using 0.01 here to avoid # division by 0. For each replica, get the sum of non-zero masks. Then # get the mean of sums from all replicas. Note there is an extra division # by `num_replicas` in train_step(). So it is equivalent to normalizing # the box loss by the global sum of non-zero masks. replica_context = tf.distribute.get_replica_context() mask = tf.reduce_sum(mask) mask_mean = replica_context.all_reduce( tf.distribute.ReduceOp.MEAN, mask ) box_loss /= normalizer * (mask_mean + 0.01) return box_loss class MaskrcnnLoss(object): """Mask R-CNN instance segmentation mask loss function.""" def __init__(self): self._binary_crossentropy = tf.keras.losses.BinaryCrossentropy( reduction=tf.keras.losses.Reduction.SUM, from_logits=True) def __call__(self, mask_outputs, mask_targets, select_class_targets): """Computes the mask loss of Mask-RCNN. This function implements the mask loss of Mask-RCNN. As the `mask_outputs` produces `num_classes` masks for each RoI, the reference model expands `mask_targets` to match the shape of `mask_outputs` and selects only the target that the RoI has a maximum overlap. (Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/mask_rcnn.py) # pylint: disable=line-too-long Instead, this implementation selects the `mask_outputs` by the `class_targets` so that it doesn't expand `mask_targets`. Note that the selection logic is done in the post-processing of mask_rcnn_fn in mask_rcnn_architecture.py. Args: mask_outputs: a float tensor representing the prediction for each mask, with a shape of [batch_size, num_masks, mask_height, mask_width]. mask_targets: a float tensor representing the binary mask of ground truth labels for each mask with a shape of [batch_size, num_masks, mask_height, mask_width]. select_class_targets: a tensor with a shape of [batch_size, num_masks], representing the foreground mask targets. Returns: mask_loss: a float tensor representing total mask loss. 
""" with tf.name_scope('mask_rcnn_loss'): _, _, mask_height, mask_width = mask_outputs.get_shape().as_list() weights = tf.tile( tf.greater(select_class_targets, 0)[:, :, tf.newaxis, tf.newaxis], [1, 1, mask_height, mask_width], ) weights = tf.cast(weights, dtype=mask_outputs.dtype) mask_targets = tf.expand_dims(mask_targets, axis=-1) mask_outputs = tf.expand_dims(mask_outputs, axis=-1) mask_loss = self._binary_crossentropy(mask_targets, mask_outputs, sample_weight=weights) # For each replica, get the sum of non-zero weights. Then get the mean of # sums from all replicas. Note there is an extra division by # `num_replicas` in train_step(). So it is equivalent to normalizing the # mask loss by the global sum of non-zero weights. replica_context = tf.distribute.get_replica_context() weights = tf.reduce_sum(weights) weights_mean = replica_context.all_reduce( tf.distribute.ReduceOp.MEAN, weights ) # The loss is normalized by the number of 1's in weights and # + 0.01 is used to avoid division by zero. return mask_loss / (weights_mean + 0.01)
16,390
43.061828
186
py
models
models-master/official/vision/losses/segmentation_losses.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Losses used for segmentation models.""" import tensorflow as tf from official.modeling import tf_utils from official.vision.dataloaders import utils EPSILON = 1e-5 class SegmentationLoss: """Semantic segmentation loss.""" def __init__(self, label_smoothing, class_weights, ignore_label, use_groundtruth_dimension, use_binary_cross_entropy=False, top_k_percent_pixels=1.0, gt_is_matting_map=False): """Initializes `SegmentationLoss`. Args: label_smoothing: A float, if > 0., smooth out one-hot probability by spreading the amount of probability to all other label classes. class_weights: A float list containing the weight of each class. ignore_label: An integer specifying the ignore label. use_groundtruth_dimension: A boolean, whether to resize the output to match the dimension of the ground truth. use_binary_cross_entropy: A boolean, if true, use binary cross entropy loss, otherwise, use categorical cross entropy. top_k_percent_pixels: A float, the value lies in [0.0, 1.0]. When its value < 1., only compute the loss for the top k percent pixels. This is useful for hard pixel mining. gt_is_matting_map: If or not the groundtruth mask is a matting map. Note that the matting map is only supported for 2 class segmentation. """ self._label_smoothing = label_smoothing self._class_weights = class_weights self._ignore_label = ignore_label self._use_groundtruth_dimension = use_groundtruth_dimension self._use_binary_cross_entropy = use_binary_cross_entropy self._top_k_percent_pixels = top_k_percent_pixels self._gt_is_matting_map = gt_is_matting_map def __call__(self, logits, labels, **kwargs): """Computes `SegmentationLoss`. Args: logits: A float tensor in shape (batch_size, height, width, num_classes) which is the output of the network. labels: A tensor in shape (batch_size, height, width, num_layers), which is the label masks of the ground truth. The num_layers can be > 1 if the pixels are labeled as multiple classes. **kwargs: additional keyword arguments. Returns: A 0-D float which stores the overall loss of the batch. 
""" _, height, width, num_classes = logits.get_shape().as_list() output_dtype = logits.dtype num_layers = labels.get_shape().as_list()[-1] if not self._use_binary_cross_entropy: if num_layers > 1: raise ValueError( 'Groundtruth mask must have only 1 layer if using categorical' 'cross entropy, but got {} layers.'.format(num_layers)) if self._gt_is_matting_map: if num_classes != 2: raise ValueError( 'Groundtruth matting map only supports 2 classes, but got {} ' 'classes.'.format(num_classes)) if num_layers > 1: raise ValueError( 'Groundtruth matting map must have only 1 layer, but got {} ' 'layers.'.format(num_layers)) class_weights = ( self._class_weights if self._class_weights else [1] * num_classes) if num_classes != len(class_weights): raise ValueError( 'Length of class_weights should be {}'.format(num_classes)) class_weights = tf.constant(class_weights, dtype=output_dtype) if not self._gt_is_matting_map: labels = tf.cast(labels, tf.int32) if self._use_groundtruth_dimension: # TODO(arashwan): Test using align corners to match deeplab alignment. logits = tf.image.resize( logits, tf.shape(labels)[1:3], method=tf.image.ResizeMethod.BILINEAR) else: labels = tf.image.resize( labels, (height, width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) valid_mask = tf.not_equal(tf.cast(labels, tf.int32), self._ignore_label) # (batch_size, height, width, num_classes) labels_with_prob = self.get_labels_with_prob(logits, labels, valid_mask, **kwargs) # (batch_size, height, width) valid_mask = tf.cast(tf.reduce_any(valid_mask, axis=-1), dtype=output_dtype) if self._use_binary_cross_entropy: # (batch_size, height, width, num_classes) cross_entropy_loss = tf.nn.sigmoid_cross_entropy_with_logits( labels=labels_with_prob, logits=logits) # (batch_size, height, width, num_classes) cross_entropy_loss *= class_weights num_valid_values = tf.reduce_sum(valid_mask) * tf.cast( num_classes, output_dtype) # (batch_size, height, width, num_classes) cross_entropy_loss *= valid_mask[..., tf.newaxis] else: # (batch_size, height, width) cross_entropy_loss = tf.nn.softmax_cross_entropy_with_logits( labels=labels_with_prob, logits=logits) # If groundtruth is matting map, binarize the value to create the weight # mask if self._gt_is_matting_map: labels = utils.binarize_matting_map(labels) # (batch_size, height, width) weight_mask = tf.einsum( '...y,y->...', tf.one_hot( tf.cast(tf.squeeze(labels, axis=-1), tf.int32), depth=num_classes, dtype=output_dtype), class_weights) cross_entropy_loss *= weight_mask num_valid_values = tf.reduce_sum(valid_mask) cross_entropy_loss *= valid_mask if self._top_k_percent_pixels < 1.0: return self.aggregate_loss_top_k(cross_entropy_loss, num_valid_values) else: return tf.reduce_sum(cross_entropy_loss) / (num_valid_values + EPSILON) def get_labels_with_prob(self, logits, labels, valid_mask, **unused_kwargs): """Get a tensor representing the probability of each class for each pixel. This method can be overridden in subclasses for customizing loss function. Args: logits: A float tensor in shape (batch_size, height, width, num_classes) which is the output of the network. labels: A tensor in shape (batch_size, height, width, num_layers), which is the label masks of the ground truth. The num_layers can be > 1 if the pixels are labeled as multiple classes. valid_mask: A bool tensor in shape (batch_size, height, width, num_layers) which indicates the ignored labels in each ground truth layer. **unused_kwargs: Unused keyword arguments. 
Returns: A float tensor in shape (batch_size, height, width, num_classes). """ num_classes = logits.get_shape().as_list()[-1] if self._gt_is_matting_map: # (batch_size, height, width, num_classes=2) train_labels = tf.concat([1 - labels, labels], axis=-1) else: labels = tf.cast(labels, tf.int32) # Assign pixel with ignore label to class -1, which will be ignored by # tf.one_hot operation. # (batch_size, height, width, num_masks) labels = tf.where(valid_mask, labels, -tf.ones_like(labels)) if self._use_binary_cross_entropy: # (batch_size, height, width, num_masks, num_classes) one_hot_labels_per_mask = tf.one_hot( labels, depth=num_classes, on_value=True, off_value=False, dtype=tf.bool, axis=-1) # Aggregate all one-hot labels to get a binary mask in shape # (batch_size, height, width, num_classes), which represents all the # classes that a pixel is labeled as. # For example, if a pixel is labeled as "window" (id=1) and also being a # part of the "building" (id=3), then its train_labels are [0,1,0,1]. train_labels = tf.cast( tf.reduce_any(one_hot_labels_per_mask, axis=-2), dtype=logits.dtype) else: # (batch_size, height, width, num_classes) train_labels = tf.one_hot( tf.squeeze(labels, axis=-1), depth=num_classes, dtype=logits.dtype) return train_labels * ( 1 - self._label_smoothing) + self._label_smoothing / num_classes def aggregate_loss_top_k(self, pixelwise_loss, num_valid_pixels=None): """Aggregate the top-k greatest pixelwise loss. Args: pixelwise_loss: a float tensor in shape (batch_size, height, width) which stores the loss of each pixel. num_valid_pixels: the number of pixels which are not ignored. If None, all the pixels are valid. Returns: A 0-D float which stores the overall loss of the batch. """ pixelwise_loss = tf.reshape(pixelwise_loss, shape=[-1]) top_k_pixels = tf.cast( self._top_k_percent_pixels * tf.cast(tf.size(pixelwise_loss), tf.float32), tf.int32, ) top_k_losses, _ = tf.math.top_k(pixelwise_loss, k=top_k_pixels) normalizer = tf.cast(top_k_pixels, top_k_losses.dtype) if num_valid_pixels is not None: normalizer = tf.minimum(normalizer, tf.cast(num_valid_pixels, top_k_losses.dtype)) return tf.reduce_sum(top_k_losses) / (normalizer + EPSILON) def get_actual_mask_scores(logits, labels, ignore_label): """Gets actual mask scores.""" _, height, width, num_classes = logits.get_shape().as_list() batch_size = tf.shape(logits)[0] logits = tf.stop_gradient(logits) labels = tf.image.resize( labels, (height, width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) predicted_labels = tf.argmax(logits, -1, output_type=tf.int32) flat_predictions = tf.reshape(predicted_labels, [batch_size, -1]) flat_labels = tf.cast(tf.reshape(labels, [batch_size, -1]), tf.int32) one_hot_predictions = tf.one_hot( flat_predictions, num_classes, on_value=True, off_value=False) one_hot_labels = tf.one_hot( flat_labels, num_classes, on_value=True, off_value=False) keep_mask = tf.not_equal(flat_labels, ignore_label) keep_mask = tf.expand_dims(keep_mask, 2) overlap = tf.logical_and(one_hot_predictions, one_hot_labels) overlap = tf.logical_and(overlap, keep_mask) overlap = tf.reduce_sum(tf.cast(overlap, tf.float32), axis=1) union = tf.logical_or(one_hot_predictions, one_hot_labels) union = tf.logical_and(union, keep_mask) union = tf.reduce_sum(tf.cast(union, tf.float32), axis=1) actual_scores = tf.divide(overlap, tf.maximum(union, EPSILON)) return actual_scores class MaskScoringLoss: """Mask Scoring loss.""" def __init__(self, ignore_label): self._ignore_label = ignore_label self._mse_loss = 
tf.keras.losses.MeanSquaredError( reduction=tf.keras.losses.Reduction.NONE) def __call__(self, predicted_scores, logits, labels): actual_scores = get_actual_mask_scores(logits, labels, self._ignore_label) loss = tf_utils.safe_mean(self._mse_loss(actual_scores, predicted_scores)) return loss
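# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of SegmentationLoss on random data. The tensor
# shapes, the 21-class setup and the constructor arguments are illustrative
# assumptions, not values prescribed by any particular config.
seg_loss_fn = SegmentationLoss(
    label_smoothing=0.0,
    class_weights=[],           # an empty list falls back to uniform weights
    ignore_label=255,
    use_groundtruth_dimension=True)
example_logits = tf.random.normal([2, 64, 64, 21])               # (B, H, W, C)
example_labels = tf.random.uniform([2, 128, 128, 1], maxval=21, dtype=tf.int32)
print('segmentation loss:', float(seg_loss_fn(example_logits, example_labels)))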
11,414
40.358696
80
py
models
models-master/official/vision/losses/focal_loss.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Losses used for detection models.""" import tensorflow as tf class FocalLoss(tf.keras.losses.Loss): """Implements a Focal loss for classification problems. Reference: [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002). """ def __init__(self, alpha, gamma, reduction=tf.keras.losses.Reduction.AUTO, name=None): """Initializes `FocalLoss`. Args: alpha: The `alpha` weight factor for binary class imbalance. gamma: The `gamma` focusing parameter to re-weight loss. reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the op. Defaults to 'retinanet_class_loss'. """ self._alpha = alpha self._gamma = gamma super(FocalLoss, self).__init__(reduction=reduction, name=name) def call(self, y_true, y_pred): """Invokes the `FocalLoss`. Args: y_true: A tensor of size [batch, num_anchors, num_classes] y_pred: A tensor of size [batch, num_anchors, num_classes] Returns: Summed loss float `Tensor`. """ with tf.name_scope('focal_loss'): y_true = tf.cast(y_true, dtype=tf.float32) y_pred = tf.cast(y_pred, dtype=tf.float32) positive_label_mask = tf.equal(y_true, 1.0) cross_entropy = ( tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)) probs = tf.sigmoid(y_pred) probs_gt = tf.where(positive_label_mask, probs, 1.0 - probs) # With small gamma, the implementation could produce NaN during back prop. modulator = tf.pow(1.0 - probs_gt, self._gamma) loss = modulator * cross_entropy weighted_loss = tf.where(positive_label_mask, self._alpha * loss, (1.0 - self._alpha) * loss) return weighted_loss def get_config(self): config = { 'alpha': self._alpha, 'gamma': self._gamma, } base_config = super(FocalLoss, self).get_config() return dict(list(base_config.items()) + list(config.items()))
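# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of FocalLoss on random logits and binary targets.
# The alpha/gamma values and tensor shapes are assumptions for illustration,
# not values taken from any RetinaNet config.
focal_loss_fn = FocalLoss(alpha=0.25, gamma=1.5,
                          reduction=tf.keras.losses.Reduction.SUM)
example_y_true = tf.cast(
    tf.random.uniform([8, 100, 90], maxval=2, dtype=tf.int32), tf.float32)
example_y_pred = tf.random.normal([8, 100, 90])   # raw (pre-sigmoid) logits
print('focal loss:', float(focal_loss_fn(example_y_true, example_y_pred)))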
3,229
37
80
py
models
models-master/official/vision/losses/retinanet_losses.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Losses used for detection models.""" # Import libraries import tensorflow as tf def focal_loss(logits, targets, alpha, gamma): """Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class. Args: logits: A float32 tensor of size [batch, d_1, ..., d_k, n_classes]. targets: A float32 tensor of size [batch, d_1, ..., d_k, n_classes]. alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. Returns: loss: A float32 Tensor of size [batch, d_1, ..., d_k, n_classes] representing normalized loss on the prediction map. """ with tf.name_scope('focal_loss'): positive_label_mask = tf.equal(targets, 1.0) cross_entropy = ( tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits)) probs = tf.sigmoid(logits) probs_gt = tf.where(positive_label_mask, probs, 1.0 - probs) # With small gamma, the implementation could produce NaN during back prop. modulator = tf.pow(1.0 - probs_gt, gamma) loss = modulator * cross_entropy weighted_loss = tf.where(positive_label_mask, alpha * loss, (1.0 - alpha) * loss) return weighted_loss class FocalLoss(tf.keras.losses.Loss): """Implements a Focal loss for classification problems. Reference: [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002). """ def __init__(self, alpha, gamma, num_classes, reduction=tf.keras.losses.Reduction.AUTO, name=None): """Initializes `FocalLoss`. Args: alpha: The `alpha` weight factor for binary class imbalance. gamma: The `gamma` focusing parameter to re-weight loss. num_classes: Number of foreground classes. reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the op. Defaults to 'retinanet_class_loss'. """ self._num_classes = num_classes self._alpha = alpha self._gamma = gamma super(FocalLoss, self).__init__(reduction=reduction, name=name) def call(self, y_true, y_pred): """Invokes the `FocalLoss`. Args: y_true: Ordered Dict with level to [batch, height, width, num_anchors]. for example, {3: tf.Tensor(shape=[32, 512, 512, 9], dtype=tf.float32), 4: tf.Tensor([shape=32, 256, 256, 9, dtype=tf.float32])} y_pred: Ordered Dict with level to [batch, height, width, num_anchors * num_classes]. 
for example, {3: tf.Tensor(shape=[32, 512, 512, 9], dtype=tf.int64), 4: tf.Tensor(shape=[32, 256, 256, 9 * 21], dtype=tf.int64)} Returns: Summed loss float `Tensor`. """ flattened_cls_outputs = [] flattened_labels = [] batch_size = None for level in y_pred.keys(): cls_output = y_pred[level] label = y_true[level] if batch_size is None: batch_size = cls_output.shape[0] or tf.shape(cls_output)[0] flattened_cls_outputs.append( tf.reshape(cls_output, [batch_size, -1, self._num_classes])) flattened_labels.append(tf.reshape(label, [batch_size, -1])) cls_outputs = tf.concat(flattened_cls_outputs, axis=1) labels = tf.concat(flattened_labels, axis=1) cls_targets_one_hot = tf.one_hot(labels, self._num_classes) return focal_loss( tf.cast(cls_outputs, dtype=tf.float32), tf.cast(cls_targets_one_hot, dtype=tf.float32), self._alpha, self._gamma) def get_config(self): config = { 'alpha': self._alpha, 'gamma': self._gamma, 'num_classes': self._num_classes, } base_config = super(FocalLoss, self).get_config() return dict(list(base_config.items()) + list(config.items())) class RetinanetBoxLoss(tf.keras.losses.Loss): """RetinaNet box Huber loss.""" def __init__(self, delta, reduction=tf.keras.losses.Reduction.AUTO, name=None): """Initializes `RetinanetBoxLoss`. Args: delta: A float, the point where the Huber loss function changes from a quadratic to linear. reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the op. Defaults to 'retinanet_class_loss'. """ self._huber_loss = tf.keras.losses.Huber( delta=delta, reduction=tf.keras.losses.Reduction.NONE) self._delta = delta super(RetinanetBoxLoss, self).__init__(reduction=reduction, name=name) def call(self, y_true, y_pred): """Computes box detection loss. Computes total detection loss including box and class loss from all levels. Args: y_true: Ordered Dict with level to [batch, height, width, num_anchors * 4] for example, {3: tf.Tensor(shape=[32, 512, 512, 9 * 4], dtype=tf.float32), 4: tf.Tensor([shape=32, 256, 256, 9 * 4, dtype=tf.float32])} y_pred: Ordered Dict with level to [batch, height, width, num_anchors * 4]. for example, {3: tf.Tensor(shape=[32, 512, 512, 9 * 4], dtype=tf.int64), 4: tf.Tensor(shape=[32, 256, 256, 9 * 4], dtype=tf.int64)} Returns: an integer tensor representing total box regression loss. 
""" # Sums all positives in a batch for normalization and avoids zero # num_positives_sum, which would lead to inf loss during training flattened_box_outputs = [] flattened_labels = [] batch_size = None for level in y_pred.keys(): box_output = y_pred[level] label = y_true[level] if batch_size is None: batch_size = box_output.shape[0] or tf.shape(box_output)[0] flattened_box_outputs.append(tf.reshape(box_output, [batch_size, -1, 4])) flattened_labels.append(tf.reshape(label, [batch_size, -1, 4])) box_outputs = tf.concat(flattened_box_outputs, axis=1) labels = tf.concat(flattened_labels, axis=1) loss = self._huber_loss(labels, box_outputs) return loss def get_config(self): config = { 'delta': self._delta, } base_config = super(RetinanetBoxLoss, self).get_config() return dict(list(base_config.items()) + list(config.items()))
8,097
38.120773
80
py
models
models-master/official/recommendation/neumf_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Defines NeuMF model for NCF framework. Some abbreviations used in the code base: NeuMF: Neural Matrix Factorization NCF: Neural Collaborative Filtering GMF: Generalized Matrix Factorization MLP: Multi-Layer Perceptron GMF applies a linear kernel to model the latent feature interactions, and MLP uses a nonlinear kernel to learn the interaction function from data. NeuMF model is a fused model of GMF and MLP to better model the complex user-item interactions, and unifies the strengths of linearity of MF and non-linearity of MLP for modeling the user-item latent structures. In NeuMF model, it allows GMF and MLP to learn separate embeddings, and combine the two models by concatenating their last hidden layer. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from tensorflow import estimator as tf_estimator from typing import Any, Dict, Text from official.recommendation import constants as rconst from official.recommendation import movielens from official.recommendation import ncf_common from official.recommendation import stat_utils def sparse_to_dense_grads(grads_and_vars): """Convert sparse gradients to dense gradients. All sparse gradients, which are represented as instances of tf.IndexedSlices, are converted to dense Tensors. Dense gradients, which are represents as Tensors, are unchanged. The purpose of this conversion is that for small embeddings, which are used by this model, applying dense gradients with the AdamOptimizer is faster than applying sparse gradients. Args grads_and_vars: A list of (gradient, variable) tuples. Each gradient can be a Tensor or an IndexedSlices. Tensors are unchanged, and IndexedSlices are converted to dense Tensors. Returns: The same list of (gradient, variable) as `grads_and_vars`, except each IndexedSlices gradient is converted to a Tensor. """ # Calling convert_to_tensor changes IndexedSlices into Tensors, and leaves # Tensors unchanged. return [(tf.convert_to_tensor(g), v) for g, v in grads_and_vars] def neumf_model_fn(features, labels, mode, params): """Model Function for NeuMF estimator.""" if params.get("use_seed"): tf.set_random_seed(stat_utils.random_int32()) users = features[movielens.USER_COLUMN] items = features[movielens.ITEM_COLUMN] user_input = tf.keras.layers.Input(tensor=users) item_input = tf.keras.layers.Input(tensor=items) logits = construct_model(user_input, item_input, params).output # Softmax with the first column of zeros is equivalent to sigmoid. 
softmax_logits = ncf_common.convert_to_softmax_logits(logits) if mode == tf_estimator.ModeKeys.EVAL: duplicate_mask = tf.cast(features[rconst.DUPLICATE_MASK], tf.float32) return _get_estimator_spec_with_metrics( logits, softmax_logits, duplicate_mask, params["num_neg"], params["match_mlperf"], use_tpu_spec=params["use_tpu"]) elif mode == tf_estimator.ModeKeys.TRAIN: labels = tf.cast(labels, tf.int32) valid_pt_mask = features[rconst.VALID_POINT_MASK] optimizer = tf.compat.v1.train.AdamOptimizer( learning_rate=params["learning_rate"], beta1=params["beta1"], beta2=params["beta2"], epsilon=params["epsilon"]) if params["use_tpu"]: optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer) loss = tf.compat.v1.losses.sparse_softmax_cross_entropy( labels=labels, logits=softmax_logits, weights=tf.cast(valid_pt_mask, tf.float32)) tf.identity(loss, name="cross_entropy") global_step = tf.compat.v1.train.get_global_step() tvars = tf.compat.v1.trainable_variables() gradients = optimizer.compute_gradients( loss, tvars, colocate_gradients_with_ops=True) gradients = sparse_to_dense_grads(gradients) minimize_op = optimizer.apply_gradients( gradients, global_step=global_step, name="train") update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) train_op = tf.group(minimize_op, update_ops) return tf_estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) else: raise NotImplementedError def _strip_first_and_last_dimension(x, batch_size): return tf.reshape(x[0, :], (batch_size,)) def construct_model(user_input: tf.Tensor, item_input: tf.Tensor, params: Dict[Text, Any]) -> tf.keras.Model: """Initialize NeuMF model. Args: user_input: keras input layer for users item_input: keras input layer for items params: Dict of hyperparameters. Raises: ValueError: if the first model layer is not even. Returns: model: a keras Model for computing the logits """ num_users = params["num_users"] num_items = params["num_items"] model_layers = params["model_layers"] mf_regularization = params["mf_regularization"] mlp_reg_layers = params["mlp_reg_layers"] mf_dim = params["mf_dim"] if model_layers[0] % 2 != 0: raise ValueError("The first layer size should be multiple of 2!") # Initializer for embedding layers embedding_initializer = "glorot_uniform" def mf_slice_fn(x): x = tf.squeeze(x, [1]) return x[:, :mf_dim] def mlp_slice_fn(x): x = tf.squeeze(x, [1]) return x[:, mf_dim:] # It turns out to be significantly more effecient to store the MF and MLP # embedding portions in the same table, and then slice as needed. 
embedding_user = tf.keras.layers.Embedding( num_users, mf_dim + model_layers[0] // 2, embeddings_initializer=embedding_initializer, embeddings_regularizer=tf.keras.regularizers.l2(mf_regularization), input_length=1, name="embedding_user")( user_input) embedding_item = tf.keras.layers.Embedding( num_items, mf_dim + model_layers[0] // 2, embeddings_initializer=embedding_initializer, embeddings_regularizer=tf.keras.regularizers.l2(mf_regularization), input_length=1, name="embedding_item")( item_input) # GMF part mf_user_latent = tf.keras.layers.Lambda( mf_slice_fn, name="embedding_user_mf")( embedding_user) mf_item_latent = tf.keras.layers.Lambda( mf_slice_fn, name="embedding_item_mf")( embedding_item) # MLP part mlp_user_latent = tf.keras.layers.Lambda( mlp_slice_fn, name="embedding_user_mlp")( embedding_user) mlp_item_latent = tf.keras.layers.Lambda( mlp_slice_fn, name="embedding_item_mlp")( embedding_item) # Element-wise multiply mf_vector = tf.keras.layers.multiply([mf_user_latent, mf_item_latent]) # Concatenation of two latent features mlp_vector = tf.keras.layers.concatenate([mlp_user_latent, mlp_item_latent]) num_layer = len(model_layers) # Number of layers in the MLP for layer in xrange(1, num_layer): model_layer = tf.keras.layers.Dense( model_layers[layer], kernel_regularizer=tf.keras.regularizers.l2(mlp_reg_layers[layer]), activation="relu") mlp_vector = model_layer(mlp_vector) # Concatenate GMF and MLP parts predict_vector = tf.keras.layers.concatenate([mf_vector, mlp_vector]) # Final prediction layer logits = tf.keras.layers.Dense( 1, activation=None, kernel_initializer="lecun_uniform", name=movielens.RATING_COLUMN)( predict_vector) # Print model topology. model = tf.keras.models.Model([user_input, item_input], logits) model.summary() sys.stdout.flush() return model def _get_estimator_spec_with_metrics(logits: tf.Tensor, softmax_logits: tf.Tensor, duplicate_mask: tf.Tensor, num_training_neg: int, match_mlperf: bool = False, use_tpu_spec: bool = False): """Returns a EstimatorSpec that includes the metrics.""" cross_entropy, \ metric_fn, \ in_top_k, \ ndcg, \ metric_weights = compute_eval_loss_and_metrics_helper( logits, softmax_logits, duplicate_mask, num_training_neg, match_mlperf) if use_tpu_spec: return tf_estimator.tpu.TPUEstimatorSpec( mode=tf_estimator.ModeKeys.EVAL, loss=cross_entropy, eval_metrics=(metric_fn, [in_top_k, ndcg, metric_weights])) return tf_estimator.EstimatorSpec( mode=tf_estimator.ModeKeys.EVAL, loss=cross_entropy, eval_metric_ops=metric_fn(in_top_k, ndcg, metric_weights)) def compute_eval_loss_and_metrics_helper(logits: tf.Tensor, softmax_logits: tf.Tensor, duplicate_mask: tf.Tensor, num_training_neg: int, match_mlperf: bool = False): """Model evaluation with HR and NDCG metrics. The evaluation protocol is to rank the test interacted item (truth items) among the randomly chosen 999 items that are not interacted by the user. The performance of the ranked list is judged by Hit Ratio (HR) and Normalized Discounted Cumulative Gain (NDCG). For evaluation, the ranked list is truncated at 10 for both metrics. As such, the HR intuitively measures whether the test item is present on the top-10 list, and the NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks. Both metrics are calculated for each test user, and the average scores are reported. If `match_mlperf` is True, then the HR and NDCG computations are done in a slightly unusual way to match the MLPerf reference implementation. 
Specifically, if the evaluation negatives contain duplicate items, it will be treated as if the item only appeared once. Effectively, for duplicate items in a row, the predicted score for all but one of the items will be set to -infinity For example, suppose we have that following inputs: logits_by_user: [[ 2, 3, 3], [ 5, 4, 4]] items_by_user: [[10, 20, 20], [30, 40, 40]] # Note: items_by_user is not explicitly present. Instead the relevant \ information is contained within `duplicate_mask` top_k: 2 Then with match_mlperf=True, the HR would be 2/2 = 1.0. With match_mlperf=False, the HR would be 1/2 = 0.5. This is because each user has predicted scores for only 2 unique items: 10 and 20 for the first user, and 30 and 40 for the second. Therefore, with match_mlperf=True, it's guaranteed the first item's score is in the top 2. With match_mlperf=False, this function would compute the first user's first item is not in the top 2, because item 20 has a higher score, and item 20 occurs twice. Args: logits: A tensor containing the predicted logits for each user. The shape of logits is (num_users_per_batch * (1 + NUM_EVAL_NEGATIVES),) Logits for a user are grouped, and the last element of the group is the true element. softmax_logits: The same tensor, but with zeros left-appended. duplicate_mask: A vector with the same shape as logits, with a value of 1 if the item corresponding to the logit at that position has already appeared for that user. num_training_neg: The number of negatives per positive during training. match_mlperf: Use the MLPerf reference convention for computing rank. Returns: cross_entropy: the loss metric_fn: the metrics function in_top_k: hit rate metric ndcg: ndcg metric metric_weights: metric weights """ in_top_k, ndcg, metric_weights, logits_by_user = compute_top_k_and_ndcg( logits, duplicate_mask, match_mlperf) # Examples are provided by the eval Dataset in a structured format, so eval # labels can be reconstructed on the fly. eval_labels = tf.reshape( shape=(-1,), tensor=tf.one_hot( tf.zeros(shape=(logits_by_user.shape[0],), dtype=tf.int32) + rconst.NUM_EVAL_NEGATIVES, logits_by_user.shape[1], dtype=tf.int32)) eval_labels_float = tf.cast(eval_labels, tf.float32) # During evaluation, the ratio of negatives to positives is much higher # than during training. (Typically 999 to 1 vs. 4 to 1) By adjusting the # weights for the negative examples we compute a loss which is consistent with # the training data. 
(And provides apples-to-apples comparison) negative_scale_factor = num_training_neg / rconst.NUM_EVAL_NEGATIVES example_weights = ((eval_labels_float + (1 - eval_labels_float) * negative_scale_factor) * (1 + rconst.NUM_EVAL_NEGATIVES) / (1 + num_training_neg)) # Tile metric weights back to logit dimensions expanded_metric_weights = tf.reshape( tf.tile(metric_weights[:, tf.newaxis], (1, rconst.NUM_EVAL_NEGATIVES + 1)), (-1,)) # ignore padded examples example_weights *= tf.cast(expanded_metric_weights, tf.float32) cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy( logits=softmax_logits, labels=eval_labels, weights=example_weights) def metric_fn(top_k_tensor, ndcg_tensor, weight_tensor): return { rconst.HR_KEY: tf.compat.v1.metrics.mean( top_k_tensor, weights=weight_tensor, name=rconst.HR_METRIC_NAME), rconst.NDCG_KEY: tf.compat.v1.metrics.mean( ndcg_tensor, weights=weight_tensor, name=rconst.NDCG_METRIC_NAME) } return cross_entropy, metric_fn, in_top_k, ndcg, metric_weights def compute_top_k_and_ndcg(logits: tf.Tensor, duplicate_mask: tf.Tensor, match_mlperf: bool = False): """Compute inputs of metric calculation. Args: logits: A tensor containing the predicted logits for each user. The shape of logits is (num_users_per_batch * (1 + NUM_EVAL_NEGATIVES),) Logits for a user are grouped, and the first element of the group is the true element. duplicate_mask: A vector with the same shape as logits, with a value of 1 if the item corresponding to the logit at that position has already appeared for that user. match_mlperf: Use the MLPerf reference convention for computing rank. Returns: is_top_k, ndcg and weights, all of which has size (num_users_in_batch,), and logits_by_user which has size (num_users_in_batch, (rconst.NUM_EVAL_NEGATIVES + 1)). """ logits_by_user = tf.reshape(logits, (-1, rconst.NUM_EVAL_NEGATIVES + 1)) duplicate_mask_by_user = tf.cast( tf.reshape(duplicate_mask, (-1, rconst.NUM_EVAL_NEGATIVES + 1)), logits_by_user.dtype) if match_mlperf: # Set duplicate logits to the min value for that dtype. The MLPerf # reference dedupes during evaluation. logits_by_user *= (1 - duplicate_mask_by_user) logits_by_user += duplicate_mask_by_user * logits_by_user.dtype.min # Determine the location of the first element in each row after the elements # are sorted. sort_indices = tf.argsort(logits_by_user, axis=1, direction="DESCENDING") # Use matrix multiplication to extract the position of the true item from the # tensor of sorted indices. This approach is chosen because both GPUs and TPUs # perform matrix multiplications very quickly. This is similar to np.argwhere. # However this is a special case because the target will only appear in # sort_indices once. one_hot_position = tf.cast( tf.equal(sort_indices, rconst.NUM_EVAL_NEGATIVES), tf.int32) sparse_positions = tf.multiply( one_hot_position, tf.range(logits_by_user.shape[1])[tf.newaxis, :]) position_vector = tf.reduce_sum(sparse_positions, axis=1) in_top_k = tf.cast(tf.less(position_vector, rconst.TOP_K), tf.float32) ndcg = tf.math.log(2.) / tf.math.log(tf.cast(position_vector, tf.float32) + 2) ndcg *= in_top_k # If a row is a padded row, all but the first element will be a duplicate. metric_weights = tf.not_equal( tf.reduce_sum(duplicate_mask_by_user, axis=1), rconst.NUM_EVAL_NEGATIVES) return in_top_k, ndcg, metric_weights, logits_by_user
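# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of compute_top_k_and_ndcg() on random scores for
# two users. Each user gets rconst.NUM_EVAL_NEGATIVES negatives plus one true
# item, and an all-zero duplicate mask means no eval items are deduplicated.
num_example_users = 2
items_per_user = rconst.NUM_EVAL_NEGATIVES + 1
example_logits = tf.random.normal([num_example_users * items_per_user])
example_duplicate_mask = tf.zeros_like(example_logits)
in_top_k, ndcg, metric_weights, _ = compute_top_k_and_ndcg(
    example_logits, example_duplicate_mask)
print('hit rate:', float(tf.reduce_mean(in_top_k)))
print('mean ndcg:', float(tf.reduce_mean(ndcg)))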
16,940
37.155405
80
py
models
models-master/official/recommendation/ncf_keras_main.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """NCF framework to train and evaluate the NeuMF model. The NeuMF model assembles both MF and MLP models under the NCF framework. Check `neumf_model.py` for more details about the models. """ import json import os # pylint: disable=g-bad-import-order from absl import app from absl import flags from absl import logging import tensorflow as tf # pylint: enable=g-bad-import-order from official.common import distribute_utils from official.recommendation import constants as rconst from official.recommendation import movielens from official.recommendation import ncf_common from official.recommendation import ncf_input_pipeline from official.recommendation import neumf_model from official.utils.flags import core as flags_core from official.utils.misc import keras_utils from official.utils.misc import model_helpers FLAGS = flags.FLAGS def metric_fn(logits, dup_mask, match_mlperf): dup_mask = tf.cast(dup_mask, tf.float32) logits = tf.slice(logits, [0, 1], [-1, -1]) in_top_k, _, metric_weights, _ = neumf_model.compute_top_k_and_ndcg( logits, dup_mask, match_mlperf) metric_weights = tf.cast(metric_weights, tf.float32) return in_top_k, metric_weights class MetricLayer(tf.keras.layers.Layer): """Custom layer of metrics for NCF model.""" def __init__(self, match_mlperf): super(MetricLayer, self).__init__() self.match_mlperf = match_mlperf def get_config(self): return {"match_mlperf": self.match_mlperf} @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) def call(self, inputs, training=False): logits, dup_mask = inputs if training: hr_sum = 0.0 hr_count = 0.0 else: metric, metric_weights = metric_fn(logits, dup_mask, self.match_mlperf) hr_sum = tf.reduce_sum(metric * metric_weights) hr_count = tf.reduce_sum(metric_weights) self.add_metric(hr_sum, name="hr_sum", aggregation="mean") self.add_metric(hr_count, name="hr_count", aggregation="mean") return logits class LossLayer(tf.keras.layers.Layer): """Pass-through loss layer for NCF model.""" def __init__(self, loss_normalization_factor): # The loss may overflow in float16, so we use float32 instead. super(LossLayer, self).__init__(dtype="float32") self.loss_normalization_factor = loss_normalization_factor self.loss = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction="sum") def get_config(self): return {"loss_normalization_factor": self.loss_normalization_factor} @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) def call(self, inputs): logits, labels, valid_pt_mask_input = inputs loss = self.loss( y_true=labels, y_pred=logits, sample_weight=valid_pt_mask_input) loss = loss * (1.0 / self.loss_normalization_factor) self.add_loss(loss) return logits class IncrementEpochCallback(tf.keras.callbacks.Callback): """A callback to increase the requested epoch for the data producer. The reason why we need this is because we can only buffer a limited amount of data. 
So we keep a moving window to represent the buffer. This is to move the one of the window's boundaries for each epoch. """ def __init__(self, producer): self._producer = producer def on_epoch_begin(self, epoch, logs=None): self._producer.increment_request_epoch() class CustomEarlyStopping(tf.keras.callbacks.Callback): """Stop training has reached a desired hit rate.""" def __init__(self, monitor, desired_value): super(CustomEarlyStopping, self).__init__() self.monitor = monitor self.desired = desired_value self.stopped_epoch = 0 def on_epoch_end(self, epoch, logs=None): current = self.get_monitor_value(logs) if current and current >= self.desired: self.stopped_epoch = epoch self.model.stop_training = True def on_train_end(self, logs=None): if self.stopped_epoch > 0: print("Epoch %05d: early stopping" % (self.stopped_epoch + 1)) def get_monitor_value(self, logs): logs = logs or {} monitor_value = logs.get(self.monitor) if monitor_value is None: logging.warning( "Early stopping conditioned on metric `%s` " "which is not available. Available metrics are: %s", self.monitor, ",".join(list(logs.keys()))) return monitor_value def _get_keras_model(params): """Constructs and returns the model.""" batch_size = params["batch_size"] user_input = tf.keras.layers.Input( shape=(1,), name=movielens.USER_COLUMN, dtype=tf.int32) item_input = tf.keras.layers.Input( shape=(1,), name=movielens.ITEM_COLUMN, dtype=tf.int32) valid_pt_mask_input = tf.keras.layers.Input( shape=(1,), name=rconst.VALID_POINT_MASK, dtype=tf.bool) dup_mask_input = tf.keras.layers.Input( shape=(1,), name=rconst.DUPLICATE_MASK, dtype=tf.int32) label_input = tf.keras.layers.Input( shape=(1,), name=rconst.TRAIN_LABEL_KEY, dtype=tf.bool) base_model = neumf_model.construct_model(user_input, item_input, params) logits = base_model.output zeros = tf.keras.layers.Lambda(lambda x: x * 0)(logits) softmax_logits = tf.keras.layers.concatenate([zeros, logits], axis=-1) # Custom training loop calculates loss and metric as a part of # training/evaluation step function. if not params["keras_use_ctl"]: softmax_logits = MetricLayer( params["match_mlperf"])([softmax_logits, dup_mask_input]) # TODO(b/134744680): Use model.add_loss() instead once the API is well # supported. 
softmax_logits = LossLayer(batch_size)( [softmax_logits, label_input, valid_pt_mask_input]) keras_model = tf.keras.Model( inputs={ movielens.USER_COLUMN: user_input, movielens.ITEM_COLUMN: item_input, rconst.VALID_POINT_MASK: valid_pt_mask_input, rconst.DUPLICATE_MASK: dup_mask_input, rconst.TRAIN_LABEL_KEY: label_input }, outputs=softmax_logits) keras_model.summary() return keras_model def run_ncf(_): """Run NCF training and eval with Keras.""" keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) if FLAGS.seed is not None: print("Setting tf seed") tf.random.set_seed(FLAGS.seed) model_helpers.apply_clean(FLAGS) if FLAGS.dtype == "fp16" and FLAGS.fp16_implementation == "keras": tf.keras.mixed_precision.set_global_policy("mixed_float16") strategy = distribute_utils.get_distribution_strategy( distribution_strategy=FLAGS.distribution_strategy, num_gpus=FLAGS.num_gpus, tpu_address=FLAGS.tpu) params = ncf_common.parse_flags(FLAGS) params["distribute_strategy"] = strategy params["use_tpu"] = (FLAGS.distribution_strategy == "tpu") if params["use_tpu"] and not params["keras_use_ctl"]: logging.error("Custom training loop must be used when using TPUStrategy.") return batch_size = params["batch_size"] time_callback = keras_utils.TimeHistory(batch_size, FLAGS.log_steps) callbacks = [time_callback] producer, input_meta_data = None, None generate_input_online = params["train_dataset_path"] is None if generate_input_online: # Start data producing thread. num_users, num_items, _, _, producer = ncf_common.get_inputs(params) producer.start() per_epoch_callback = IncrementEpochCallback(producer) callbacks.append(per_epoch_callback) else: assert params["eval_dataset_path"] and params["input_meta_data_path"] with tf.io.gfile.GFile(params["input_meta_data_path"], "rb") as reader: input_meta_data = json.loads(reader.read().decode("utf-8")) num_users = input_meta_data["num_users"] num_items = input_meta_data["num_items"] params["num_users"], params["num_items"] = num_users, num_items if FLAGS.early_stopping: early_stopping_callback = CustomEarlyStopping( "val_HR_METRIC", desired_value=FLAGS.hr_threshold) callbacks.append(early_stopping_callback) (train_input_dataset, eval_input_dataset, num_train_steps, num_eval_steps) = ncf_input_pipeline.create_ncf_input_data( params, producer, input_meta_data, strategy) steps_per_epoch = None if generate_input_online else num_train_steps with distribute_utils.get_strategy_scope(strategy): keras_model = _get_keras_model(params) optimizer = tf.keras.optimizers.Adam( learning_rate=params["learning_rate"], beta_1=params["beta1"], beta_2=params["beta2"], epsilon=params["epsilon"]) if FLAGS.fp16_implementation == "graph_rewrite": optimizer = \ tf.compat.v1.train.experimental.enable_mixed_precision_graph_rewrite( optimizer, loss_scale=flags_core.get_loss_scale(FLAGS, default_for_fp16="dynamic")) elif FLAGS.dtype == "fp16": loss_scale = flags_core.get_loss_scale(FLAGS, default_for_fp16="dynamic") # Note Model.compile automatically wraps the optimizer with a # LossScaleOptimizer using dynamic loss scaling. We explicitly wrap it # here for the case where a custom training loop or fixed loss scale is # used. 
if loss_scale == "dynamic": optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer) else: optimizer = tf.keras.mixed_precision.LossScaleOptimizer( optimizer, dynamic=False, initial_scale=loss_scale) if params["keras_use_ctl"]: train_loss, eval_results = run_ncf_custom_training( params, strategy, keras_model, optimizer, callbacks, train_input_dataset, eval_input_dataset, num_train_steps, num_eval_steps, generate_input_online=generate_input_online) else: keras_model.compile(optimizer=optimizer, run_eagerly=FLAGS.run_eagerly) if not FLAGS.ml_perf: # Create Tensorboard summary and checkpoint callbacks. summary_dir = os.path.join(FLAGS.model_dir, "summaries") summary_callback = tf.keras.callbacks.TensorBoard( summary_dir, profile_batch=0) checkpoint_path = os.path.join(FLAGS.model_dir, "checkpoint") checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( checkpoint_path, save_weights_only=True) callbacks += [summary_callback, checkpoint_callback] history = keras_model.fit( train_input_dataset, epochs=FLAGS.train_epochs, steps_per_epoch=steps_per_epoch, callbacks=callbacks, validation_data=eval_input_dataset, validation_steps=num_eval_steps, verbose=2) logging.info("Training done. Start evaluating") eval_loss_and_metrics = keras_model.evaluate( eval_input_dataset, steps=num_eval_steps, verbose=2) logging.info("Keras evaluation is done.") # Keras evaluate() API returns scalar loss and metric values from # evaluation as a list. Here, the returned list would contain # [evaluation loss, hr sum, hr count]. eval_hit_rate = eval_loss_and_metrics[1] / eval_loss_and_metrics[2] # Format evaluation result into [eval loss, eval hit accuracy]. eval_results = [eval_loss_and_metrics[0], eval_hit_rate] if history and history.history: train_history = history.history train_loss = train_history["loss"][-1] stats = build_stats(train_loss, eval_results, time_callback) return stats def run_ncf_custom_training(params, strategy, keras_model, optimizer, callbacks, train_input_dataset, eval_input_dataset, num_train_steps, num_eval_steps, generate_input_online=True): """Runs custom training loop. Args: params: Dictionary containing training parameters. strategy: Distribution strategy to be used for distributed training. keras_model: Model used for training. optimizer: Optimizer used for training. callbacks: Callbacks to be invoked between batches/epochs. train_input_dataset: tf.data.Dataset used for training. eval_input_dataset: tf.data.Dataset used for evaluation. num_train_steps: Total number of steps to run for training. num_eval_steps: Total number of steps to run for evaluation. generate_input_online: Whether input data was generated by data producer. When data is generated by data producer, then train dataset must be re-initialized after every epoch. Returns: A tuple of train loss and a list of training and evaluation results. """ loss_object = tf.keras.losses.SparseCategoricalCrossentropy( reduction="sum", from_logits=True) train_input_iterator = iter( strategy.experimental_distribute_dataset(train_input_dataset)) def train_step(train_iterator): """Called once per step to train the model.""" def step_fn(features): """Computes loss and applied gradient per replica.""" with tf.GradientTape() as tape: softmax_logits = keras_model(features) # The loss can overflow in float16, so we cast to float32. 
softmax_logits = tf.cast(softmax_logits, "float32") labels = features[rconst.TRAIN_LABEL_KEY] loss = loss_object( labels, softmax_logits, sample_weight=features[rconst.VALID_POINT_MASK]) loss *= (1.0 / params["batch_size"]) if FLAGS.dtype == "fp16": loss = optimizer.get_scaled_loss(loss) grads = tape.gradient(loss, keras_model.trainable_variables) if FLAGS.dtype == "fp16": grads = optimizer.get_unscaled_gradients(grads) # Converting gradients to dense form helps in perf on GPU for NCF grads = neumf_model.sparse_to_dense_grads( list(zip(grads, keras_model.trainable_variables))) optimizer.apply_gradients(grads) return loss per_replica_losses = strategy.run(step_fn, args=(next(train_iterator),)) mean_loss = strategy.reduce( tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None) return mean_loss def eval_step(eval_iterator): """Called once per eval step to compute eval metrics.""" def step_fn(features): """Computes eval metrics per replica.""" softmax_logits = keras_model(features) in_top_k, metric_weights = metric_fn(softmax_logits, features[rconst.DUPLICATE_MASK], params["match_mlperf"]) hr_sum = tf.reduce_sum(in_top_k * metric_weights) hr_count = tf.reduce_sum(metric_weights) return hr_sum, hr_count per_replica_hr_sum, per_replica_hr_count = ( strategy.run(step_fn, args=(next(eval_iterator),))) hr_sum = strategy.reduce( tf.distribute.ReduceOp.SUM, per_replica_hr_sum, axis=None) hr_count = strategy.reduce( tf.distribute.ReduceOp.SUM, per_replica_hr_count, axis=None) return hr_sum, hr_count if not FLAGS.run_eagerly: train_step = tf.function(train_step) eval_step = tf.function(eval_step) for callback in callbacks: callback.on_train_begin() # Not writing tensorboard summaries if running in MLPerf. if FLAGS.ml_perf: eval_summary_writer, train_summary_writer = None, None else: summary_dir = os.path.join(FLAGS.model_dir, "summaries") eval_summary_writer = tf.summary.create_file_writer( os.path.join(summary_dir, "eval")) train_summary_writer = tf.summary.create_file_writer( os.path.join(summary_dir, "train")) train_loss = 0 for epoch in range(FLAGS.train_epochs): for cb in callbacks: cb.on_epoch_begin(epoch) # As NCF dataset is sampled with randomness, not repeating # data elements in each epoch has significant impact on # convergence. As so, offline-generated TF record files # contains all epoch worth of data. Thus we do not need # to initialize dataset when reading from tf record files. if generate_input_online: train_input_iterator = iter( strategy.experimental_distribute_dataset(train_input_dataset)) train_loss = 0 for step in range(num_train_steps): current_step = step + epoch * num_train_steps for c in callbacks: c.on_batch_begin(current_step) train_loss += train_step(train_input_iterator) # Write train loss once in every 1000 steps. 
if train_summary_writer and step % 1000 == 0: with train_summary_writer.as_default(): tf.summary.scalar( "training_loss", train_loss / (step + 1), step=current_step) for c in callbacks: c.on_batch_end(current_step) train_loss /= num_train_steps logging.info("Done training epoch %s, epoch loss=%.3f", epoch + 1, train_loss) eval_input_iterator = iter( strategy.experimental_distribute_dataset(eval_input_dataset)) hr_sum = 0.0 hr_count = 0.0 for _ in range(num_eval_steps): step_hr_sum, step_hr_count = eval_step(eval_input_iterator) hr_sum += step_hr_sum hr_count += step_hr_count logging.info("Done eval epoch %s, hit_rate=%.3f", epoch + 1, hr_sum / hr_count) if eval_summary_writer: with eval_summary_writer.as_default(): tf.summary.scalar("hit_rate", hr_sum / hr_count, step=current_step) if (FLAGS.early_stopping and float(hr_sum / hr_count) > params["hr_threshold"]): break for c in callbacks: c.on_train_end() # Saving the model at the end of training. if not FLAGS.ml_perf: checkpoint = tf.train.Checkpoint(model=keras_model, optimizer=optimizer) checkpoint_path = os.path.join(FLAGS.model_dir, "ctl_checkpoint") checkpoint.save(checkpoint_path) logging.info("Saving model as TF checkpoint: %s", checkpoint_path) return train_loss, [None, hr_sum / hr_count] def build_stats(loss, eval_result, time_callback): """Normalizes and returns dictionary of stats. Args: loss: The final loss at training time. eval_result: Output of the eval step. Assumes first value is eval_loss and second value is accuracy_top_1. time_callback: Time tracking callback likely used during keras.fit. Returns: Dictionary of normalized results. """ stats = {} if loss: stats["loss"] = loss if eval_result: stats["eval_loss"] = eval_result[0] stats["eval_hit_rate"] = eval_result[1] if time_callback: timestamp_log = time_callback.timestamp_log stats["step_timestamp_log"] = timestamp_log stats["train_finish_time"] = time_callback.train_finish_time if len(timestamp_log) > 1: stats["avg_exp_per_second"] = ( time_callback.batch_size * time_callback.log_steps * (len(time_callback.timestamp_log) - 1) / (timestamp_log[-1].timestamp - timestamp_log[0].timestamp)) return stats def main(_): logging.info("Result is %s", run_ncf(FLAGS)) if __name__ == "__main__": ncf_common.define_ncf_flags() app.run(main)
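# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of build_stats(). The loss and eval numbers are
# made up, and time_callback is None so the timing fields are omitted.
example_stats = build_stats(
    loss=0.31, eval_result=[0.29, 0.64], time_callback=None)
print(example_stats)
# Expected form: {'loss': 0.31, 'eval_loss': 0.29, 'eval_hit_rate': 0.64}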
19,840
34.367201
79
py
models
models-master/official/recommendation/ncf_common.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Common functionalities used by both Keras and Estimator implementations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os from absl import flags from absl import logging import numpy as np import tensorflow as tf from official.common import distribute_utils from official.recommendation import constants as rconst from official.recommendation import data_pipeline from official.recommendation import data_preprocessing from official.recommendation import movielens from official.utils.flags import core as flags_core FLAGS = flags.FLAGS def get_inputs(params): """Returns some parameters used by the model.""" if FLAGS.download_if_missing and not FLAGS.use_synthetic_data: movielens.download(FLAGS.dataset, FLAGS.data_dir) if FLAGS.seed is not None: np.random.seed(FLAGS.seed) if FLAGS.use_synthetic_data: producer = data_pipeline.DummyConstructor() num_users, num_items = movielens.DATASET_TO_NUM_USERS_AND_ITEMS[ FLAGS.dataset] num_train_steps = rconst.SYNTHETIC_BATCHES_PER_EPOCH num_eval_steps = rconst.SYNTHETIC_BATCHES_PER_EPOCH else: num_users, num_items, producer = data_preprocessing.instantiate_pipeline( dataset=FLAGS.dataset, data_dir=FLAGS.data_dir, params=params, constructor_type=FLAGS.constructor_type, deterministic=FLAGS.seed is not None) num_train_steps = producer.train_batches_per_epoch num_eval_steps = producer.eval_batches_per_epoch return num_users, num_items, num_train_steps, num_eval_steps, producer def parse_flags(flags_obj): """Convenience function to turn flags into params.""" num_gpus = flags_core.get_num_gpus(flags_obj) batch_size = flags_obj.batch_size eval_batch_size = flags_obj.eval_batch_size or flags_obj.batch_size return { "train_epochs": flags_obj.train_epochs, "batches_per_step": 1, "use_seed": flags_obj.seed is not None, "batch_size": batch_size, "eval_batch_size": eval_batch_size, "learning_rate": flags_obj.learning_rate, "mf_dim": flags_obj.num_factors, "model_layers": [int(layer) for layer in flags_obj.layers], "mf_regularization": flags_obj.mf_regularization, "mlp_reg_layers": [float(reg) for reg in flags_obj.mlp_regularization], "num_neg": flags_obj.num_neg, "distribution_strategy": flags_obj.distribution_strategy, "num_gpus": num_gpus, "use_tpu": flags_obj.tpu is not None, "tpu": flags_obj.tpu, "tpu_zone": flags_obj.tpu_zone, "tpu_gcp_project": flags_obj.tpu_gcp_project, "beta1": flags_obj.beta1, "beta2": flags_obj.beta2, "epsilon": flags_obj.epsilon, "match_mlperf": flags_obj.ml_perf, "epochs_between_evals": flags_obj.epochs_between_evals, "keras_use_ctl": flags_obj.keras_use_ctl, "hr_threshold": flags_obj.hr_threshold, "stream_files": flags_obj.tpu is not None, "train_dataset_path": flags_obj.train_dataset_path, "eval_dataset_path": flags_obj.eval_dataset_path, "input_meta_data_path": flags_obj.input_meta_data_path, } def get_v1_distribution_strategy(params): """Returns the 
distribution strategy to use.""" if params["use_tpu"]: # Some of the networking libraries are quite chatty. for name in [ "googleapiclient.discovery", "googleapiclient.discovery_cache", "oauth2client.transport" ]: logging.getLogger(name).setLevel(logging.ERROR) tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=params["tpu"], zone=params["tpu_zone"], project=params["tpu_gcp_project"], coordinator_name="coordinator") logging.info("Issuing reset command to TPU to ensure a clean state.") tf.Session.reset(tpu_cluster_resolver.get_master()) # Estimator looks at the master it connects to for MonitoredTrainingSession # by reading the `TF_CONFIG` environment variable, and the coordinator # is used by StreamingFilesDataset. tf_config_env = { "session_master": tpu_cluster_resolver.get_master(), "eval_session_master": tpu_cluster_resolver.get_master(), "coordinator": tpu_cluster_resolver.cluster_spec().as_dict()["coordinator"] } os.environ["TF_CONFIG"] = json.dumps(tf_config_env) distribution = tf.distribute.TPUStrategy( tpu_cluster_resolver, steps_per_run=100) else: distribution = distribute_utils.get_distribution_strategy( num_gpus=params["num_gpus"]) return distribution def define_ncf_flags(): """Add flags for running ncf_main.""" # Add common flags flags_core.define_base( model_dir=True, clean=True, train_epochs=True, epochs_between_evals=True, export_dir=False, run_eagerly=True, stop_threshold=True, num_gpu=True, distribution_strategy=True) flags_core.define_performance( synthetic_data=True, dtype=True, fp16_implementation=True, loss_scale=True, enable_xla=True, ) flags_core.define_device(tpu=True) flags_core.define_benchmark() flags.adopt_module_key_flags(flags_core) movielens.define_flags() flags_core.set_defaults( model_dir="/tmp/ncf/", data_dir="/tmp/movielens-data/", dataset=movielens.ML_1M, train_epochs=2, batch_size=99000, tpu=None) # Add ncf-specific flags flags.DEFINE_boolean( name="download_if_missing", default=True, help=flags_core.help_wrap( "Download data to data_dir if it is not already present.")) flags.DEFINE_integer( name="eval_batch_size", default=None, help=flags_core.help_wrap( "The batch size used for evaluation. This should generally be larger" "than the training batch size as the lack of back propagation during" "evaluation can allow for larger batch sizes to fit in memory. If not" "specified, the training batch size (--batch_size) will be used.")) flags.DEFINE_integer( name="num_factors", default=8, help=flags_core.help_wrap("The Embedding size of MF model.")) # Set the default as a list of strings to be consistent with input arguments flags.DEFINE_list( name="layers", default=["64", "32", "16", "8"], help=flags_core.help_wrap( "The sizes of hidden layers for MLP. Example " "to specify different sizes of MLP layers: --layers=32,16,8,4")) flags.DEFINE_float( name="mf_regularization", default=0., help=flags_core.help_wrap( "The regularization factor for MF embeddings. The factor is used by " "regularizer which allows to apply penalties on layer parameters or " "layer activity during optimization.")) flags.DEFINE_list( name="mlp_regularization", default=["0.", "0.", "0.", "0."], help=flags_core.help_wrap( "The regularization factor for each MLP layer. 
See mf_regularization " "help for more info about regularization factor.")) flags.DEFINE_integer( name="num_neg", default=4, help=flags_core.help_wrap( "The Number of negative instances to pair with a positive instance.")) flags.DEFINE_float( name="learning_rate", default=0.001, help=flags_core.help_wrap("The learning rate.")) flags.DEFINE_float( name="beta1", default=0.9, help=flags_core.help_wrap("beta1 hyperparameter for the Adam optimizer.")) flags.DEFINE_float( name="beta2", default=0.999, help=flags_core.help_wrap("beta2 hyperparameter for the Adam optimizer.")) flags.DEFINE_float( name="epsilon", default=1e-8, help=flags_core.help_wrap("epsilon hyperparameter for the Adam " "optimizer.")) flags.DEFINE_float( name="hr_threshold", default=1.0, help=flags_core.help_wrap( "If passed, training will stop when the evaluation metric HR is " "greater than or equal to hr_threshold. For dataset ml-1m, the " "desired hr_threshold is 0.68 which is the result from the paper; " "For dataset ml-20m, the threshold can be set as 0.95 which is " "achieved by MLPerf implementation.")) flags.DEFINE_enum( name="constructor_type", default="bisection", enum_values=["bisection", "materialized"], case_sensitive=False, help=flags_core.help_wrap( "Strategy to use for generating false negatives. materialized has a" "precompute that scales badly, but a faster per-epoch construction" "time and can be faster on very large systems.")) flags.DEFINE_string( name="train_dataset_path", default=None, help=flags_core.help_wrap("Path to training data.")) flags.DEFINE_string( name="eval_dataset_path", default=None, help=flags_core.help_wrap("Path to evaluation data.")) flags.DEFINE_string( name="input_meta_data_path", default=None, help=flags_core.help_wrap("Path to input meta data file.")) flags.DEFINE_bool( name="ml_perf", default=False, help=flags_core.help_wrap( "If set, changes the behavior of the model slightly to match the " "MLPerf reference implementations here: \n" "https://github.com/mlperf/reference/tree/master/recommendation/" "pytorch\n" "The two changes are:\n" "1. When computing the HR and NDCG during evaluation, remove " "duplicate user-item pairs before the computation. This results in " "better HRs and NDCGs.\n" "2. Use a different soring algorithm when sorting the input data, " "which performs better due to the fact the sorting algorithms are " "not stable.")) flags.DEFINE_bool( name="output_ml_perf_compliance_logging", default=False, help=flags_core.help_wrap( "If set, output the MLPerf compliance logging. This is only useful " "if one is running the model for MLPerf. See " "https://github.com/mlperf/policies/blob/master/training_rules.adoc" "#submission-compliance-logs for details. 
This uses sudo and so may " "ask for your password, as root access is needed to clear the system " "caches, which is required for MLPerf compliance.")) flags.DEFINE_integer( name="seed", default=None, help=flags_core.help_wrap( "This value will be used to seed both NumPy and TensorFlow.")) @flags.validator( "eval_batch_size", "eval_batch_size must be at least {}".format(rconst.NUM_EVAL_NEGATIVES + 1)) def eval_size_check(eval_batch_size): return (eval_batch_size is None or int(eval_batch_size) > rconst.NUM_EVAL_NEGATIVES) flags.DEFINE_bool( name="early_stopping", default=False, help=flags_core.help_wrap( "If True, we stop the training when it reaches hr_threshold")) flags.DEFINE_bool( name="keras_use_ctl", default=False, help=flags_core.help_wrap( "If True, we use a custom training loop for keras.")) def convert_to_softmax_logits(logits): """Convert the logits returned by the base model to softmax logits. Args: logits: the raw logits tensor produced by the base model. Returns: Softmax logits: a column of zeros concatenated with the input logits, so that a softmax over the two columns is equivalent to a sigmoid over the original logits. """ softmax_logits = tf.concat([logits * 0, logits], axis=1) return softmax_logits
12,278
33.784703
80
py
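The `convert_to_softmax_logits` helper in the ncf_common.py record above relies on the identity softmax([0, x]) = [1 - sigmoid(x), sigmoid(x)]. Below is a minimal numeric check of that identity; the example logits are arbitrary, and the [batch, 1] shape is simply what the axis=1 concat implies.

import tensorflow as tf

# Arbitrary example logits with shape [batch, 1].
logits = tf.constant([[-2.0], [0.5], [3.0]])

# Same construction as convert_to_softmax_logits: a zero column, then the logits.
softmax_logits = tf.concat([logits * 0, logits], axis=1)

# softmax([0, x]) = [1 - sigmoid(x), sigmoid(x)], so column 1 reproduces the sigmoid.
prob_from_softmax = tf.nn.softmax(softmax_logits, axis=1)[:, 1]
prob_from_sigmoid = tf.squeeze(tf.sigmoid(logits), axis=1)

print(prob_from_softmax.numpy())
print(prob_from_sigmoid.numpy())  # same values as the line above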
models
models-master/official/recommendation/ncf_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests NCF.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import tensorflow as tf from tensorflow.python.eager import context # pylint: disable=ungrouped-imports from official.recommendation import constants as rconst from official.recommendation import ncf_common from official.recommendation import ncf_keras_main from official.utils.testing import integration NUM_TRAIN_NEG = 4 class NcfTest(tf.test.TestCase): @classmethod def setUpClass(cls): # pylint: disable=invalid-name super(NcfTest, cls).setUpClass() ncf_common.define_ncf_flags() def setUp(self): super().setUp() self.top_k_old = rconst.TOP_K self.num_eval_negatives_old = rconst.NUM_EVAL_NEGATIVES rconst.NUM_EVAL_NEGATIVES = 2 def tearDown(self): super().tearDown() rconst.NUM_EVAL_NEGATIVES = self.num_eval_negatives_old rconst.TOP_K = self.top_k_old _BASE_END_TO_END_FLAGS = ['-batch_size', '1044', '-train_epochs', '1'] @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100) def test_end_to_end_keras_no_dist_strat(self): integration.run_synthetic( ncf_keras_main.main, tmp_root=self.get_temp_dir(), extra_flags=self._BASE_END_TO_END_FLAGS + ['-distribution_strategy', 'off']) @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100) def test_end_to_end_keras_dist_strat(self): integration.run_synthetic( ncf_keras_main.main, tmp_root=self.get_temp_dir(), extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '0']) @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100) def test_end_to_end_keras_dist_strat_ctl(self): flags = ( self._BASE_END_TO_END_FLAGS + ['-num_gpus', '0'] + ['-keras_use_ctl', 'True']) integration.run_synthetic( ncf_keras_main.main, tmp_root=self.get_temp_dir(), extra_flags=flags) @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100) def test_end_to_end_keras_1_gpu_dist_strat_fp16(self): if context.num_gpus() < 1: self.skipTest( '{} GPUs are not available for this test. {} GPUs are available' .format(1, context.num_gpus())) integration.run_synthetic( ncf_keras_main.main, tmp_root=self.get_temp_dir(), extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '1', '--dtype', 'fp16']) @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100) def test_end_to_end_keras_1_gpu_dist_strat_ctl_fp16(self): if context.num_gpus() < 1: self.skipTest( '{} GPUs are not available for this test. {} GPUs are available' .format(1, context.num_gpus())) integration.run_synthetic( ncf_keras_main.main, tmp_root=self.get_temp_dir(), extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '1', '--dtype', 'fp16', '--keras_use_ctl']) @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100) def test_end_to_end_keras_2_gpu_fp16(self): if context.num_gpus() < 2: self.skipTest( '{} GPUs are not available for this test. 
{} GPUs are available' .format(2, context.num_gpus())) integration.run_synthetic( ncf_keras_main.main, tmp_root=self.get_temp_dir(), extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '2', '--dtype', 'fp16']) if __name__ == '__main__': tf.test.main()
4,134
34.042373
80
py
models
models-master/official/recommendation/ranking/task.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Task for the Ranking model.""" import math from typing import Dict, List, Optional, Union import tensorflow as tf import tensorflow_recommenders as tfrs from official.core import base_task from official.core import config_definitions from official.recommendation.ranking import common from official.recommendation.ranking.configs import config from official.recommendation.ranking.data import data_pipeline RuntimeConfig = config_definitions.RuntimeConfig def _get_tpu_embedding_feature_config( vocab_sizes: List[int], embedding_dim: Union[int, List[int]], table_name_prefix: str = 'embedding_table' ) -> Dict[str, tf.tpu.experimental.embedding.FeatureConfig]: """Returns TPU embedding feature config. i'th table config will have vocab size of vocab_sizes[i] and embedding dimension of embedding_dim if embedding_dim is an int or embedding_dim[i] if embedding_dim is a list). Args: vocab_sizes: List of sizes of categories/id's in the table. embedding_dim: An integer or a list of embedding table dimensions. table_name_prefix: a prefix for embedding tables. Returns: A dictionary of feature_name, FeatureConfig pairs. """ if isinstance(embedding_dim, List): if len(vocab_sizes) != len(embedding_dim): raise ValueError( f'length of vocab_sizes: {len(vocab_sizes)} is not equal to the ' f'length of embedding_dim: {len(embedding_dim)}') elif isinstance(embedding_dim, int): embedding_dim = [embedding_dim] * len(vocab_sizes) else: raise ValueError('embedding_dim is not either a list or an int, got ' f'{type(embedding_dim)}') feature_config = {} for i, vocab_size in enumerate(vocab_sizes): table_config = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=vocab_size, dim=embedding_dim[i], combiner='mean', initializer=tf.initializers.TruncatedNormal( mean=0.0, stddev=1 / math.sqrt(embedding_dim[i])), name=table_name_prefix + '_%s' % i) feature_config[str(i)] = tf.tpu.experimental.embedding.FeatureConfig( table=table_config) return feature_config class RankingTask(base_task.Task): """A task for Ranking Model.""" def __init__(self, params: config.Task, optimizer_config: config.OptimizationConfig, logging_dir: Optional[str] = None, steps_per_execution: int = 1, name: Optional[str] = None): """Task initialization. Args: params: the RankingModel task configuration instance. optimizer_config: Optimizer configuration instance. logging_dir: a string pointing to where the model, summaries etc. will be saved. steps_per_execution: Int. Defaults to 1. The number of batches to run during each `tf.function` call. It's used for compile/fit API. name: the task name. 
""" super().__init__(params, logging_dir, name=name) self._optimizer_config = optimizer_config self._steps_per_execution = steps_per_execution def build_inputs(self, params, input_context=None): """Builds classification input.""" dataset = data_pipeline.CriteoTsvReader( file_pattern=params.input_path, params=params, vocab_sizes=self.task_config.model.vocab_sizes, num_dense_features=self.task_config.model.num_dense_features, use_synthetic_data=self.task_config.use_synthetic_data) return dataset(input_context) @classmethod def create_optimizer(cls, optimizer_config: config.OptimizationConfig, runtime_config: Optional[RuntimeConfig] = None) -> None: """See base class. Return None, optimizer is set in `build_model`.""" return None def build_model(self) -> tf.keras.Model: """Creates Ranking model architecture and Optimizers. The RankingModel uses different optimizers/learning rates for embedding variables and dense variables. Returns: A Ranking model instance. """ lr_config = self.optimizer_config.lr_config lr_callable = common.WarmUpAndPolyDecay( batch_size=self.task_config.train_data.global_batch_size, decay_exp=lr_config.decay_exp, learning_rate=lr_config.learning_rate, warmup_steps=lr_config.warmup_steps, decay_steps=lr_config.decay_steps, decay_start_steps=lr_config.decay_start_steps) dense_optimizer = tf.keras.optimizers.legacy.Adam() embedding_optimizer = tf.keras.optimizers.get( self.optimizer_config.embedding_optimizer, use_legacy_optimizer=True) embedding_optimizer.learning_rate = lr_callable feature_config = _get_tpu_embedding_feature_config( embedding_dim=self.task_config.model.embedding_dim, vocab_sizes=self.task_config.model.vocab_sizes) embedding_layer = tfrs.experimental.layers.embedding.PartialTPUEmbedding( feature_config=feature_config, optimizer=embedding_optimizer, size_threshold=self.task_config.model.size_threshold) if self.task_config.model.interaction == 'dot': feature_interaction = tfrs.layers.feature_interaction.DotInteraction( skip_gather=True) elif self.task_config.model.interaction == 'cross': feature_interaction = tf.keras.Sequential([ tf.keras.layers.Concatenate(), tfrs.layers.feature_interaction.Cross() ]) else: raise ValueError( f'params.task.model.interaction {self.task_config.model.interaction} ' f'is not supported it must be either \'dot\' or \'cross\'.') model = tfrs.experimental.models.Ranking( embedding_layer=embedding_layer, bottom_stack=tfrs.layers.blocks.MLP( units=self.task_config.model.bottom_mlp, final_activation='relu'), feature_interaction=feature_interaction, top_stack=tfrs.layers.blocks.MLP( units=self.task_config.model.top_mlp, final_activation='sigmoid'), ) optimizer = tfrs.experimental.optimizers.CompositeOptimizer([ (embedding_optimizer, lambda: model.embedding_trainable_variables), (dense_optimizer, lambda: model.dense_trainable_variables), ]) model.compile(optimizer, steps_per_execution=self._steps_per_execution) return model def train_step( self, inputs: Dict[str, tf.Tensor], model: tf.keras.Model, optimizer: tf.keras.optimizers.Optimizer, metrics: Optional[List[tf.keras.metrics.Metric]] = None) -> tf.Tensor: """See base class.""" # All metrics need to be passed through the RankingModel. assert metrics == model.metrics return model.train_step(inputs) def validation_step( self, inputs: Dict[str, tf.Tensor], model: tf.keras.Model, metrics: Optional[List[tf.keras.metrics.Metric]] = None) -> tf.Tensor: """See base class.""" # All metrics need to be passed through the RankingModel. 
assert metrics == model.metrics return model.test_step(inputs) @property def optimizer_config(self) -> config.OptimizationConfig: return self._optimizer_config
7,723
36.678049
80
py
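The `_get_tpu_embedding_feature_config` helper in the task.py record above accepts `embedding_dim` either as one int shared by every table or as a per-table list. A small sketch of both call forms, assuming the Model Garden package (`tf-models-official`) and `tensorflow-recommenders` are installed so the module is importable; the vocabulary sizes are made up.

from official.recommendation.ranking import task

vocab_sizes = [1000, 50, 200]  # made-up vocabulary sizes, one per sparse feature

# A single int is broadcast to every table ...
shared_dim = task._get_tpu_embedding_feature_config(
    vocab_sizes=vocab_sizes, embedding_dim=8)

# ... while a list gives each table its own dimension (lengths must match,
# otherwise the helper raises ValueError).
per_table_dim = task._get_tpu_embedding_feature_config(
    vocab_sizes=vocab_sizes, embedding_dim=[16, 8, 8])

for key, feature in per_table_dim.items():
  print(key, feature.table.vocabulary_size, feature.table.dim)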
models
models-master/official/recommendation/ranking/common.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Flags and common definitions for Ranking Models.""" from absl import flags import tensorflow as tf from official.common import flags as tfm_flags FLAGS = flags.FLAGS def define_flags() -> None: """Defines flags for training the Ranking model.""" tfm_flags.define_flags() FLAGS.set_default(name='experiment', value='dlrm_criteo') FLAGS.set_default(name='mode', value='train_and_eval') flags.DEFINE_integer( name='seed', default=None, help='This value will be used to seed both NumPy and TensorFlow.') flags.DEFINE_string( name='profile_steps', default='20,40', help='Save profiling data to model dir at given range of global steps. ' 'The value must be a comma separated pair of positive integers, ' 'specifying the first and last step to profile. For example, ' '"--profile_steps=2,4" triggers the profiler to process 3 steps, starting' ' from the 2nd step. Note that profiler has a non-trivial performance ' 'overhead, and the output file can be gigantic if profiling many steps.') @tf.keras.utils.register_keras_serializable(package='RANKING') class WarmUpAndPolyDecay(tf.keras.optimizers.schedules.LearningRateSchedule): """Learning rate callable for the embeddings. Linear warmup on [0, warmup_steps] then Constant on [warmup_steps, decay_start_steps] And polynomial decay on [decay_start_steps, decay_start_steps + decay_steps]. 
""" def __init__(self, batch_size: int, decay_exp: float = 2.0, learning_rate: float = 40.0, warmup_steps: int = 8000, decay_steps: int = 12000, decay_start_steps: int = 10000): super(WarmUpAndPolyDecay, self).__init__() self.batch_size = batch_size self.decay_exp = decay_exp self.learning_rate = learning_rate self.warmup_steps = warmup_steps self.decay_steps = decay_steps self.decay_start_steps = decay_start_steps def __call__(self, step): decay_exp = self.decay_exp learning_rate = self.learning_rate warmup_steps = self.warmup_steps decay_steps = self.decay_steps decay_start_steps = self.decay_start_steps scal = self.batch_size / 2048 adj_lr = learning_rate * scal if warmup_steps == 0: return adj_lr warmup_lr = step / warmup_steps * adj_lr global_step = tf.cast(step, tf.float32) decay_steps = tf.cast(decay_steps, tf.float32) decay_start_step = tf.cast(decay_start_steps, tf.float32) warmup_lr = tf.cast(warmup_lr, tf.float32) steps_since_decay_start = global_step - decay_start_step already_decayed_steps = tf.minimum(steps_since_decay_start, decay_steps) decay_lr = adj_lr * ( (decay_steps - already_decayed_steps) / decay_steps)**decay_exp decay_lr = tf.maximum(0.0001, decay_lr) lr = tf.where( global_step < warmup_steps, warmup_lr, tf.where( tf.logical_and(decay_steps > 0, global_step > decay_start_step), decay_lr, adj_lr)) lr = tf.maximum(0.01, lr) return lr def get_config(self): return { 'batch_size': self.batch_size, 'decay_exp': self.decay_exp, 'learning_rate': self.learning_rate, 'warmup_steps': self.warmup_steps, 'decay_steps': self.decay_steps, 'decay_start_steps': self.decay_start_steps }
3,988
33.991228
80
py
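`WarmUpAndPolyDecay` in the common.py record above is an ordinary `LearningRateSchedule`, so its warmup, constant, and decay phases can be probed directly. A minimal sketch, assuming `tf-models-official` is installed; the hyperparameters and probe steps are arbitrary.

from official.recommendation.ranking import common

# The base rate is first scaled by batch_size / 2048 (here 40.0 * 2 = 80.0),
# then linearly warmed up, held constant, and finally polynomially decayed.
schedule = common.WarmUpAndPolyDecay(
    batch_size=4096,
    learning_rate=40.0,
    warmup_steps=100,
    decay_start_steps=200,
    decay_steps=300)

for step in [0, 50, 100, 150, 250, 400, 600]:
  print(step, float(schedule(step)))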
models
models-master/official/recommendation/ranking/train.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Train and evaluate the Ranking model.""" from typing import Dict from absl import app from absl import flags from absl import logging import tensorflow as tf from official.common import distribute_utils from official.core import base_trainer from official.core import train_lib from official.core import train_utils from official.recommendation.ranking import common from official.recommendation.ranking.task import RankingTask from official.utils.misc import keras_utils FLAGS = flags.FLAGS class RankingTrainer(base_trainer.Trainer): """A trainer for Ranking Model. The RankingModel has two optimizers for embedding and non embedding weights. Overriding `train_loop_end` method to log learning rates for each optimizer. """ def train_loop_end(self) -> Dict[str, float]: """See base class.""" self.join() logs = {} for metric in self.train_metrics + [self.train_loss]: logs[metric.name] = metric.result() metric.reset_states() for i, optimizer in enumerate(self.optimizer.optimizers): lr_key = f'{type(optimizer).__name__}_{i}_learning_rate' if callable(optimizer.learning_rate): logs[lr_key] = optimizer.learning_rate(self.global_step) else: logs[lr_key] = optimizer.learning_rate return logs def main(_) -> None: """Train and evaluate the Ranking model.""" params = train_utils.parse_configuration(FLAGS) mode = FLAGS.mode model_dir = FLAGS.model_dir if 'train' in FLAGS.mode: # Pure eval modes do not output yaml files. Otherwise continuous eval job # may race against the train job for writing the same file. 
train_utils.serialize_config(params, model_dir) if FLAGS.seed is not None: logging.info('Setting tf seed.') tf.random.set_seed(FLAGS.seed) task = RankingTask( params=params.task, optimizer_config=params.trainer.optimizer_config, logging_dir=model_dir, steps_per_execution=params.trainer.steps_per_loop, name='RankingTask') enable_tensorboard = params.trainer.callbacks.enable_tensorboard strategy = distribute_utils.get_distribution_strategy( distribution_strategy=params.runtime.distribution_strategy, all_reduce_alg=params.runtime.all_reduce_alg, num_gpus=params.runtime.num_gpus, tpu_address=params.runtime.tpu) with strategy.scope(): model = task.build_model() def get_dataset_fn(params): return lambda input_context: task.build_inputs(params, input_context) train_dataset = None if 'train' in mode: train_dataset = strategy.distribute_datasets_from_function( get_dataset_fn(params.task.train_data), options=tf.distribute.InputOptions(experimental_fetch_to_device=False)) validation_dataset = None if 'eval' in mode: validation_dataset = strategy.distribute_datasets_from_function( get_dataset_fn(params.task.validation_data), options=tf.distribute.InputOptions(experimental_fetch_to_device=False)) if params.trainer.use_orbit: with strategy.scope(): checkpoint_exporter = train_utils.maybe_create_best_ckpt_exporter( params, model_dir) trainer = RankingTrainer( config=params, task=task, model=model, optimizer=model.optimizer, train='train' in mode, evaluate='eval' in mode, train_dataset=train_dataset, validation_dataset=validation_dataset, checkpoint_exporter=checkpoint_exporter) train_lib.run_experiment( distribution_strategy=strategy, task=task, mode=mode, params=params, model_dir=model_dir, trainer=trainer) else: # Compile/fit checkpoint = tf.train.Checkpoint(model=model, optimizer=model.optimizer) latest_checkpoint = tf.train.latest_checkpoint(model_dir) if latest_checkpoint: checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) checkpoint_manager = tf.train.CheckpointManager( checkpoint, directory=model_dir, max_to_keep=params.trainer.max_to_keep, step_counter=model.optimizer.iterations, checkpoint_interval=params.trainer.checkpoint_interval) checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager) time_callback = keras_utils.TimeHistory( params.task.train_data.global_batch_size, params.trainer.time_history.log_steps, logdir=model_dir if enable_tensorboard else None) callbacks = [checkpoint_callback, time_callback] if enable_tensorboard: tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=model_dir, update_freq=min(1000, params.trainer.validation_interval), profile_batch=FLAGS.profile_steps) callbacks.append(tensorboard_callback) num_epochs = (params.trainer.train_steps // params.trainer.validation_interval) current_step = model.optimizer.iterations.numpy() initial_epoch = current_step // params.trainer.validation_interval eval_steps = params.trainer.validation_steps if 'eval' in mode else None if mode in ['train', 'train_and_eval']: logging.info('Training started') history = model.fit( train_dataset, initial_epoch=initial_epoch, epochs=num_epochs, steps_per_epoch=params.trainer.validation_interval, validation_data=validation_dataset, validation_steps=eval_steps, callbacks=callbacks, ) model.summary() logging.info('Train history: %s', history.history) elif mode == 'eval': logging.info('Evaluation started') validation_output = model.evaluate(validation_dataset, steps=eval_steps) logging.info('Evaluation output: %s', validation_output) 
else: raise NotImplementedError('The mode is not implemented: %s' % mode) if __name__ == '__main__': logging.set_verbosity(logging.INFO) common.define_flags() app.run(main)
6,613
33.628272
79
py
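train.py in the record above builds its input pipelines through `strategy.distribute_datasets_from_function`, handing each pipeline an `input_context` it can use for sharding. A self-contained sketch of that pattern with a toy dataset standing in for `task.build_inputs`; the `experimental_fetch_to_device=False` option mirrors the script above.

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()  # the script picks its strategy from runtime config

def get_dataset_fn(global_batch_size):
  def dataset_fn(input_context):
    # Each pipeline reads only its own shard and uses its per-replica batch size.
    per_replica_batch = input_context.get_per_replica_batch_size(global_batch_size)
    ds = tf.data.Dataset.range(1000).shard(
        input_context.num_input_pipelines, input_context.input_pipeline_id)
    return ds.batch(per_replica_batch)
  return dataset_fn

dist_dataset = strategy.distribute_datasets_from_function(
    get_dataset_fn(global_batch_size=32),
    options=tf.distribute.InputOptions(experimental_fetch_to_device=False))

for batch in dist_dataset:
  print(batch)  # one per-replica batch (a PerReplica container with multiple devices)
  break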
models
models-master/official/recommendation/ranking/configs/config.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Ranking Model configuration definition.""" import dataclasses from typing import List, Optional, Union from official.core import config_definitions as cfg from official.core import exp_factory from official.modeling import hyperparams @dataclasses.dataclass class CallbacksConfig(hyperparams.Config): """Configuration for Callbacks. Attributes: enable_checkpoint_and_export: Whether or not to enable checkpoints as a Callback. Defaults to True. enable_backup_and_restore: Whether or not to add BackupAndRestore callback. Defaults to True. enable_tensorboard: Whether or not to enable TensorBoard as a Callback. Defaults to True. enable_time_history: Whether or not to enable TimeHistory Callbacks. Defaults to True. """ enable_checkpoint_and_export: bool = True enable_backup_and_restore: bool = False enable_tensorboard: bool = True enable_time_history: bool = True @dataclasses.dataclass class LearningRateConfig(hyperparams.Config): """Learning rate scheduler config.""" learning_rate: float = 1.25 warmup_steps: int = 8000 decay_steps: int = 30000 decay_start_steps: int = 70000 decay_exp: float = 2 @dataclasses.dataclass class OptimizationConfig(hyperparams.Config): """Embedding Optimizer config.""" lr_config: LearningRateConfig = dataclasses.field( default_factory=LearningRateConfig ) embedding_optimizer: str = 'SGD' @dataclasses.dataclass class DataConfig(hyperparams.Config): """Dataset config for training and evaluation.""" input_path: str = '' global_batch_size: int = 0 is_training: bool = True dtype: str = 'float32' shuffle_buffer_size: int = 10000 cycle_length: int = 10 sharding: bool = True num_shards_per_host: int = 8 @dataclasses.dataclass class ModelConfig(hyperparams.Config): """Configuration for training. Attributes: num_dense_features: Number of dense features. vocab_sizes: Vocab sizes for each of the sparse features. The order agrees with the order of the input data. embedding_dim: An integer or a list of embedding table dimensions. If it's an integer then all tables will have the same embedding dimension. If it's a list then the length should match with `vocab_sizes`. size_threshold: A threshold for table sizes below which a keras embedding layer is used, and above which a TPU embedding layer is used. If it's -1 then only keras embedding layer will be used for all tables, if 0 only then only TPU embedding layer will be used. bottom_mlp: The sizes of hidden layers for bottom MLP applied to dense features. top_mlp: The sizes of hidden layers for top MLP. interaction: Interaction can be on of the following: 'dot', 'cross'. 
""" num_dense_features: int = 13 vocab_sizes: List[int] = dataclasses.field(default_factory=list) embedding_dim: Union[int, List[int]] = 8 size_threshold: int = 50_000 bottom_mlp: List[int] = dataclasses.field(default_factory=list) top_mlp: List[int] = dataclasses.field(default_factory=list) interaction: str = 'dot' @dataclasses.dataclass class Loss(hyperparams.Config): """Configuration for Loss. Attributes: label_smoothing: Whether or not to apply label smoothing to the Binary Crossentropy loss. """ label_smoothing: float = 0.0 @dataclasses.dataclass class Task(hyperparams.Config): """The model config.""" init_checkpoint: str = '' model: ModelConfig = dataclasses.field(default_factory=ModelConfig) train_data: DataConfig = dataclasses.field( default_factory=lambda: DataConfig(is_training=True) ) validation_data: DataConfig = dataclasses.field( default_factory=lambda: DataConfig(is_training=False) ) loss: Loss = dataclasses.field(default_factory=Loss) use_synthetic_data: bool = False @dataclasses.dataclass class TimeHistoryConfig(hyperparams.Config): """Configuration for the TimeHistory callback. Attributes: log_steps: Interval of steps between logging of batch level stats. """ log_steps: Optional[int] = None @dataclasses.dataclass class TrainerConfig(cfg.TrainerConfig): """Configuration for training. Attributes: train_steps: The number of steps used to train. validation_steps: The number of steps used to eval. validation_interval: The Number of training steps to run between evaluations. callbacks: An instance of CallbacksConfig. use_orbit: Whether to use orbit library with custom training loop or compile/fit API. enable_metrics_in_training: Whether to enable metrics during training. time_history: Config of TimeHistory callback. optimizer_config: An `OptimizerConfig` instance for embedding optimizer. Defaults to None. """ train_steps: int = 0 # Sets validation steps to be -1 to evaluate the entire dataset. validation_steps: int = -1 validation_interval: int = 70000 callbacks: CallbacksConfig = dataclasses.field( default_factory=CallbacksConfig ) use_orbit: bool = False enable_metrics_in_training: bool = True time_history: TimeHistoryConfig = dataclasses.field( default_factory=lambda: TimeHistoryConfig(log_steps=5000) ) optimizer_config: OptimizationConfig = dataclasses.field( default_factory=OptimizationConfig ) NUM_TRAIN_EXAMPLES = 4195197692 NUM_EVAL_EXAMPLES = 89137318 train_batch_size = 16384 eval_batch_size = 16384 steps_per_epoch = NUM_TRAIN_EXAMPLES // train_batch_size vocab_sizes = [ 39884406, 39043, 17289, 7420, 20263, 3, 7120, 1543, 63, 38532951, 2953546, 403346, 10, 2208, 11938, 155, 4, 976, 14, 39979771, 25641295, 39664984, 585935, 12972, 108, 36 ] @dataclasses.dataclass class Config(hyperparams.Config): """Configuration to train the RankingModel. By default it configures DLRM model on criteo dataset. Attributes: runtime: A `RuntimeConfig` instance. task: `Task` instance. trainer: A `TrainerConfig` instance. 
""" runtime: cfg.RuntimeConfig = dataclasses.field( default_factory=cfg.RuntimeConfig ) task: Task = dataclasses.field( default_factory=lambda: Task( # pylint: disable=g-long-lambda model=ModelConfig( embedding_dim=8, vocab_sizes=vocab_sizes, bottom_mlp=[64, 32, 8], top_mlp=[64, 32, 1], ), loss=Loss(label_smoothing=0.0), train_data=DataConfig( is_training=True, global_batch_size=train_batch_size ), validation_data=DataConfig( is_training=False, global_batch_size=eval_batch_size ), ) ) trainer: TrainerConfig = dataclasses.field( default_factory=lambda: TrainerConfig( # pylint: disable=g-long-lambda train_steps=2 * steps_per_epoch, validation_interval=steps_per_epoch, validation_steps=NUM_EVAL_EXAMPLES // eval_batch_size, enable_metrics_in_training=True, optimizer_config=OptimizationConfig(), ) ) restrictions: dataclasses.InitVar[Optional[List[str]]] = None def default_config() -> Config: return Config( runtime=cfg.RuntimeConfig(), task=Task( model=ModelConfig( embedding_dim=8, vocab_sizes=vocab_sizes, bottom_mlp=[64, 32, 4], top_mlp=[64, 32, 1]), loss=Loss(label_smoothing=0.0), train_data=DataConfig( global_batch_size=train_batch_size, is_training=True, sharding=True), validation_data=DataConfig( global_batch_size=eval_batch_size, is_training=False, sharding=False)), trainer=TrainerConfig( train_steps=2 * steps_per_epoch, validation_interval=steps_per_epoch, validation_steps=NUM_EVAL_EXAMPLES // eval_batch_size, enable_metrics_in_training=True, optimizer_config=OptimizationConfig()), restrictions=[ 'task.train_data.is_training != None', 'task.validation_data.is_training != None', ]) @exp_factory.register_config_factory('dlrm_criteo') def dlrm_criteo_tb_config() -> Config: return Config( runtime=cfg.RuntimeConfig(), task=Task( model=ModelConfig( num_dense_features=13, vocab_sizes=vocab_sizes, bottom_mlp=[512, 256, 64], embedding_dim=64, top_mlp=[1024, 1024, 512, 256, 1], interaction='dot'), loss=Loss(label_smoothing=0.0), train_data=DataConfig( global_batch_size=train_batch_size, is_training=True, sharding=True), validation_data=DataConfig( global_batch_size=eval_batch_size, is_training=False, sharding=False)), trainer=TrainerConfig( train_steps=steps_per_epoch, validation_interval=steps_per_epoch // 2, validation_steps=NUM_EVAL_EXAMPLES // eval_batch_size, enable_metrics_in_training=True, optimizer_config=OptimizationConfig()), restrictions=[ 'task.train_data.is_training != None', 'task.validation_data.is_training != None', ]) @exp_factory.register_config_factory('dcn_criteo') def dcn_criteo_tb_config() -> Config: return Config( runtime=cfg.RuntimeConfig(), task=Task( model=ModelConfig( num_dense_features=13, vocab_sizes=vocab_sizes, bottom_mlp=[512, 256, 64], embedding_dim=64, top_mlp=[1024, 1024, 512, 256, 1], interaction='cross'), loss=Loss(label_smoothing=0.0), train_data=DataConfig( global_batch_size=train_batch_size, is_training=True, sharding=True), validation_data=DataConfig( global_batch_size=eval_batch_size, is_training=False, sharding=False)), trainer=TrainerConfig( train_steps=steps_per_epoch, validation_interval=steps_per_epoch // 2, validation_steps=NUM_EVAL_EXAMPLES // eval_batch_size, enable_metrics_in_training=True, optimizer_config=OptimizationConfig()), restrictions=[ 'task.train_data.is_training != None', 'task.validation_data.is_training != None', ])
11,077
33.086154
80
py
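The config.py record above registers the `dlrm_criteo` experiment and documents that `embedding_dim` may be a single int or a per-table list. A small sketch of building that config and overriding a few nested fields via `hyperparams.Config.override`, assuming `tf-models-official` is installed; the override values are arbitrary.

from official.recommendation.ranking.configs import config

params = config.dlrm_criteo_tb_config()  # the 'dlrm_criteo' experiment registered above

# Nested overrides follow the dataclass structure defined in this record.
params.override({
    'task': {
        'model': {'embedding_dim': 32, 'interaction': 'cross'},
        'train_data': {'global_batch_size': 8192},
    },
    'trainer': {'train_steps': 10000},
})

print(params.task.model.embedding_dim)    # 32
print(params.task.model.vocab_sizes[:3])  # leading Criteo vocab sizes from the defaults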
models
models-master/official/utils/testing/mock_task.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mock task for testing.""" import dataclasses import numpy as np import tensorflow as tf from official.core import base_task from official.core import config_definitions as cfg from official.core import exp_factory from official.modeling.hyperparams import base_config class MockModel(tf.keras.Model): def __init__(self, network): super().__init__() self.network = network def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks outputs = self.network(inputs) self.add_loss(tf.reduce_mean(outputs)) return outputs @dataclasses.dataclass class MockTaskConfig(cfg.TaskConfig): pass @base_config.bind(MockTaskConfig) class MockTask(base_task.Task): """Mock task object for testing.""" def __init__(self, params=None, logging_dir=None, name=None): super().__init__(params=params, logging_dir=logging_dir, name=name) def build_model(self, *arg, **kwargs): inputs = tf.keras.layers.Input(shape=(2,), name="random", dtype=tf.float32) outputs = tf.keras.layers.Dense( 1, bias_initializer=tf.keras.initializers.Ones(), name="dense_0")( inputs) network = tf.keras.Model(inputs=inputs, outputs=outputs) return MockModel(network) def build_metrics(self, training: bool = True): del training return [tf.keras.metrics.Accuracy(name="acc")] def validation_step(self, inputs, model: tf.keras.Model, metrics=None): logs = super().validation_step(inputs, model, metrics) logs["counter"] = tf.constant(1, dtype=tf.float32) return logs def build_inputs(self, params): def generate_data(_): x = tf.zeros(shape=(2,), dtype=tf.float32) label = tf.zeros([1], dtype=tf.int32) return x, label dataset = tf.data.Dataset.range(1) dataset = dataset.repeat() dataset = dataset.map( generate_data, num_parallel_calls=tf.data.experimental.AUTOTUNE) return dataset.prefetch(buffer_size=1).batch(2, drop_remainder=True) def aggregate_logs(self, state, step_outputs): if state is None: state = {} for key, value in step_outputs.items(): if key not in state: state[key] = [] state[key].append( np.concatenate([np.expand_dims(v.numpy(), axis=0) for v in value])) return state def reduce_aggregated_logs(self, aggregated_logs, global_step=None): for k, v in aggregated_logs.items(): aggregated_logs[k] = np.sum(np.stack(v, axis=0)) return aggregated_logs @exp_factory.register_config_factory("mock") def mock_experiment() -> cfg.ExperimentConfig: config = cfg.ExperimentConfig( task=MockTaskConfig(), trainer=cfg.TrainerConfig()) return config
3,302
31.067961
100
py
models
models-master/official/utils/docs/build_tfm_api_docs.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Tool to generate api_docs for tensorflow_models/official library. Example: $> pip install -U git+https://github.com/tensorflow/docs $> python build_nlp_api_docs.py --output_dir=/tmp/api_docs """ import pathlib from absl import app from absl import flags from absl import logging import tensorflow as tf from tensorflow_docs.api_generator import doc_controls from tensorflow_docs.api_generator import generate_lib from tensorflow_docs.api_generator import parser from tensorflow_docs.api_generator import public_api from tensorflow_docs.api_generator.pretty_docs import base_page from tensorflow_docs.api_generator.pretty_docs import function_page import tensorflow_models as tfm FLAGS = flags.FLAGS flags.DEFINE_string('output_dir', None, 'Where to write the resulting docs to.') flags.DEFINE_string( 'code_url_prefix', 'https://github.com/tensorflow/models/blob/master/tensorflow_models', 'The url prefix for links to code.') flags.DEFINE_bool('search_hints', True, 'Include metadata search hints in the generated files') flags.DEFINE_string('site_path', '/api_docs/python', 'Path prefix in the _toc.yaml') PROJECT_SHORT_NAME = 'tfm' PROJECT_FULL_NAME = 'TensorFlow Modeling Library' class ExpFactoryInfo(function_page.FunctionPageInfo): """Customize the page for the experiment factory.""" def collect_docs(self): super().collect_docs() self.doc.docstring_parts.append(self.make_factory_options_table()) def make_factory_options_table(self): lines = [ '', 'Allowed values for `exp_name`:', '', # The indent is important here, it keeps the site's markdown parser # from switching to HTML mode. ' <table>\n', '<th><code>exp_name</code></th><th>Description</th>', ] reference_resolver = self.parser_config.reference_resolver api_tree = self.parser_config.api_tree for name, fn in sorted(tfm.core.exp_factory._REGISTERED_CONFIGS.items()): # pylint: disable=protected-access fn_api_node = api_tree.node_for_object(fn) if fn_api_node is None: location = parser.get_defined_in(self.py_object, self.parser_config) link = base_page.small_source_link(location, name) else: link = reference_resolver.python_link(name, fn_api_node.full_name) doc = fn.__doc__ if doc: doc = doc.splitlines()[0] else: doc = '' lines.append(f'<tr><td>{link}</td><td>{doc}</td></tr>') lines.append('</table>') return '\n'.join(lines) def hide_module_model_and_layer_methods(): """Hide methods and properties defined in the base classes of Keras layers. We hide all methods and properties of the base classes, except: - `__init__` is always documented. - `call` is always documented, as it can carry important information for complex layers. """ module_contents = list(tf.Module.__dict__.items()) model_contents = list(tf.keras.Model.__dict__.items()) layer_contents = list(tf.keras.layers.Layer.__dict__.items()) for name, obj in module_contents + layer_contents + model_contents: if name == '__init__': # Always document __init__. 
continue if name == 'call': # Always document `call`. if hasattr(obj, doc_controls._FOR_SUBCLASS_IMPLEMENTERS): # pylint: disable=protected-access delattr(obj, doc_controls._FOR_SUBCLASS_IMPLEMENTERS) # pylint: disable=protected-access continue # Otherwise, exclude from documentation. if isinstance(obj, property): obj = obj.fget if isinstance(obj, (staticmethod, classmethod)): obj = obj.__func__ try: doc_controls.do_not_doc_in_subclasses(obj) except AttributeError: pass def custom_filter(path, parent, children): if len(path) <= 2: # Don't filter the contents of the top level `tfm.vision` package. return children else: return public_api.explicit_package_contents_filter(path, parent, children) def gen_api_docs(code_url_prefix, site_path, output_dir, project_short_name, project_full_name, search_hints): """Generates api docs for the tensorflow docs package.""" hide_module_model_and_layer_methods() del tfm.nlp.layers.MultiHeadAttention del tfm.nlp.layers.EinsumDense doc_controls.set_custom_page_builder_cls(tfm.core.exp_factory.get_exp_config, ExpFactoryInfo) url_parts = code_url_prefix.strip('/').split('/') url_parts = url_parts[:url_parts.index('tensorflow_models')] url_parts.append('official') official_url_prefix = '/'.join(url_parts) tfm_base_dir = pathlib.Path(tfm.__file__).parent # The `layers` submodule (and others) are actually defined in the `official` # package. Find the path to `official`. official_base_dir = [ p for p in pathlib.Path(tfm.vision.layers.__file__).parents if p.name == 'official' ][0] doc_generator = generate_lib.DocGenerator( root_title=project_full_name, py_modules=[(project_short_name, tfm)], base_dir=[tfm_base_dir, official_base_dir], code_url_prefix=[ code_url_prefix, official_url_prefix, ], search_hints=search_hints, site_path=site_path, callbacks=[custom_filter], ) doc_generator.build(output_dir) logging.info('Output docs to: %s', output_dir) def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') gen_api_docs( code_url_prefix=FLAGS.code_url_prefix, site_path=FLAGS.site_path, output_dir=FLAGS.output_dir, project_short_name=PROJECT_SHORT_NAME, project_full_name=PROJECT_FULL_NAME, search_hints=FLAGS.search_hints) if __name__ == '__main__': flags.mark_flag_as_required('output_dir') app.run(main)
6,437
31.515152
114
py
models
models-master/official/utils/docs/build_orbit_api_docs.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Tool to generate api_docs for tensorflow_models/official library. Example: $> pip install -U git+https://github.com/tensorflow/docs $> python build_orbit_api_docs.py --output_dir=/tmp/api_docs """ from absl import app from absl import flags from absl import logging import orbit import tensorflow as tf from tensorflow_docs.api_generator import doc_controls from tensorflow_docs.api_generator import generate_lib from tensorflow_docs.api_generator import public_api FLAGS = flags.FLAGS flags.DEFINE_string('output_dir', None, 'Where to write the resulting docs to.') flags.DEFINE_string('code_url_prefix', 'https://github.com/tensorflow/models/blob/master/orbit', 'The url prefix for links to code.') flags.DEFINE_bool('search_hints', True, 'Include metadata search hints in the generated files') flags.DEFINE_string('site_path', '/api_docs/python', 'Path prefix in the _toc.yaml') PROJECT_SHORT_NAME = 'orbit' PROJECT_FULL_NAME = 'Orbit' def hide_module_model_and_layer_methods(): """Hide methods and properties defined in the base classes of Keras layers. We hide all methods and properties of the base classes, except: - `__init__` is always documented. - `call` is always documented, as it can carry important information for complex layers. """ module_contents = list(tf.Module.__dict__.items()) model_contents = list(tf.keras.Model.__dict__.items()) layer_contents = list(tf.keras.layers.Layer.__dict__.items()) for name, obj in module_contents + layer_contents + model_contents: if name == '__init__': # Always document __init__. continue if name == 'call': # Always document `call`. if hasattr(obj, doc_controls._FOR_SUBCLASS_IMPLEMENTERS): # pylint: disable=protected-access delattr(obj, doc_controls._FOR_SUBCLASS_IMPLEMENTERS) # pylint: disable=protected-access continue # Otherwise, exclude from documentation. if isinstance(obj, property): obj = obj.fget if isinstance(obj, (staticmethod, classmethod)): obj = obj.__func__ try: doc_controls.do_not_doc_in_subclasses(obj) except AttributeError: pass def gen_api_docs(code_url_prefix, site_path, output_dir, project_short_name, project_full_name, search_hints): """Generates api docs for the tensorflow docs package.""" doc_generator = generate_lib.DocGenerator( root_title=project_full_name, py_modules=[(project_short_name, orbit)], code_url_prefix=code_url_prefix, search_hints=search_hints, site_path=site_path, callbacks=[public_api.explicit_package_contents_filter], ) doc_generator.build(output_dir) logging.info('Output docs to: %s', output_dir) def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') gen_api_docs( code_url_prefix=FLAGS.code_url_prefix, site_path=FLAGS.site_path, output_dir=FLAGS.output_dir, project_short_name=PROJECT_SHORT_NAME, project_full_name=PROJECT_FULL_NAME, search_hints=FLAGS.search_hints) if __name__ == '__main__': flags.mark_flag_as_required('output_dir') app.run(main)
3,850
31.091667
99
py
models
models-master/official/utils/misc/keras_utils.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions for the Keras implementations of models.""" import multiprocessing import os import time from absl import logging import tensorflow as tf from tensorflow.python.eager import monitoring global_batch_size_gauge = monitoring.IntGauge( '/tensorflow/training/global_batch_size', 'TF training global batch size') first_batch_time_gauge = monitoring.IntGauge( '/tensorflow/training/first_batch', 'TF training start/end time for first batch (unix epoch time in us.', 'type') first_batch_start_time = first_batch_time_gauge.get_cell('start') first_batch_end_time = first_batch_time_gauge.get_cell('end') class BatchTimestamp(object): """A structure to store batch time stamp.""" def __init__(self, batch_index, timestamp): self.batch_index = batch_index self.timestamp = timestamp def __repr__(self): return "'BatchTimestamp<batch_index: {}, timestamp: {}>'".format( self.batch_index, self.timestamp) class TimeHistory(tf.keras.callbacks.Callback): """Callback for Keras models.""" def __init__(self, batch_size, log_steps, initial_step=0, logdir=None): """Callback for logging performance. Args: batch_size: Total batch size. log_steps: Interval of steps between logging of batch level stats. initial_step: Optional, initial step. logdir: Optional directory to write TensorBoard summaries. """ # TODO(wcromar): remove this parameter and rely on `logs` parameter of # on_train_batch_end() self.batch_size = batch_size super(TimeHistory, self).__init__() self.log_steps = log_steps self.last_log_step = initial_step self.steps_before_epoch = initial_step self.steps_in_epoch = 0 self.start_time = None global_batch_size_gauge.get_cell().set(batch_size) if logdir: self.summary_writer = tf.summary.create_file_writer(logdir) else: self.summary_writer = None # Logs start of step 1 then end of each step based on log_steps interval. self.timestamp_log = [] # Records the time each epoch takes to run from start to finish of epoch. self.epoch_runtime_log = [] @property def global_steps(self): """The current 1-indexed global step.""" return self.steps_before_epoch + self.steps_in_epoch @property def average_steps_per_second(self): """The average training steps per second across all epochs.""" return self.global_steps / sum(self.epoch_runtime_log) @property def average_examples_per_second(self): """The average number of training examples per second across all epochs.""" return self.average_steps_per_second * self.batch_size def get_examples_per_sec(self, warmup=1): """Calculates examples/sec through timestamp_log and skip warmup period.""" # First entry in timestamp_log is the start of the step 1. The rest of the # entries are the end of each step recorded. 
time_log = self.timestamp_log seconds = time_log[-1].timestamp - time_log[warmup].timestamp steps = time_log[-1].batch_index - time_log[warmup].batch_index return self.batch_size * steps / seconds def get_startup_time(self, start_time_sec): return self.timestamp_log[0].timestamp - start_time_sec def on_train_end(self, logs=None): self.train_finish_time = time.time() if self.summary_writer: self.summary_writer.flush() def on_epoch_begin(self, epoch, logs=None): self.epoch_start = time.time() def on_batch_begin(self, batch, logs=None): if not self.start_time: self.start_time = time.time() if not first_batch_start_time.value(): first_batch_start_time.set(int(self.start_time * 1000000)) # Record the timestamp of the first global step if not self.timestamp_log: self.timestamp_log.append( BatchTimestamp(self.global_steps, self.start_time)) def on_batch_end(self, batch, logs=None): """Records elapse time of the batch and calculates examples per second.""" if not first_batch_end_time.value(): first_batch_end_time.set(int(time.time() * 1000000)) self.steps_in_epoch = batch + 1 steps_since_last_log = self.global_steps - self.last_log_step if steps_since_last_log >= self.log_steps: now = time.time() elapsed_time = now - self.start_time steps_per_second = steps_since_last_log / elapsed_time examples_per_second = steps_per_second * self.batch_size self.timestamp_log.append(BatchTimestamp(self.global_steps, now)) logging.info( 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' 'and %d', elapsed_time, examples_per_second, self.last_log_step, self.global_steps) if self.summary_writer: with self.summary_writer.as_default(): tf.summary.scalar('steps_per_second', steps_per_second, self.global_steps) tf.summary.scalar('examples_per_second', examples_per_second, self.global_steps) self.last_log_step = self.global_steps self.start_time = None def on_epoch_end(self, epoch, logs=None): epoch_run_time = time.time() - self.epoch_start self.epoch_runtime_log.append(epoch_run_time) self.steps_before_epoch += self.steps_in_epoch self.steps_in_epoch = 0 class SimpleCheckpoint(tf.keras.callbacks.Callback): """Keras callback to save tf.train.Checkpoints.""" def __init__(self, checkpoint_manager): super(SimpleCheckpoint, self).__init__() self.checkpoint_manager = checkpoint_manager def on_epoch_end(self, epoch, logs=None): step_counter = self.checkpoint_manager._step_counter.numpy() # pylint: disable=protected-access self.checkpoint_manager.save(checkpoint_number=step_counter) def set_session_config(enable_xla=False): """Sets the session config.""" if enable_xla: tf.config.optimizer.set_jit(True) # TODO(hongkuny): remove set_config_v2 globally. set_config_v2 = set_session_config def set_gpu_thread_mode_and_count(gpu_thread_mode, datasets_num_private_threads, num_gpus, per_gpu_thread_count): """Set GPU thread mode and count, and adjust dataset threads count.""" cpu_count = multiprocessing.cpu_count() logging.info('Logical CPU cores: %s', cpu_count) # Allocate private thread pool for each GPU to schedule and launch kernels per_gpu_thread_count = per_gpu_thread_count or 2 os.environ['TF_GPU_THREAD_MODE'] = gpu_thread_mode os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count) logging.info('TF_GPU_THREAD_COUNT: %s', os.environ['TF_GPU_THREAD_COUNT']) logging.info('TF_GPU_THREAD_MODE: %s', os.environ['TF_GPU_THREAD_MODE']) # Limit data preprocessing threadpool to CPU cores minus number of total GPU # private threads and memory copy threads. 
total_gpu_thread_count = per_gpu_thread_count * num_gpus num_runtime_threads = num_gpus if not datasets_num_private_threads: datasets_num_private_threads = min( cpu_count - total_gpu_thread_count - num_runtime_threads, num_gpus * 8) logging.info('Set datasets_num_private_threads to %s', datasets_num_private_threads)
7,783
35.716981
100
py
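`TimeHistory` in the keras_utils.py record above is a plain Keras callback, so its throughput properties can be read back after `model.fit`. A minimal sketch with toy data, assuming `tf-models-official` is installed; the model, data, and batch size are arbitrary.

import numpy as np
import tensorflow as tf
from official.utils.misc import keras_utils

# Toy regression problem; the batch_size given to TimeHistory should match
# fit()'s batch size so the examples/sec numbers are meaningful.
x = np.random.rand(1024, 4).astype('float32')
y = np.random.rand(1024, 1).astype('float32')

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer='sgd', loss='mse')

time_callback = keras_utils.TimeHistory(batch_size=64, log_steps=5)
model.fit(x, y, batch_size=64, epochs=2, callbacks=[time_callback], verbose=0)

print(time_callback.average_steps_per_second)
print(time_callback.average_examples_per_second)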
models
models-master/official/utils/flags/_performance.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Register flags for optimizing performance.""" import multiprocessing from absl import flags # pylint: disable=g-bad-import-order import tensorflow as tf # pylint: disable=g-bad-import-order from official.utils.flags._conventions import help_wrap # Map string to TensorFlow dtype DTYPE_MAP = { "fp16": tf.float16, "bf16": tf.bfloat16, "fp32": tf.float32, } def get_tf_dtype(flags_obj): if getattr(flags_obj, "fp16_implementation", None) == "graph_rewrite": # If the graph_rewrite is used, we build the graph with fp32, and let the # graph rewrite change ops to fp16. return tf.float32 return DTYPE_MAP[flags_obj.dtype] def get_loss_scale(flags_obj, default_for_fp16): dtype = get_tf_dtype(flags_obj) if flags_obj.loss_scale == "dynamic": return flags_obj.loss_scale elif flags_obj.loss_scale is not None: return float(flags_obj.loss_scale) elif dtype == tf.float32 or dtype == tf.bfloat16: return 1 # No loss scaling is needed for fp32 else: assert dtype == tf.float16 return default_for_fp16 def define_performance(num_parallel_calls=False, inter_op=False, intra_op=False, synthetic_data=False, max_train_steps=False, dtype=False, all_reduce_alg=False, num_packs=False, tf_gpu_thread_mode=False, datasets_num_private_threads=False, datasets_num_parallel_batches=False, fp16_implementation=False, loss_scale=False, tf_data_experimental_slack=False, enable_xla=False, training_dataset_cache=False): """Register flags for specifying performance tuning arguments. Args: num_parallel_calls: Create a flag to specify parallelism of data loading. inter_op: Create a flag to allow specification of inter op threads. intra_op: Create a flag to allow specification of intra op threads. synthetic_data: Create a flag to allow the use of synthetic data. max_train_steps: Create a flags to allow specification of maximum number of training steps dtype: Create flags for specifying dtype. all_reduce_alg: If set forces a specific algorithm for multi-gpu. num_packs: If set provides number of packs for MirroredStrategy's cross device ops. tf_gpu_thread_mode: gpu_private triggers us of private thread pool. datasets_num_private_threads: Number of private threads for datasets. datasets_num_parallel_batches: Determines how many batches to process in parallel when using map and batch from tf.data. fp16_implementation: Create fp16_implementation flag. loss_scale: Controls the loss scaling, normally for mixed-precision training. Can only be turned on if dtype is also True. tf_data_experimental_slack: Determines whether to enable tf.data's `experimental_slack` option. enable_xla: Determines if XLA (auto clustering) is turned on. training_dataset_cache: Whether to cache the training dataset on workers. Typically used to improve training performance when training data is in remote storage and can fit into worker memory. Returns: A list of flags for core.py to marks as key flags. 
""" key_flags = [] if num_parallel_calls: flags.DEFINE_integer( name="num_parallel_calls", short_name="npc", default=multiprocessing.cpu_count(), help=help_wrap("The number of records that are processed in parallel " "during input processing. This can be optimized per " "data set but for generally homogeneous data sets, " "should be approximately the number of available CPU " "cores. (default behavior)")) if inter_op: flags.DEFINE_integer( name="inter_op_parallelism_threads", short_name="inter", default=0, help=help_wrap("Number of inter_op_parallelism_threads to use for CPU. " "See TensorFlow config.proto for details.")) if intra_op: flags.DEFINE_integer( name="intra_op_parallelism_threads", short_name="intra", default=0, help=help_wrap("Number of intra_op_parallelism_threads to use for CPU. " "See TensorFlow config.proto for details.")) if synthetic_data: flags.DEFINE_bool( name="use_synthetic_data", short_name="synth", default=False, help=help_wrap( "If set, use fake data (zeroes) instead of a real dataset. " "This mode is useful for performance debugging, as it removes " "input processing steps, but will not learn anything.")) if max_train_steps: flags.DEFINE_integer( name="max_train_steps", short_name="mts", default=None, help=help_wrap( "The model will stop training if the global_step reaches this " "value. If not set, training will run until the specified number " "of epochs have run as usual. It is generally recommended to set " "--train_epochs=1 when using this flag.")) if dtype: flags.DEFINE_enum( name="dtype", short_name="dt", default="fp32", enum_values=DTYPE_MAP.keys(), help=help_wrap("The TensorFlow datatype used for calculations. " "For 16-bit dtypes, variables and certain ops will " "still be float32 for numeric stability.")) if loss_scale: flags.DEFINE_string( name="loss_scale", short_name="ls", default=None, help=help_wrap( "The amount to scale the loss by when --dtype=fp16. This can be " "an int/float or the string 'dynamic'. Before gradients are " "computed, the loss is multiplied by the loss scale, making all " "gradients loss_scale times larger. To adjust for this, " "gradients are divided by the loss scale before being applied to " "variables. This is mathematically equivalent to training " "without a loss scale, but the loss scale helps avoid some " "intermediate gradients from underflowing to zero. The default " "is 'dynamic', which dynamic determines the optimal loss scale " "during training.")) # pylint: disable=unused-variable @flags.validator( flag_name="loss_scale", message="loss_scale should be a positive int/float or the string " "'dynamic'.") def _check_loss_scale(loss_scale): """Validator to check the loss scale flag is valid.""" if loss_scale is None: return True # null case is handled in get_loss_scale() if loss_scale == "dynamic": return True try: loss_scale = float(loss_scale) except ValueError: return False return loss_scale > 0 # pylint: enable=unused-variable if fp16_implementation: flags.DEFINE_enum( name="fp16_implementation", default="keras", enum_values=("keras", "graph_rewrite"), help=help_wrap( "When --dtype=fp16, how fp16 should be implemented. This has no " "impact on correctness. 'keras' uses the " "tf.keras.mixed_precision API. 'graph_rewrite' uses the " "tf.compat.v1.mixed_precision." 
"enable_mixed_precision_graph_rewrite API.")) @flags.multi_flags_validator( ["fp16_implementation", "dtype", "loss_scale"]) def _check_fp16_implementation(flags_dict): """Validator to check fp16_implementation flag is valid.""" if (flags_dict["fp16_implementation"] == "graph_rewrite" and flags_dict["dtype"] != "fp16"): raise flags.ValidationError("--fp16_implementation should not be " "specified unless --dtype=fp16") return True if all_reduce_alg: flags.DEFINE_string( name="all_reduce_alg", short_name="ara", default=None, help=help_wrap("Defines the algorithm to use for performing all-reduce." "When specified with MirroredStrategy for single " "worker, this controls " "tf.contrib.distribute.AllReduceCrossTowerOps. When " "specified with MultiWorkerMirroredStrategy, this " "controls " "tf.distribute.experimental.CollectiveCommunication; " "valid options are `ring` and `nccl`.")) if num_packs: flags.DEFINE_integer( name="num_packs", default=1, help=help_wrap("Sets `num_packs` in the cross device ops used in " "MirroredStrategy. For details, see " "tf.distribute.NcclAllReduce.")) if tf_gpu_thread_mode: flags.DEFINE_string( name="tf_gpu_thread_mode", short_name="gt_mode", default=None, help=help_wrap( "Whether and how the GPU device uses its own threadpool.")) flags.DEFINE_integer( name="per_gpu_thread_count", short_name="pgtc", default=0, help=help_wrap("The number of threads to use for GPU. Only valid when " "tf_gpu_thread_mode is not global.")) if datasets_num_private_threads: flags.DEFINE_integer( name="datasets_num_private_threads", default=None, help=help_wrap( "Number of threads for a private threadpool created for all" "datasets computation..")) if datasets_num_parallel_batches: flags.DEFINE_integer( name="datasets_num_parallel_batches", default=None, help=help_wrap( "Determines how many batches to process in parallel when using " "map and batch from tf.data.")) if training_dataset_cache: flags.DEFINE_boolean( name="training_dataset_cache", default=False, help=help_wrap( "Determines whether to cache the training dataset on workers. " "Typically used to improve training performance when training " "data is in remote storage and can fit into worker memory.")) if tf_data_experimental_slack: flags.DEFINE_boolean( name="tf_data_experimental_slack", default=False, help=help_wrap( "Whether to enable tf.data's `experimental_slack` option.")) if enable_xla: flags.DEFINE_boolean( name="enable_xla", default=False, help="Whether to enable XLA auto jit compilation") return key_flags
11,566
38.210169
80
py
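A minimal usage sketch for the flag helpers in official/utils/flags/_performance.py above, assuming an absl-driven binary; the main function and the chosen flag groups are illustrative, not part of the original file.

from absl import app
from absl import flags

from official.utils.flags import _performance

FLAGS = flags.FLAGS


def main(_):
  # Resolve the flags registered by define_performance() into concrete values.
  dtype = _performance.get_tf_dtype(FLAGS)  # tf.float32 by default, tf.float16 for --dtype=fp16
  loss_scale = _performance.get_loss_scale(FLAGS, default_for_fp16=128)
  print(dtype, loss_scale)


if __name__ == "__main__":
  _performance.define_performance(
      dtype=True, loss_scale=True, fp16_implementation=True)
  app.run(main)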
models
models-master/official/modeling/tf_utils.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Common TF utilities.""" import functools import inspect import six import tensorflow as tf from tensorflow.python.util import deprecation from official.modeling import activations @deprecation.deprecated( None, "tf.keras.layers.Layer supports multiple positional args and kwargs as " "input tensors. pack/unpack inputs to override __call__ is no longer " "needed.") def pack_inputs(inputs): """Pack a list of `inputs` tensors to a tuple. Args: inputs: a list of tensors. Returns: a tuple of tensors. if any input is None, replace it with a special constant tensor. """ inputs = tf.nest.flatten(inputs) outputs = [] for x in inputs: if x is None: outputs.append(tf.constant(0, shape=[], dtype=tf.int32)) else: outputs.append(x) return tuple(outputs) @deprecation.deprecated( None, "tf.keras.layers.Layer supports multiple positional args and kwargs as " "input tensors. pack/unpack inputs to override __call__ is no longer " "needed.") def unpack_inputs(inputs): """unpack a tuple of `inputs` tensors to a tuple. Args: inputs: a list of tensors. Returns: a tuple of tensors. if any input is a special constant tensor, replace it with None. """ inputs = tf.nest.flatten(inputs) outputs = [] for x in inputs: if is_special_none_tensor(x): outputs.append(None) else: outputs.append(x) x = tuple(outputs) # To trick the very pointless 'unbalanced-tuple-unpacking' pylint check # from triggering. if len(x) == 1: return x[0] return tuple(outputs) def is_special_none_tensor(tensor): """Checks if a tensor is a special None Tensor.""" return tensor.shape.ndims == 0 and tensor.dtype == tf.int32 def get_activation(identifier, use_keras_layer=False, **kwargs): """Maps an identifier to a Python function, e.g., "relu" => `tf.nn.relu`. It checks string first and if it is one of customized activation not in TF, the corresponding activation will be returned. For non-customized activation names and callable identifiers, always fallback to tf.keras.activations.get. Prefers using keras layers when use_keras_layer=True. Now it only supports 'relu', 'linear', 'identity', 'swish', 'mish', 'leaky_relu', and 'gelu'. Args: identifier: String name of the activation function or callable. use_keras_layer: If True, use keras layer if identifier is allow-listed. **kwargs: Keyword arguments to use to instantiate an activation function. Available only for 'leaky_relu' and 'gelu' when using keras layers. For example: get_activation('leaky_relu', use_keras_layer=True, alpha=0.1) Returns: A Python function corresponding to the activation function or a keras activation layer when use_keras_layer=True. 
""" if isinstance(identifier, six.string_types): identifier = str(identifier).lower() if use_keras_layer: keras_layer_allowlist = { "relu": "relu", "linear": "linear", "identity": "linear", "swish": "swish", "sigmoid": "sigmoid", "relu6": tf.nn.relu6, "leaky_relu": functools.partial(tf.nn.leaky_relu, **kwargs), "hard_swish": activations.hard_swish, "hard_sigmoid": activations.hard_sigmoid, "mish": activations.mish, "gelu": functools.partial(tf.nn.gelu, **kwargs), } if identifier in keras_layer_allowlist: return tf.keras.layers.Activation(keras_layer_allowlist[identifier]) name_to_fn = { "gelu": activations.gelu, "simple_swish": activations.simple_swish, "hard_swish": activations.hard_swish, "relu6": activations.relu6, "hard_sigmoid": activations.hard_sigmoid, "identity": activations.identity, "mish": activations.mish, } if identifier in name_to_fn: return tf.keras.activations.get(name_to_fn[identifier]) return tf.keras.activations.get(identifier) def get_shape_list(tensor, expected_rank=None, name=None): """Returns a list of the shape of tensor, preferring static dimensions. Args: tensor: A tf.Tensor object to find the shape of. expected_rank: (optional) int. The expected rank of `tensor`. If this is specified and the `tensor` has a different rank, and exception will be thrown. name: Optional name of the tensor for the error message. Returns: A list of dimensions of the shape of tensor. All static dimensions will be returned as python integers, and dynamic dimensions will be returned as tf.Tensor scalars. """ if expected_rank is not None: assert_rank(tensor, expected_rank, name) shape = tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: return shape dyn_shape = tf.shape(tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] return shape def assert_rank(tensor, expected_rank, name=None): """Raises an exception if the tensor rank is not of the expected rank. Args: tensor: A tf.Tensor to check the rank of. expected_rank: Python integer or list of integers, expected rank. name: Optional name of the tensor for the error message. Raises: ValueError: If the expected shape doesn't match the actual shape. """ expected_rank_dict = {} if isinstance(expected_rank, six.integer_types): expected_rank_dict[expected_rank] = True else: for x in expected_rank: expected_rank_dict[x] = True actual_rank = tensor.shape.ndims if actual_rank not in expected_rank_dict: raise ValueError( "For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not " "equal to the expected tensor rank `%s`" % (name, actual_rank, str(tensor.shape), str(expected_rank))) def safe_mean(losses): """Computes a safe mean of the losses. Args: losses: `Tensor` whose elements contain individual loss measurements. Returns: A scalar representing the mean of `losses`. If `num_present` is zero, then zero is returned. """ total = tf.reduce_sum(losses) num_elements = tf.cast(tf.size(losses), dtype=losses.dtype) return tf.math.divide_no_nan(total, num_elements) def get_replica_id(): """Gets replica id depending on the environment.""" context = tf.distribute.get_replica_context() if context is not None: return context.replica_id_in_sync_group else: raise RuntimeError("Unknown replica context. The `get_replica_id` method " "relies on TF 2.x tf.distribute API.") def cross_replica_concat(value, axis, name="cross_replica_concat"): """Concatenates the given `value` across (GPU/TPU) cores, along `axis`. 
In general, each core ("replica") will pass a replica-specific value as `value` (corresponding to some element of a data-parallel computation taking place across replicas). The resulting concatenated `Tensor` will have the same shape as `value` for all dimensions except `axis`, where it will be larger by a factor of the number of replicas. It will also have the same `dtype` as `value`. The position of a given replica's `value` within the resulting concatenation is determined by that replica's replica ID. For example: With `value` for replica 0 given as 0 0 0 0 0 0 and `value` for replica 1 given as 1 1 1 1 1 1 the resulting concatenation along axis 0 will be 0 0 0 0 0 0 1 1 1 1 1 1 and this result will be identical across all replicas. Note that this API only works in TF2 with `tf.distribute`. Args: value: The `Tensor` to concatenate across replicas. Each replica will have a different value for this `Tensor`, and these replica-specific values will be concatenated. axis: The axis along which to perform the concatenation as a Python integer (not a `Tensor`). E.g., `axis=0` to concatenate along the batch dimension. name: A name for the operation (used to create a name scope). Returns: The result of concatenating `value` along `axis` across replicas. Raises: RuntimeError: when the batch (0-th) dimension is None. """ with tf.name_scope(name): context = tf.distribute.get_replica_context() # Typically this could be hit only if the tensor is derived from a # dataset with finite epochs and drop_remainder=False, where the last # batch could be of a different batch size and then the dim-0 is of dynamic # shape. if value.shape.as_list()[0] is None: raise RuntimeError(f"{value} has unknown batch.") return context.all_gather(value, axis=axis) def clone_initializer(initializer): # Keras initializer is going to be stateless, which means reusing the same # initializer will produce the same init value when the shapes are the same.
if isinstance(initializer, tf.keras.initializers.Initializer): return initializer.__class__.from_config(initializer.get_config()) # When the input is string/dict or other serialized configs, caller will # create a new keras Initializer instance based on that, and we don't need to # do anything return initializer def serialize_keras_object(obj): if hasattr(tf.keras.utils, "legacy"): return tf.keras.utils.legacy.serialize_keras_object(obj) else: return tf.keras.utils.serialize_keras_object(obj) def deserialize_keras_object( config, module_objects=None, custom_objects=None, printable_module_name=None ): if hasattr(tf.keras.utils, "legacy"): return tf.keras.utils.legacy.deserialize_keras_object( config, custom_objects, module_objects, printable_module_name ) else: return tf.keras.utils.deserialize_keras_object( config, custom_objects, module_objects, printable_module_name ) def serialize_layer(layer, use_legacy_format=False): if ( "use_legacy_format" in inspect.getfullargspec(tf.keras.layers.serialize).args ): return tf.keras.layers.serialize(layer, use_legacy_format=use_legacy_format) else: return tf.keras.layers.serialize(layer) def serialize_initializer(initializer, use_legacy_format=False): if ( "use_legacy_format" in inspect.getfullargspec(tf.keras.initializers.serialize).args ): return tf.keras.initializers.serialize( initializer, use_legacy_format=use_legacy_format ) else: return tf.keras.initializers.serialize(initializer) def serialize_regularizer(regularizer, use_legacy_format=False): if ( "use_legacy_format" in inspect.getfullargspec(tf.keras.regularizers.serialize).args ): return tf.keras.regularizers.serialize( regularizer, use_legacy_format=use_legacy_format ) else: return tf.keras.regularizers.serialize(regularizer) def serialize_constraint(constraint, use_legacy_format=False): if ( "use_legacy_format" in inspect.getfullargspec(tf.keras.constraints.serialize).args ): return tf.keras.constraints.serialize( constraint, use_legacy_format=use_legacy_format ) else: return tf.keras.constraints.serialize(constraint) def serialize_activation(activation, use_legacy_format=False): if ( "use_legacy_format" in inspect.getfullargspec(tf.keras.activations.serialize).args ): return tf.keras.activations.serialize( activation, use_legacy_format=use_legacy_format ) else: return tf.keras.activations.serialize(activation)
12,146
31.565684
80
py
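A short illustrative sketch of the tf_utils helpers above; the tensor values are arbitrary.

import tensorflow as tf

from official.modeling import tf_utils

# String identifiers resolve to activation callables, or to Keras layers when requested.
gelu_fn = tf_utils.get_activation("gelu")
leaky = tf_utils.get_activation("leaky_relu", use_keras_layer=True, alpha=0.1)
print(leaky(tf.constant([-1.0])))  # ~[-0.1]

# get_shape_list prefers static dimensions and can assert the rank.
x = tf.zeros([2, 3, 8])
print(tf_utils.get_shape_list(x, expected_rank=3))  # [2, 3, 8]

# safe_mean returns 0 instead of NaN for an empty loss tensor.
print(tf_utils.safe_mean(tf.constant([], dtype=tf.float32)))  # 0.0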
models
models-master/official/modeling/performance.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions and classes related to training performance.""" from absl import logging import tensorflow as tf def configure_optimizer(optimizer, use_float16=False, loss_scale=None, use_graph_rewrite=None): """Configures optimizer object with performance options.""" if use_graph_rewrite is not None: logging.warning('`use_graph_rewrite` is deprecated inside ' '`configure_optimizer`. Please remove the usage.') del use_graph_rewrite if use_float16: if loss_scale in (None, 'dynamic'): optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer) else: # loss_scale is a number. We interpret that as a fixed loss scale. optimizer = tf.keras.mixed_precision.LossScaleOptimizer( optimizer, dynamic=False, initial_scale=loss_scale) return optimizer def set_mixed_precision_policy(dtype, loss_scale=None): """Sets the global `tf.keras.mixed_precision.Policy`.""" # TODO(b/191894773): Remove loss_scale argument assert loss_scale is None, ( 'The loss_scale argument must be None. The argument exists for ' 'historical reasons and will be removed soon.') if dtype == tf.float16: tf.keras.mixed_precision.set_global_policy('mixed_float16') elif dtype == tf.bfloat16: tf.keras.mixed_precision.set_global_policy('mixed_bfloat16') elif dtype == tf.float32: tf.keras.mixed_precision.set_global_policy('float32') else: raise ValueError('Unexpected dtype: %s' % dtype)
2,159
39
74
py
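A small sketch of the intended call order for mixed-precision training with performance.py above (illustrative).

import tensorflow as tf

from official.modeling import performance

# Set the global Keras policy first, then wrap the optimizer so the loss is
# scaled dynamically under float16.
performance.set_mixed_precision_policy(tf.float16)
optimizer = performance.configure_optimizer(
    tf.keras.optimizers.SGD(0.1), use_float16=True, loss_scale="dynamic")
# `optimizer` is now a tf.keras.mixed_precision.LossScaleOptimizer wrapping SGD.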
models
models-master/official/modeling/grad_utils_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for grad_utils.""" import tensorflow as tf from official.modeling import grad_utils from official.modeling import performance class GradUtilsTest(tf.test.TestCase): def test_minimize(self): optimizer = tf.keras.optimizers.SGD(0.1) with tf.GradientTape() as tape: model = tf.keras.layers.Dense(2) outputs = model(tf.zeros((2, 2), tf.float32)) loss = tf.reduce_mean(outputs) grad_utils.minimize_using_explicit_allreduce(tape, optimizer, loss, model.trainable_variables) def test_minimize_fp16(self): optimizer = performance.configure_optimizer( tf.keras.optimizers.SGD(0.1), use_float16=True) performance.set_mixed_precision_policy(tf.float16) with tf.GradientTape() as tape: model = tf.keras.layers.Dense(2) outputs = model(tf.zeros((2, 2), tf.float16)) loss = tf.reduce_mean(outputs) grad_utils.minimize_using_explicit_allreduce(tape, optimizer, loss, model.trainable_variables) # Test other fp16 settings. def _clip_by_global_norm(grads_and_vars): grads, tvars = list(zip(*grads_and_vars)) (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0) return zip(grads, tvars) with tf.GradientTape() as tape: model = tf.keras.layers.Dense(2) outputs = model(tf.zeros((2, 2), tf.float16)) loss = tf.reduce_mean(outputs) optimizer = performance.configure_optimizer( tf.keras.optimizers.SGD(0.1), use_float16=True, loss_scale=128) grad_utils.minimize_using_explicit_allreduce( tape, optimizer, loss, model.trainable_variables, pre_allreduce_callbacks=[_clip_by_global_norm], post_allreduce_callbacks=[_clip_by_global_norm]) def test_set_mixed_precision_policy(self): performance.set_mixed_precision_policy(tf.float16) performance.set_mixed_precision_policy(tf.bfloat16) performance.set_mixed_precision_policy(tf.float32) with self.assertRaises(ValueError): performance.set_mixed_precision_policy(tf.int32) if __name__ == '__main__': tf.test.main()
2,786
34.730769
75
py
models
models-master/official/modeling/grad_utils.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Some gradient util functions to help users writing custom training loop.""" from absl import logging import tensorflow as tf def _filter_grads(grads_and_vars): """Filter out iterable with grad equal to None.""" grads_and_vars = tuple(grads_and_vars) if not grads_and_vars: return grads_and_vars filtered = [] vars_with_empty_grads = [] for grad, var in grads_and_vars: if grad is None: vars_with_empty_grads.append(var) else: filtered.append((grad, var)) filtered = tuple(filtered) if not filtered: raise ValueError("No gradients provided for any variable: %s." % ([v.name for _, v in grads_and_vars],)) if vars_with_empty_grads: logging.warning( ("Gradients do not exist for variables %s when minimizing the loss."), ([v.name for v in vars_with_empty_grads])) return filtered def _filter_and_allreduce_gradients(grads_and_vars, allreduce_precision="float32", bytes_per_pack=0): """Filter None grads and then allreduce gradients in specified precision. This utils function is used when users intent to explicitly allreduce gradients and customize gradients operations before and after allreduce. The allreduced gradients are then passed to optimizer.apply_gradients( experimental_aggregate_gradients=False). Args: grads_and_vars: gradients and variables pairs. allreduce_precision: Whether to allreduce gradients in float32 or float16. bytes_per_pack: A non-negative integer. Breaks collective operations into packs of certain size. If it's zero, all gradients are in one pack. Returns: pairs of allreduced non-None gradients and variables. """ filtered_grads_and_vars = _filter_grads(grads_and_vars) (grads, variables) = zip(*filtered_grads_and_vars) if allreduce_precision == "float16": grads = [tf.cast(grad, "float16") for grad in grads] hints = tf.distribute.experimental.CommunicationOptions( bytes_per_pack=bytes_per_pack) allreduced_grads = tf.distribute.get_strategy( # pylint: disable=protected-access ).extended._replica_ctx_all_reduce(tf.distribute.ReduceOp.SUM, grads, hints) if allreduce_precision == "float16": allreduced_grads = [tf.cast(grad, "float32") for grad in allreduced_grads] return allreduced_grads, variables def _run_callbacks(callbacks, grads_and_vars): for callback in callbacks: grads_and_vars = callback(grads_and_vars) return grads_and_vars def minimize_using_explicit_allreduce(tape, optimizer, loss, trainable_variables, pre_allreduce_callbacks=None, post_allreduce_callbacks=None, allreduce_bytes_per_pack=0): """Minimizes loss for one step by updating `trainable_variables`. Minimizes loss for one step by updating `trainable_variables`. This explicitly performs gradient allreduce, instead of relying on implicit allreduce in optimizer.apply_gradients(). If training using FP16 mixed precision, explicit allreduce will aggregate gradients in FP16 format. For TPU and GPU training using FP32, explicit allreduce will aggregate gradients in FP32 format. Args: tape: An instance of `tf.GradientTape`. 
optimizer: An instance of `tf.keras.optimizers.Optimizer`. loss: the loss tensor. trainable_variables: A list of model Variables. pre_allreduce_callbacks: A list of callback functions that take gradients and model variables pairs as input, manipulate them, and return new gradients and model variables pairs. The callback functions will be invoked in the list order and before gradients are allreduced. With mixed precision training, the pre_allreduce_callbacks will be applied to scaled_gradients. Default is no callbacks. post_allreduce_callbacks: A list of callback functions that take gradients and model variables pairs as input, manipulate them, and return new gradients and model variables pairs. The callback functions will be invoked in the list order and right before gradients are applied to variables for updates. Default is no callbacks. allreduce_bytes_per_pack: A non-negative integer. Breaks collective operations into packs of certain size. If it's zero, all gradients are in one pack. """ if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer): # FP16 GPU code path with tape: scaled_loss = optimizer.get_scaled_loss(loss) scaled_grads = tape.gradient(scaled_loss, trainable_variables) grads_and_vars = zip(scaled_grads, trainable_variables) if pre_allreduce_callbacks: grads_and_vars = _run_callbacks(pre_allreduce_callbacks, grads_and_vars) (allreduced_scaled_grads, filtered_training_vars) = _filter_and_allreduce_gradients( grads_and_vars, allreduce_precision="float16", bytes_per_pack=allreduce_bytes_per_pack) allreduced_unscaled_grads = optimizer.get_unscaled_gradients( allreduced_scaled_grads) grads_and_vars = zip(allreduced_unscaled_grads, filtered_training_vars) else: # TPU or FP32 GPU code path grads = tape.gradient(loss, trainable_variables) grads_and_vars = zip(grads, trainable_variables) if pre_allreduce_callbacks: grads_and_vars = _run_callbacks(pre_allreduce_callbacks, grads_and_vars) (allreduced_grads, filtered_training_vars) = _filter_and_allreduce_gradients( grads_and_vars, allreduce_precision="float32", bytes_per_pack=allreduce_bytes_per_pack) grads_and_vars = zip(allreduced_grads, filtered_training_vars) if post_allreduce_callbacks: grads_and_vars = _run_callbacks(post_allreduce_callbacks, grads_and_vars) optimizer.apply_gradients( grads_and_vars, experimental_aggregate_gradients=False)
6,730
43.282895
84
py
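A minimal custom training step built on minimize_using_explicit_allreduce above; the toy model, data, and loss here are hypothetical.

import tensorflow as tf

from official.modeling import grad_utils

model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
optimizer = tf.keras.optimizers.SGD(0.1)


def train_step(features, labels):
  with tf.GradientTape() as tape:
    loss = tf.reduce_mean(tf.square(model(features) - labels))
  # Gradients are filtered, explicitly allreduced, then applied without any
  # further aggregation inside the optimizer.
  grad_utils.minimize_using_explicit_allreduce(
      tape, optimizer, loss, model.trainable_variables)
  return loss


train_step(tf.zeros([4, 3]), tf.zeros([4, 2]))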
models
models-master/official/modeling/tf_utils_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tf_utils.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.modeling import tf_utils def all_strategy_combinations(): return combinations.combine( strategy=[ strategy_combinations.cloud_tpu_strategy, # TODO(b/285797201):disable multi-gpu tests due to hanging. # strategy_combinations.mirrored_strategy_with_two_gpus, ], mode='eager', ) class TFUtilsTest(tf.test.TestCase, parameterized.TestCase): @combinations.generate(all_strategy_combinations()) def test_cross_replica_concat(self, strategy): num_cores = strategy.num_replicas_in_sync shape = (2, 3, 4) def concat(axis): @tf.function def function(): replica_value = tf.fill(shape, tf_utils.get_replica_id()) return tf_utils.cross_replica_concat(replica_value, axis=axis) return function def expected(axis): values = [np.full(shape, i) for i in range(num_cores)] return np.concatenate(values, axis=axis) per_replica_results = strategy.run(concat(axis=0)) replica_0_result = per_replica_results.values[0].numpy() for value in per_replica_results.values[1:]: self.assertAllClose(value.numpy(), replica_0_result) self.assertAllClose(replica_0_result, expected(axis=0)) replica_0_result = strategy.run(concat(axis=1)).values[0].numpy() self.assertAllClose(replica_0_result, expected(axis=1)) replica_0_result = strategy.run(concat(axis=2)).values[0].numpy() self.assertAllClose(replica_0_result, expected(axis=2)) @combinations.generate(all_strategy_combinations()) def test_cross_replica_concat_gradient(self, strategy): num_cores = strategy.num_replicas_in_sync shape = (10, 5) @tf.function def function(): replica_value = tf.random.normal(shape) with tf.GradientTape() as tape: tape.watch(replica_value) concat_value = tf_utils.cross_replica_concat(replica_value, axis=0) output = tf.reduce_sum(concat_value) return tape.gradient(output, replica_value) per_replica_gradients = strategy.run(function) for gradient in per_replica_gradients.values: self.assertAllClose(gradient, num_cores * tf.ones(shape)) @parameterized.parameters(('relu', True), ('relu', False), ('leaky_relu', False), ('leaky_relu', True), ('mish', True), ('mish', False), ('gelu', True)) def test_get_activations(self, name, use_keras_layer): fn = tf_utils.get_activation(name, use_keras_layer) self.assertIsNotNone(fn) @combinations.generate(all_strategy_combinations()) def test_get_leaky_relu_layer(self, strategy): @tf.function def forward(x): fn = tf_utils.get_activation( 'leaky_relu', use_keras_layer=True, alpha=0.1) return strategy.run(fn, args=(x,)).values[0] got = forward(tf.constant([-1])) self.assertAllClose(got, tf.constant([-0.1])) if __name__ == '__main__': tf.test.main()
3,766
33.559633
76
py
models
models-master/official/modeling/fast_training/progressive/policies.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base ProgressivePolicy definition for progressive training. To write a progressive model, subclass ProgressivePolicy and implement its abstract methods to handle each training stage. """ import abc import dataclasses from typing import Any, Mapping from absl import logging import six import tensorflow as tf from official.common import streamz_counters from official.modeling.fast_training.progressive import utils from official.modeling.hyperparams import base_config @dataclasses.dataclass class ProgressiveConfig(base_config.Config): pass @six.add_metaclass(abc.ABCMeta) class ProgressivePolicy: """The APIs for handling progressive training stages. Attributes: cur_model: The model for the current progressive training stage. cur_train_dataset: The train dataset function for the current stage. cur_eval_dataset: The eval dataset function for the current stage. cur_optimizer: The optimizer for the current stage. cur_checkpoint_items: Items to be saved in and restored from checkpoints, for the progressive trainer. is_last_stage: Whether it is currently in the last stage. Interfaces: is_stage_advancing: Returns if progressive training is advancing to the next stage. update_pt_stage: Update progressive training stage. """ def __init__(self): """Initialize stage policy.""" self._cur_train_dataset = None self._cur_eval_dataset = None self._volatiles = utils.VolatileTrackable(optimizer=None, model=None) stage_id = 0 self._stage_id = tf.Variable( stage_id, trainable=False, dtype=tf.int64, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, shape=[]) self._volatiles.reassign_trackable( optimizer=self.get_optimizer(stage_id), model=self.get_model(stage_id, old_model=None)) # pytype: disable=wrong-arg-types # typed-keras streamz_counters.progressive_policy_creation_counter.get_cell( ).increase_by(1) def compute_stage_id(self, global_step: int) -> int: for stage_id in range(self.num_stages()): global_step -= self.num_steps(stage_id) if global_step < 0: return stage_id logging.error('Global step %d found no matching progressive stages. ' 'Default to the last stage.', global_step) return self.num_stages() - 1 @abc.abstractmethod def num_stages(self) -> int: """Return the total number of progressive stages.""" pass @abc.abstractmethod def num_steps(self, stage_id: int) -> int: """Return the total number of steps in this stage.""" pass @abc.abstractmethod def get_model(self, stage_id: int, old_model: tf.keras.Model = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras """Return model for this stage. 
For initialization, `old_model` = None.""" pass @abc.abstractmethod def get_optimizer(self, stage_id: int) -> tf.keras.optimizers.Optimizer: """Return optimizer for this stage.""" pass @abc.abstractmethod def get_train_dataset(self, stage_id: int) -> tf.data.Dataset: """Return training Dataset for this stage.""" pass @abc.abstractmethod def get_eval_dataset(self, stage_id: int) -> tf.data.Dataset: """Return evaluation Dataset for this stage.""" pass @property def cur_model(self) -> tf.keras.Model: return self._volatiles.model @property def cur_train_dataset(self) -> tf.data.Dataset: if self._cur_train_dataset is None: self._cur_train_dataset = self.get_train_dataset(self._stage_id.numpy()) return self._cur_train_dataset @property def cur_eval_dataset(self) -> tf.data.Dataset: if self._cur_eval_dataset is None: self._cur_eval_dataset = self.get_eval_dataset(self._stage_id.numpy()) return self._cur_eval_dataset @property def cur_optimizer(self) -> tf.keras.optimizers.Optimizer: return self._volatiles.optimizer @property def is_last_stage(self) -> bool: stage_id = self._stage_id.numpy() return stage_id >= self.num_stages() - 1 @property def cur_checkpoint_items(self) -> Mapping[str, Any]: return dict(stage_id=self._stage_id, volatiles=self._volatiles) def is_stage_advancing(self, global_step: int) -> bool: old_stage_id = self._stage_id.numpy() new_stage_id = self.compute_stage_id(global_step) return old_stage_id != new_stage_id def update_pt_stage(self, global_step: int, pass_old_model=True) -> None: """Update progressive training internal status. Call this after a training loop ends. Args: global_step: an integer scalar of the current global step. pass_old_model: whether to pass the old_model to get_model() function. This is set to False if the old_model is irrelevant (e.g, just a default model from stage 0). """ old_stage_id = self._stage_id.numpy() new_stage_id = self.compute_stage_id(global_step) logging.info('Switching stage from %d to %d', old_stage_id, new_stage_id) # Update stage id. self._stage_id.assign(new_stage_id) # Update dataset function. self._cur_train_dataset = None self._cur_eval_dataset = None # Update optimizer and model. new_optimizer = self.get_optimizer(new_stage_id) self._volatiles.reassign_trackable(optimizer=new_optimizer) new_model = self.get_model( new_stage_id, old_model=self.cur_model if pass_old_model else None) self._volatiles.reassign_trackable(model=new_model)
6,138
33.296089
127
py
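A minimal concrete subclass of ProgressivePolicy above (a skeleton sketch; the stage lengths, model widths, and toy dataset are hypothetical).

import tensorflow as tf

from official.modeling.fast_training.progressive import policies


class TwoStagePolicy(policies.ProgressivePolicy):
  """Stage 0 trains a narrow model; stage 1 switches to a wider one."""

  def num_stages(self) -> int:
    return 2

  def num_steps(self, stage_id: int) -> int:
    return 1000 if stage_id == 0 else 4000

  def get_model(self, stage_id, old_model=None):
    del old_model  # A real policy would warm-start from the previous stage.
    width = 64 if stage_id == 0 else 128
    return tf.keras.Sequential(
        [tf.keras.layers.Dense(width, activation="relu"),
         tf.keras.layers.Dense(1)])

  def get_optimizer(self, stage_id):
    return tf.keras.optimizers.SGD(0.1 if stage_id == 0 else 0.01)

  def get_train_dataset(self, stage_id):
    data = (tf.zeros([8, 4]), tf.zeros([8, 1]))
    return tf.data.Dataset.from_tensors(data).repeat()

  def get_eval_dataset(self, stage_id):
    return tf.data.Dataset.from_tensors((tf.zeros([8, 4]), tf.zeros([8, 1])))


policy = TwoStagePolicy()
print(policy.num_stages(), policy.is_last_stage)  # 2 False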
models
models-master/official/modeling/fast_training/progressive/train_lib.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TFM progressive training driver library. Compared to the common training driver, the only difference is that we use prog_trainer_lib.ProgressiveTrainer instead of the base trainer. """ # pytype: disable=attribute-error import os from typing import Any, Mapping, Tuple # Import libraries from absl import logging import orbit import tensorflow as tf from official.core import base_task from official.core import config_definitions from official.core import train_lib as base_train_lib from official.modeling.fast_training.progressive import trainer as prog_trainer_lib def run_experiment(distribution_strategy: tf.distribute.Strategy, task: base_task.Task, mode: str, params: config_definitions.ExperimentConfig, model_dir: str, run_post_eval: bool = False, save_summary: bool = True) \ -> Tuple[tf.keras.Model, Mapping[str, Any]]: """Runs train/eval configured by the experiment params. Args: distribution_strategy: A distribution distribution_strategy. task: A Task instance. mode: A 'str', specifying the mode. Can be 'train', 'eval', 'train_and_eval' or 'continuous_eval'. params: ExperimentConfig instance. model_dir: A 'str', a path to store model checkpoints and summaries. run_post_eval: Whether to run post eval once after training, metrics logs are returned. save_summary: Whether to save train and validation summary. Returns: A 2-tuple of (model, eval_logs). model: `tf.keras.Model` instance. eval_logs: returns eval metrics logs when run_post_eval is set to True, otherwise, returns {}. 
""" with distribution_strategy.scope(): logging.info('Running progressive trainer.') trainer = prog_trainer_lib.ProgressiveTrainer( params, task, ckpt_dir=model_dir, train='train' in mode, evaluate=('eval' in mode) or run_post_eval, checkpoint_exporter=base_train_lib.maybe_create_best_ckpt_exporter( params, model_dir)) if trainer.checkpoint: checkpoint_manager = tf.train.CheckpointManager( trainer.checkpoint, directory=model_dir, max_to_keep=params.trainer.max_to_keep, step_counter=trainer.global_step, checkpoint_interval=params.trainer.checkpoint_interval, init_fn=trainer.initialize) else: checkpoint_manager = None controller = orbit.Controller( strategy=distribution_strategy, trainer=trainer if 'train' in mode else None, evaluator=trainer, global_step=trainer.global_step, steps_per_loop=params.trainer.steps_per_loop, checkpoint_manager=checkpoint_manager, summary_dir=os.path.join(model_dir, 'train') if (save_summary) else None, eval_summary_dir=os.path.join(model_dir, 'validation') if (save_summary) else None, summary_interval=params.trainer.summary_interval if (save_summary) else None) logging.info('Starts to execute mode: %s', mode) with distribution_strategy.scope(): if mode == 'train': controller.train(steps=params.trainer.train_steps) elif mode == 'train_and_eval': controller.train_and_evaluate( train_steps=params.trainer.train_steps, eval_steps=params.trainer.validation_steps, eval_interval=params.trainer.validation_interval) elif mode == 'eval': controller.evaluate(steps=params.trainer.validation_steps) elif mode == 'continuous_eval': def timeout_fn(): if trainer.global_step.numpy() >= params.trainer.train_steps: return True return False controller.evaluate_continuously( steps=params.trainer.validation_steps, timeout=params.trainer.continuous_eval_timeout, timeout_fn=timeout_fn) else: raise NotImplementedError('The mode is not implemented: %s' % mode) if run_post_eval: with distribution_strategy.scope(): return trainer.model, trainer.evaluate( tf.convert_to_tensor(params.trainer.validation_steps)) else: return trainer.model, {}
4,740
36.330709
83
py
models
models-master/official/modeling/fast_training/progressive/trainer_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the progressive trainer.""" # pylint: disable=g-direct-tensorflow-import import os from absl.testing import parameterized import orbit import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.core import config_definitions as cfg from official.modeling import optimization from official.modeling.fast_training.progressive import policies from official.modeling.fast_training.progressive import trainer as trainer_lib from official.nlp.configs import bert from official.utils.testing import mock_task def all_strategy_combinations(): return combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy_gpu, ],) def get_exp_config(): return cfg.ExperimentConfig( task=cfg.TaskConfig( model=bert.PretrainerConfig()), trainer=trainer_lib.ProgressiveTrainerConfig( export_checkpoint=True, export_checkpoint_interval=1, export_only_final_stage_ckpt=False)) class TestPolicy(policies.ProgressivePolicy, mock_task.MockTask): """Just for testing purposes.""" def __init__(self, strategy, task_config, change_train_dataset=True): self._strategy = strategy self._change_train_dataset = change_train_dataset self._my_train_dataset = None mock_task.MockTask.__init__(self, params=task_config, logging_dir=None) policies.ProgressivePolicy.__init__(self) def num_stages(self) -> int: return 2 def num_steps(self, stage_id: int) -> int: return 2 if stage_id == 0 else 4 def get_model(self, stage_id: int, old_model: tf.keras.Model) -> tf.keras.Model: del stage_id, old_model return self.build_model() def get_optimizer(self, stage_id: int) -> tf.keras.optimizers.Optimizer: optimizer_type = 'sgd' if stage_id == 0 else 'adamw' optimizer_config = cfg.OptimizationConfig({ 'optimizer': {'type': optimizer_type}, 'learning_rate': {'type': 'constant'}}) opt_factory = optimization.OptimizerFactory(optimizer_config) return opt_factory.build_optimizer(opt_factory.build_learning_rate()) def get_train_dataset(self, stage_id: int) -> tf.data.Dataset: if not self._change_train_dataset and self._my_train_dataset: return self._my_train_dataset if self._strategy: self._my_train_dataset = orbit.utils.make_distributed_dataset( self._strategy, self._build_inputs, stage_id) else: self._my_train_dataset = self._build_inputs(stage_id) return self._my_train_dataset def get_eval_dataset(self, stage_id: int) -> tf.data.Dataset: if self._strategy: return orbit.utils.make_distributed_dataset( self._strategy, self._build_inputs, stage_id) return self._build_inputs(stage_id) def _build_inputs(self, stage_id): def dummy_data(_): batch_size = 2 if stage_id == 0 else 1 x = tf.zeros(shape=(batch_size, 2), dtype=tf.float32) label = tf.zeros(shape=(batch_size, 1), dtype=tf.float32) return x, label dataset = tf.data.Dataset.range(1) dataset = dataset.repeat() return 
dataset.map( dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE) class TrainerTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): super(TrainerTest, self).setUp() self._config = get_exp_config() def create_test_trainer(self, distribution, model_dir, change_train_dataset): trainer = trainer_lib.ProgressiveTrainer( self._config, prog_task=TestPolicy( distribution, self._config.task, change_train_dataset), ckpt_dir=model_dir) return trainer @combinations.generate(all_strategy_combinations()) def test_checkpointing(self, distribution): model_dir = self.get_temp_dir() ckpt_file = os.path.join(model_dir, 'ckpt') with distribution.scope(): trainer = self.create_test_trainer(distribution, model_dir, True) self.assertFalse(trainer._task.is_last_stage) trainer.train(tf.convert_to_tensor(4, dtype=tf.int32)) self.assertTrue(trainer._task.is_last_stage) trainer.checkpoint.save(ckpt_file) trainer = self.create_test_trainer(distribution, model_dir, True) self.assertFalse(trainer._task.is_last_stage) trainer.checkpoint.restore(ckpt_file + '-1') self.assertTrue(trainer._task.is_last_stage) @combinations.generate(all_strategy_combinations()) def test_train_dataset(self, distribution): model_dir = self.get_temp_dir() with distribution.scope(): trainer = self.create_test_trainer(distribution, model_dir, True) # Using dataset of stage == 0 train_iter = tf.nest.map_structure(iter, trainer.train_dataset) train_data = train_iter.next()[0] if distribution.num_replicas_in_sync > 1: train_data = train_data.values[0] self.assertEqual(train_data.shape[0], 2) trainer.train(tf.convert_to_tensor(4, dtype=tf.int32)) # Using dataset of stage == 1 train_iter = tf.nest.map_structure(iter, trainer.train_dataset) train_data = train_iter.next()[0] if distribution.num_replicas_in_sync > 1: train_data = train_data.values[0] self.assertEqual(train_data.shape[0], 1) with self.assertRaises(SyntaxError): trainer.train_dataset = None @combinations.generate(all_strategy_combinations()) def test_train_dataset_no_switch(self, distribution): model_dir = self.get_temp_dir() with distribution.scope(): trainer = self.create_test_trainer(distribution, model_dir, False) trainer.train(tf.convert_to_tensor(2, dtype=tf.int32)) # _train_iter is not reset since the dataset is not changed. self.assertIsNotNone(trainer._train_iter) with distribution.scope(): trainer = self.create_test_trainer(distribution, model_dir, True) trainer.train(tf.convert_to_tensor(2, dtype=tf.int32)) # _train_iter is reset since the dataset changed. 
self.assertIsNone(trainer._train_iter) class TrainerWithMaskedLMTaskTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): super(TrainerWithMaskedLMTaskTest, self).setUp() self._config = get_exp_config() def create_test_trainer(self, distribution): trainer = trainer_lib.ProgressiveTrainer( self._config, prog_task=TestPolicy(distribution, self._config.task), ckpt_dir=self.get_temp_dir()) return trainer @combinations.generate(all_strategy_combinations()) def test_trainer_train(self, distribution): with distribution.scope(): trainer = self.create_test_trainer(distribution) logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32)) self.assertIn('training_loss', logs) self.assertIn('learning_rate', logs) @combinations.generate(all_strategy_combinations()) def test_trainer_validate(self, distribution): with distribution.scope(): trainer = self.create_test_trainer(distribution) logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32)) self.assertIn('validation_loss', logs) self.assertEqual(logs['counter'], 5. * distribution.num_replicas_in_sync) @combinations.generate( combinations.combine( mixed_precision_dtype=['float32', 'bfloat16', 'float16'], loss_scale=[None, 'dynamic', 128, 256], )) def test_configure_optimizer(self, mixed_precision_dtype, loss_scale): config = cfg.ExperimentConfig( task=cfg.TaskConfig( model=bert.PretrainerConfig()), runtime=cfg.RuntimeConfig( mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale), trainer=trainer_lib.ProgressiveTrainerConfig( export_checkpoint=True, export_checkpoint_interval=1, export_only_final_stage_ckpt=False)) task = TestPolicy(None, config.task) trainer = trainer_lib.ProgressiveTrainer(config, task, self.get_temp_dir()) if mixed_precision_dtype != 'float16': self.assertIsInstance( trainer.optimizer, (tf.keras.optimizers.SGD, tf.keras.optimizers.legacy.SGD)) elif mixed_precision_dtype == 'float16' and loss_scale is None: self.assertIsInstance( trainer.optimizer, (tf.keras.optimizers.SGD, tf.keras.optimizers.legacy.SGD)) metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32)) self.assertIn('training_loss', metrics) if __name__ == '__main__': tf.test.main()
9,288
37.226337
80
py
models
models-master/official/modeling/fast_training/progressive/trainer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Progressive Trainer implementation. The trainer implements the Orbit `StandardTrainable` and `StandardEvaluable` interfaces. Trainers inside this project should be interchangable and independent on model architectures and tasks. """ import dataclasses import os from typing import Any, Optional # Import libraries from absl import logging import gin import orbit import tensorflow as tf from official.core import base_task from official.core import base_trainer as trainer_lib from official.core import config_definitions from official.modeling.fast_training.progressive import policies from official.modeling.fast_training.progressive import utils ExperimentConfig = config_definitions.ExperimentConfig @dataclasses.dataclass class ProgressiveTrainerConfig(config_definitions.TrainerConfig): """Configuration for progressive trainer. Attributes: progressive: A task-specific config. Users can subclass ProgressiveConfig and define any task-specific settings in their subclass. export_checkpoint: A bool. Whether to export checkpoints in non-progressive manner (without the volatiles wrapper) such that your down-stream tasks can load checkpoints from a progressive trainer as if it is a regular checkpoint. export_checkpoint_interval: A bool. The number of steps between exporting checkpoints. If None (by default), will use the same value as TrainerConfig.checkpoint_interval. export_max_to_keep: The maximum number of exported checkpoints to keep. If None (by default), will use the same value as TrainerConfig.max_to_keep. export_only_final_stage_ckpt: A bool. Whether to just export checkpoints during the final progressive training stage. In other words, whether to not export small, partial models. In many cases, it is not meaningful to finetune a small, partial model in down-stream tasks. """ progressive: Optional[policies.ProgressiveConfig] = None export_checkpoint: bool = True export_checkpoint_interval: Optional[int] = None export_max_to_keep: Optional[int] = None export_only_final_stage_ckpt: bool = True @gin.configurable class ProgressiveTrainer(trainer_lib.Trainer): """Implements the progressive trainer shared for TensorFlow models.""" def __init__( self, config: ExperimentConfig, prog_task: base_task.Task, # also implemented ProgressivePolicy. ckpt_dir: str = '', train: bool = True, evaluate: bool = True, checkpoint_exporter: Any = None): """Initialize common trainer for TensorFlow models. Args: config: An `ExperimentConfig` instance specifying experiment config. prog_task: An instance both implemented policies.ProgressivePolicy and base_task.Task. ckpt_dir: Checkpoint directory. train: bool, whether or not this trainer will be used for training. default to True. evaluate: bool, whether or not this trainer will be used for evaluation. default to True. checkpoint_exporter: an object that has the `maybe_export_checkpoint` interface. """ # Gets the current distribution strategy. 
If not inside any strategy scope, # it gets a single-replica no-op strategy. self._strategy = tf.distribute.get_strategy() self._config = config self._runtime_options = trainer_lib.get_runtime_options(config) self._task = prog_task # Directory for non-progressive checkpoint self._export_ckpt_dir = os.path.join(ckpt_dir, 'exported_ckpts') tf.io.gfile.makedirs(self._export_ckpt_dir) self._export_ckpt_manager = None # Receive other checkpoint export, e.g, best checkpoint exporter. # TODO(lehou): unify the checkpoint exporting logic, although the default # setting does not use checkpoint_exporter. self._checkpoint_exporter = checkpoint_exporter self._global_step = orbit.utils.create_global_step() self._checkpoint = utils.CheckpointWithHooks( before_load_hook=self._update_pt_stage_from_ckpt, global_step=self.global_step, **self._task.cur_checkpoint_items) self._train_loss = tf.keras.metrics.Mean('training_loss', dtype=tf.float32) self._validation_loss = tf.keras.metrics.Mean( 'validation_loss', dtype=tf.float32) self._train_metrics = self.task.build_metrics( training=True) + self.model.metrics self._validation_metrics = self.task.build_metrics( training=False) + self.model.metrics if train: orbit.StandardTrainer.__init__( self, None, # Manage train_dataset by ourselves, not by StandardTrainer. options=orbit.StandardTrainerOptions( use_tf_while_loop=config.trainer.train_tf_while_loop, use_tf_function=config.trainer.train_tf_function)) if evaluate: orbit.StandardEvaluator.__init__( self, None, # Manage train_dataset by ourselves, not by StandardEvaluator. options=orbit.StandardEvaluatorOptions( use_tf_function=config.trainer.eval_tf_function)) @property def model(self): return self._task.cur_model @property def optimizer(self): return self._task.cur_optimizer # override @property def train_dataset(self): """Overriding StandardTrainer.train_dataset.""" return self._task.cur_train_dataset # override @train_dataset.setter def train_dataset(self, _): raise SyntaxError('Please do not set train_dataset. Progressive training ' 'relies on progressive policy to manager train dataset.') # override @property def eval_dataset(self): """Overriding StandardEvaluator.eval_dataset.""" return self._task.cur_eval_dataset # override @eval_dataset.setter def eval_dataset(self, _): raise SyntaxError('Please do not set eval_dataset. Progressive training ' 'relies on progressive policy to manager eval dataset.') def train_loop_end(self): """See base class.""" logs = {} for metric in self.train_metrics + [self.train_loss]: logs[metric.name] = metric.result() metric.reset_states() if callable(self.optimizer.learning_rate): logs['learning_rate'] = self.optimizer.learning_rate( self.optimizer.iterations) else: logs['learning_rate'] = self.optimizer.learning_rate self._maybe_export_non_progressive_checkpoint(self._export_ckpt_dir) if self._task.is_stage_advancing(self.global_step.numpy()): old_train_dataset = self.train_dataset # Update progressive properties self._task.update_pt_stage(self.global_step.numpy()) # Setting `self._train_loop_fn` and `self._eval_loop_fn` to None will # rebuild the train and eval functions with the updated model. self._train_loop_fn = None self._eval_loop_fn = None if self.train_dataset != old_train_dataset: # Setting `self._train_iter` to None will rebuild the dataset iterator. self._train_iter = None # Setting `self._export_ckpt_manager` to None will rebuild the checkpoint # for exporting. 
self._export_ckpt_manager = None return logs def _update_pt_stage_from_ckpt(self, ckpt_file): """Update stage properties based on the global_step variable in a ckpt file. Before loading variables from a checkpoint file, we need to go to the correct stage and build corresponding model and optimizer, to make sure that we retore variables of the right model and optimizer. Args: ckpt_file: Checkpoint file that will be restored/read from. """ if not ckpt_file: return ckpt = tf.train.Checkpoint(global_step=self.global_step) ckpt.read(ckpt_file).expect_partial().assert_existing_objects_matched() if self._task.is_stage_advancing(self.global_step.numpy()): old_train_dataset = self.train_dataset # Update progressive properties self._task.update_pt_stage(self.global_step.numpy(), pass_old_model=False) # Setting `self._train_loop_fn` and `self._eval_loop_fn` to None will # rebuild the train and eval functions with the updated model. self._train_loop_fn = None self._eval_loop_fn = None if self.train_dataset != old_train_dataset: # Setting `self._train_iter` to None will rebuild the dataset iterator. self._train_iter = None # Setting `self._export_ckpt_manager` to None will rebuild the checkpoint # for exporting. self._export_ckpt_manager = None def _maybe_export_non_progressive_checkpoint(self, export_ckpt_dir): """Export checkpoints in non-progressive format. This basically removes the wrapping of self._task.cur_checkpoint_items -- just save the model, optimizer, etc., directly. The purpose is to let your down-stream tasks to use these checkpoints. Args: export_ckpt_dir: A str. folder of exported checkpoints. """ if not self.config.trainer.export_checkpoint: logging.info('Not exporting checkpoints.') return if not self._task.is_last_stage and ( self.config.trainer.export_only_final_stage_ckpt): logging.info('Not exporting checkpoints until the last stage.') return if self._export_ckpt_manager is None: # Create a checkpoint object just now, to make sure we use # progressive_policy.cur_model and progressive_policy.cur_optimizer of the # current stage. if hasattr(self.model, 'checkpoint_items'): checkpoint_items = self.model.checkpoint_items else: checkpoint_items = {} checkpoint = tf.train.Checkpoint( global_step=self.global_step, model=self.model, optimizer=self.optimizer, **checkpoint_items) max_to_keep = self.config.trainer.export_max_to_keep or ( self.config.trainer.max_to_keep) checkpoint_interval = self.config.trainer.export_checkpoint_interval or ( self.config.trainer.checkpoint_interval) self._export_ckpt_manager = tf.train.CheckpointManager( checkpoint, directory=export_ckpt_dir, checkpoint_name='ckpt', step_counter=self.global_step, max_to_keep=max_to_keep, checkpoint_interval=checkpoint_interval, ) # Make sure we export the last checkpoint. last_checkpoint = ( self.global_step.numpy() == self._config.trainer.train_steps) checkpoint_path = self._export_ckpt_manager.save( checkpoint_number=self.global_step.numpy(), check_interval=not last_checkpoint) if checkpoint_path: logging.info('Checkpoints exported: %s.', checkpoint_path)
11,293
37.284746
80
py
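The `_maybe_export_non_progressive_checkpoint` method above writes `global_step`, `model`, and `optimizer` (plus any `checkpoint_items`) directly, so downstream code can load them without knowing anything about progressive stages. Below is a minimal sketch of such a downstream restore; the export directory path and the placeholder model/optimizer are hypothetical and must be replaced with the real downstream objects.

```
import tensorflow as tf

# Placeholder downstream model/optimizer; in practice these must match the
# architecture and optimizer that were trained progressively.
model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
optimizer = tf.keras.optimizers.legacy.Adam()

# Mirror the structure written by the exporter: global_step, model, optimizer.
ckpt = tf.train.Checkpoint(
    global_step=tf.Variable(0, dtype=tf.int64, trainable=False),
    model=model,
    optimizer=optimizer)

# Hypothetical directory that the trainer exported into.
export_ckpt_dir = '/tmp/my_experiment/exported_ckpts'
latest = tf.train.latest_checkpoint(export_ckpt_dir)
if latest:
  # expect_partial() tolerates extra objects that the downstream task ignores.
  ckpt.restore(latest).expect_partial()
```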
models
models-master/official/modeling/fast_training/experimental/tf2_utils_2x_wide.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Stacking model horizontally."""

from absl import logging
import numpy as np
import tensorflow as tf


def expand_vector(v: np.ndarray) -> np.ndarray:
  """Expands a vector with batch dimensions.

  Equivalent to expand_1_axis(v, epsilon=0.0, axis=-1)

  Args:
    v: A vector with shape [..., a].

  Returns:
    A vector with shape [..., 2 * a].
  """
  return np.repeat(v, 2, axis=-1)


def expand_1_axis(w: np.ndarray, epsilon: float, axis: int) -> np.ndarray:
  """Expands either the first dimension or the last dimension of w.

  If `axis = 0`, the following constraint will be satisfied:
    matmul(x, w) ==
        matmul(expand_vector(x), expand_1_axis(w, epsilon=0.1, axis=0))

  If `axis = -1`, the following constraint will be satisfied if
  `epsilon = 0.0`:
    expand_vector(matmul(x, w)) ==
        2 * matmul(x, expand_1_axis(w, epsilon=0.0, axis=-1))

  Args:
    w: Numpy array of shape [a_0, a_1, ..., a_i-1, a_i].
    epsilon: Symmetric noise added to the expanded tensor.
    axis: Must be either 0 or -1.

  Returns:
    Expanded numpy array.
  """
  assert axis in (0, -1), (
      "Only support expanding the first or the last dimension. "
      "Got: {}".format(axis))

  rank = len(w.shape)

  d_w = np.random.normal(np.zeros_like(w), np.fabs(w) * epsilon, w.shape)
  d_w = np.repeat(d_w, 2, axis=axis)

  sign_flip = np.array([1, -1])
  for _ in range(rank - 1):
    sign_flip = np.expand_dims(sign_flip, axis=-1 if axis == 0 else 0)
  sign_flip = np.tile(sign_flip,
                      [w.shape[0]] + [1] * (rank - 2) + [w.shape[-1]])

  d_w *= sign_flip
  w_expand = (np.repeat(w, 2, axis=axis) + d_w) / 2
  return w_expand


def expand_2_axes(w: np.ndarray, epsilon: float) -> np.ndarray:
  """Expands the first dimension and the last dimension of w.

  The following constraint will be satisfied:
    expand_vector(matmul(x, w)) ==
        matmul(expand_vector(x), expand_2_axes(w))

  Args:
    w: Numpy array of shape [a_0, a_1, ..., a_i-1, a_i].
    epsilon: Symmetric noise added to the expanded tensor.

  Returns:
    Expanded numpy array.
  """
  rank = len(w.shape)

  d_w = np.random.normal(np.zeros_like(w), np.fabs(w) * epsilon, w.shape)
  d_w = np.repeat(np.repeat(d_w, 2, axis=0), 2, axis=-1)

  sign_flip = np.array([1, -1])
  for _ in range(rank - 1):
    sign_flip = np.expand_dims(sign_flip, axis=-1)
  sign_flip = np.tile(sign_flip,
                      [w.shape[0]] + [1] * (rank - 2) + [w.shape[-1] * 2])
  d_w *= sign_flip

  w_expand = (np.repeat(np.repeat(w, 2, axis=0), 2, axis=-1) + d_w) / 2
  return w_expand


def var_to_var(var_from: tf.Variable, var_to: tf.Variable, epsilon: float):
  """Expands a variable to another variable.

  Assume the shape of `var_from` is (a, b, ..., y, z), the shape of `var_to`
  can be (a, ..., z * 2), (a * 2, ..., z * 2), (a * 2, ..., z)

  If the shape of `var_to` is (a, ..., 2 * z):
    For any x, tf.matmul(x, var_to) ~=
        expand_vector(tf.matmul(x, var_from)) / 2
    Note that there will be noise added to the left hand side, if epsilon != 0.

  If the shape of `var_to` is (2 * a, ..., z):
    For any x, tf.matmul(expand_vector(x), var_to) == tf.matmul(x, var_from)

  If the shape of `var_to` is (2 * a, ..., 2 * z):
    For any x, tf.matmul(expand_vector(x), var_to) ==
        expand_vector(tf.matmul(expand_vector(x), var_from))

  Args:
    var_from: input variable to expand.
    var_to: output variable.
    epsilon: the noise ratio that will be added, when splitting `var_from`.
  """
  shape_from = var_from.shape
  shape_to = var_to.shape

  if shape_from == shape_to:
    var_to.assign(var_from)

  elif len(shape_from) == 1 and len(shape_to) == 1:
    var_to.assign(expand_vector(var_from.numpy()))

  elif shape_from[0] * 2 == shape_to[0] and shape_from[-1] == shape_to[-1]:
    var_to.assign(expand_1_axis(var_from.numpy(), epsilon=epsilon, axis=0))

  elif shape_from[0] == shape_to[0] and shape_from[-1] * 2 == shape_to[-1]:
    var_to.assign(expand_1_axis(var_from.numpy(), epsilon=epsilon, axis=-1))

  elif shape_from[0] * 2 == shape_to[0] and shape_from[-1] * 2 == shape_to[-1]:
    var_to.assign(expand_2_axes(var_from.numpy(), epsilon=epsilon))

  else:
    raise ValueError("Shape not supported, {}, {}".format(shape_from,
                                                          shape_to))


def model_to_model_2x_wide(model_from: tf.Module,
                           model_to: tf.Module,
                           epsilon: float = 0.1):
  """Expands a model to a wider version.

  Also makes sure that the output of the model is not changed after expanding.
  For example:
  ```
  model_narrow = tf.keras.Sequential()
  model_narrow.add(tf.keras.Input(shape=(3,)))
  model_narrow.add(tf.keras.layers.Dense(4))
  model_narrow.add(tf.keras.layers.Dense(1))

  model_wide = tf.keras.Sequential()
  model_wide.add(tf.keras.Input(shape=(6,)))
  model_wide.add(tf.keras.layers.Dense(8))
  model_wide.add(tf.keras.layers.Dense(1))

  model_to_model_2x_wide(model_narrow, model_wide)
  assert model_narrow([[1, 2, 3]]) == model_wide([[1, 1, 2, 2, 3, 3]])
  ```

  We assume that `model_from` and `model_to` have the same architecture and
  only their widths differ.

  Args:
    model_from: input model to expand.
    model_to: output model whose variables will be assigned expanded values
      according to `model_from`.
    epsilon: the noise ratio that will be added, when splitting `var_from`.
  """
  for w_from, w_to in zip(model_from.trainable_variables,
                          model_to.trainable_variables):
    logging.info("expanding %s %s to %s %s",
                 w_from.name, w_from.shape, w_to.name, w_to.shape)
    var_to_var(w_from, w_to, epsilon=epsilon)
6,307
32.73262
80
py
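The constraints stated in the `expand_1_axis` docstring can be checked numerically. The sketch below is illustrative only; the array shapes, random seed, and tolerances are arbitrary choices, while the function names and module path come from the file above.

```
import numpy as np

from official.modeling.fast_training.experimental import tf2_utils_2x_wide

rng = np.random.default_rng(0)
x = rng.normal(size=(5, 3))   # a batch of narrow activations
w = rng.normal(size=(3, 4))   # a narrow weight matrix

# axis=0: duplicating the input features and expanding the first weight
# dimension leaves matmul(x, w) unchanged, regardless of epsilon, because the
# symmetric +/- noise cancels.
w_wide_in = tf2_utils_2x_wide.expand_1_axis(w, epsilon=0.1, axis=0)
x_wide = tf2_utils_2x_wide.expand_vector(x)
np.testing.assert_allclose(x @ w, x_wide @ w_wide_in, atol=1e-10)

# axis=-1 with epsilon=0: each output unit is split into two identical halves,
# so twice the wide output equals the duplicated narrow output.
w_wide_out = tf2_utils_2x_wide.expand_1_axis(w, epsilon=0.0, axis=-1)
np.testing.assert_allclose(
    tf2_utils_2x_wide.expand_vector(x @ w), 2 * (x @ w_wide_out), atol=1e-10)
```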
models
models-master/official/modeling/fast_training/experimental/tf2_utils_2x_wide_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for tf2_utils_2x_wide."""

import numpy as np
import tensorflow as tf

from official.modeling.fast_training.experimental import tf2_utils_2x_wide


class Tf2Utils2XWideTest(tf.test.TestCase):

  def test_expand_vector(self):
    x = np.array([1, 2])
    self.assertAllClose(tf2_utils_2x_wide.expand_vector(x),
                        np.array([1, 1, 2, 2]))

  def test_expand_matrix(self):
    x = np.array([[1, 2], [3, 4]])
    x = tf2_utils_2x_wide.expand_2_axes(x, epsilon=0.1)
    self.assertAllClose(x[0, :] + x[1, :], np.array([1, 1, 2, 2]))
    self.assertAllClose(x[2, :] + x[3, :], np.array([3, 3, 4, 4]))

  def test_expand_matrix_axis_0(self):
    x = np.array([[1, 2], [3, 4]])
    x = tf2_utils_2x_wide.expand_1_axis(x, axis=0, epsilon=0.1)
    self.assertAllClose(x[0, :] + x[1, :], np.array([1, 2]))
    self.assertAllClose(x[2, :] + x[3, :], np.array([3, 4]))

  def test_expand_matrix_axis_1(self):
    x = np.array([[1, 2], [3, 4]])
    x = tf2_utils_2x_wide.expand_1_axis(x, axis=-1, epsilon=0.1)
    self.assertAllClose(x[:, 0] + x[:, 1], np.array([1, 3]))
    self.assertAllClose(x[:, 2] + x[:, 3], np.array([2, 4]))

  def test_expand_3d_tensor(self):
    x0 = np.array([10, 11])
    x1 = np.array([10, 10, 11, 11])
    w0 = np.random.rand(2, 2)
    w1 = tf2_utils_2x_wide.expand_2_axes(w0, epsilon=0.1)
    o0 = np.matmul(x0, w0)
    o1 = np.matmul(x1, w1)
    self.assertAllClose(np.repeat(o0, 2, axis=-1), o1)

  def test_expand_3d_tensor_axis_0(self):
    x0 = np.array([10, 11])
    x1 = np.array([10, 10, 11, 11])
    w0 = np.random.rand(2, 2)
    w1 = tf2_utils_2x_wide.expand_1_axis(w0, axis=0, epsilon=0.1)
    o0 = np.matmul(x0, w0)
    o1 = np.matmul(x1, w1)
    self.assertAllClose(o0, o1)

  def test_expand_3d_tensor_axis_2(self):
    x = np.array([10, 11])
    w0 = np.random.rand(2, 2)
    w1 = tf2_utils_2x_wide.expand_1_axis(w0, axis=-1, epsilon=0.1)
    o0 = np.matmul(x, w0)
    o1 = np.matmul(x, w1)
    self.assertAllClose(o0, np.sum(o1.reshape(2, 2), axis=-1))

  def test_end_to_end(self):
    """Covers expand_vector, expand_2_axes, and expand_1_axis."""
    model_narrow = tf.keras.Sequential()
    model_narrow.add(tf.keras.Input(shape=(3,)))
    model_narrow.add(tf.keras.layers.Dense(4))
    model_narrow.add(tf.keras.layers.Dense(4))
    model_narrow.add(tf.keras.layers.Dense(1))

    model_wide = tf.keras.Sequential()
    model_wide.add(tf.keras.Input(shape=(6,)))
    model_wide.add(tf.keras.layers.Dense(8))
    model_wide.add(tf.keras.layers.Dense(8))
    model_wide.add(tf.keras.layers.Dense(1))

    x0 = np.array([[1, 2, 3]])
    x1 = np.array([[1, 1, 2, 2, 3, 3]])

    # Call model once to build variables first.
    _, _ = model_narrow(x0), model_wide(x1)

    tf2_utils_2x_wide.model_to_model_2x_wide(
        model_narrow, model_wide, epsilon=0.2)

    self.assertAllClose(model_narrow(x0), model_wide(x1),
                        rtol=1e-05, atol=1e-05)


if __name__ == "__main__":
  tf.test.main()
3,585
34.156863
74
py
models
models-master/official/modeling/optimization/optimizer_factory.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Optimizer factory class."""
from typing import Callable, List, Optional, Tuple, Union

import gin
import tensorflow as tf

from official.modeling.optimization import slide_optimizer
from official.modeling.optimization import adafactor_optimizer
from official.modeling.optimization import ema_optimizer
from official.modeling.optimization import lamb
from official.modeling.optimization import lars
from official.modeling.optimization import legacy_adamw
from official.modeling.optimization import lr_schedule
from official.modeling.optimization.configs import optimization_config as opt_cfg

# Optimizer CLS to be used in both legacy and new path.
SHARED_OPTIMIZERS = {
    'sgd_experimental': tf.keras.optimizers.experimental.SGD,
    'adam_experimental': tf.keras.optimizers.experimental.Adam,
    'adamw': legacy_adamw.AdamWeightDecay,
    'adamw_experimental': tf.keras.optimizers.experimental.AdamW,
    'lamb': lamb.LAMB,
    'lars': lars.LARS,
    'slide': slide_optimizer.SLIDE,
    'adafactor': adafactor_optimizer.Adafactor,
}

LEGACY_OPTIMIZERS_CLS = {
    'sgd': tf.keras.optimizers.legacy.SGD,
    'adam': tf.keras.optimizers.legacy.Adam,
    'rmsprop': tf.keras.optimizers.legacy.RMSprop,
    'adagrad': tf.keras.optimizers.legacy.Adagrad,
}
LEGACY_OPTIMIZERS_CLS.update(SHARED_OPTIMIZERS)

NEW_OPTIMIZERS_CLS = {
    'sgd': tf.keras.optimizers.experimental.SGD,
    'adam': tf.keras.optimizers.experimental.Adam,
    'rmsprop': tf.keras.optimizers.experimental.RMSprop,
    'adagrad': tf.keras.optimizers.experimental.Adagrad,
}
NEW_OPTIMIZERS_CLS.update(SHARED_OPTIMIZERS)

LR_CLS = {
    'stepwise': lr_schedule.PiecewiseConstantDecayWithOffset,
    'polynomial': lr_schedule.PolynomialDecayWithOffset,
    'exponential': lr_schedule.ExponentialDecayWithOffset,
    'cosine': lr_schedule.CosineDecayWithOffset,
    'power': lr_schedule.DirectPowerDecay,
    'power_linear': lr_schedule.PowerAndLinearDecay,
    'power_with_offset': lr_schedule.PowerDecayWithOffset,
    'step_cosine_with_offset': lr_schedule.StepCosineDecayWithOffset,
}

WARMUP_CLS = {
    'linear': lr_schedule.LinearWarmup,
    'polynomial': lr_schedule.PolynomialWarmUp
}


def register_optimizer_cls(key: str,
                           optimizer_config_cls: Union[
                               tf.keras.optimizers.Optimizer,
                               tf.keras.optimizers.legacy.Optimizer,
                               tf.keras.optimizers.experimental.Optimizer
                           ],
                           use_legacy_optimizer: bool = True):
  """Registers a customized optimizer class.

  The user will still need to subclass data classes in
  configs.optimization_config to be used with OptimizerFactory.

  Args:
    key: A string that the optimizer_config_cls is registered with.
    optimizer_config_cls: A class which inherits tf.keras.optimizers.Optimizer.
    use_legacy_optimizer: A boolean that indicates if using legacy optimizers.
  """
  if use_legacy_optimizer:
    if key in LEGACY_OPTIMIZERS_CLS:
      raise ValueError('%s already registered in LEGACY_OPTIMIZERS_CLS.' % key)
    LEGACY_OPTIMIZERS_CLS[key] = optimizer_config_cls
  else:
    if key in NEW_OPTIMIZERS_CLS:
      raise ValueError('%s already registered in NEW_OPTIMIZERS_CLS.' % key)
    NEW_OPTIMIZERS_CLS[key] = optimizer_config_cls


class OptimizerFactory:
  """Optimizer factory class.

  This class builds a learning rate and an optimizer based on an optimization
  config. To use this class, you need to do the following:
  (1) Define an optimization config; this includes the optimizer and the
      learning rate schedule.
  (2) Initialize the class using the optimization config.
  (3) Build the learning rate.
  (4) Build the optimizer.

  This is a typical example for using this class:

  ```
  params = {
        'optimizer': {
            'type': 'sgd',
            'sgd': {'momentum': 0.9}
        },
        'learning_rate': {
            'type': 'stepwise',
            'stepwise': {'boundaries': [10000, 20000],
                         'values': [0.1, 0.01, 0.001]}
        },
        'warmup': {
            'type': 'linear',
            'linear': {'warmup_steps': 500, 'warmup_learning_rate': 0.01}
        }
    }
  opt_config = OptimizationConfig(params)
  opt_factory = OptimizerFactory(opt_config)
  lr = opt_factory.build_learning_rate()
  optimizer = opt_factory.build_optimizer(lr)
  ```
  """

  def __init__(self, config: opt_cfg.OptimizationConfig):
    """Initializing OptimizerFactory.

    Args:
      config: OptimizationConfig instance containing the optimization config.
    """
    self._config = config
    self._optimizer_config = config.optimizer.get()
    self._optimizer_type = config.optimizer.type

    self._use_ema = config.ema is not None
    self._ema_config = config.ema

    if self._optimizer_config is None:
      raise ValueError('Optimizer type must be specified')

    self._lr_config = config.learning_rate.get()
    self._lr_type = config.learning_rate.type

    if self._lr_type is None:
      raise ValueError('Learning rate type must be specified')

    self._warmup_config = config.warmup.get()
    self._warmup_type = config.warmup.type

  def build_learning_rate(self):
    """Build learning rate.

    Builds learning rate from config. The learning rate schedule is built
    according to the learning rate config. If the learning rate type is
    'constant', lr_config.learning_rate is returned.

    Returns:
      tf.keras.optimizers.schedules.LearningRateSchedule instance. If the
      learning rate type is 'constant', lr_config.learning_rate is returned.
    """
    if self._lr_type == 'constant':
      lr = self._lr_config.learning_rate
    else:
      lr = LR_CLS[self._lr_type](**self._lr_config.as_dict())

    if self._warmup_config:
      lr = WARMUP_CLS[self._warmup_type](lr, **self._warmup_config.as_dict())

    return lr

  @gin.configurable
  def build_optimizer(
      self,
      lr: Union[tf.keras.optimizers.schedules.LearningRateSchedule, float],
      gradient_aggregator: Optional[Callable[
          [List[Tuple[tf.Tensor, tf.Tensor]]],
          List[Tuple[tf.Tensor, tf.Tensor]]]] = None,
      gradient_transformers: Optional[List[Callable[
          [List[Tuple[tf.Tensor, tf.Tensor]]],
          List[Tuple[tf.Tensor, tf.Tensor]]]]] = None,
      postprocessor: Optional[Callable[[tf.keras.optimizers.Optimizer],
                                       tf.keras.optimizers.Optimizer]] = None,
      use_legacy_optimizer: bool = True):
    """Build optimizer.

    Builds optimizer from config. It takes learning rate as input, and builds
    the optimizer according to the optimizer config. Typically, the learning
    rate built using self.build_lr() is passed as an argument to this method.

    Args:
      lr: A floating point value, or a
        tf.keras.optimizers.schedules.LearningRateSchedule instance.
      gradient_aggregator: Optional function to overwrite gradient aggregation.
      gradient_transformers: Optional list of functions to use to transform
        gradients before applying updates to Variables. The functions are
        applied after gradient_aggregator. The functions should accept and
        return a list of (gradient, variable) tuples. clipvalue, clipnorm,
        global_clipnorm should not be set when gradient_transformers is
        passed.
      postprocessor: An optional function for postprocessing the optimizer. It
        takes an optimizer and returns an optimizer.
      use_legacy_optimizer: A boolean that indicates if using legacy
        optimizers.

    Returns:
      `tf.keras.optimizers.legacy.Optimizer` or
      `tf.keras.optimizers.experimental.Optimizer` instance.
    """
    optimizer_dict = self._optimizer_config.as_dict()
    ## Delete clipnorm, clipvalue, global_clipnorm if None
    if optimizer_dict['clipnorm'] is None:
      del optimizer_dict['clipnorm']
    if optimizer_dict['clipvalue'] is None:
      del optimizer_dict['clipvalue']
    if optimizer_dict['global_clipnorm'] is None:
      del optimizer_dict['global_clipnorm']

    optimizer_dict['learning_rate'] = lr
    if gradient_aggregator is not None:
      optimizer_dict['gradient_aggregator'] = gradient_aggregator
    if gradient_transformers is not None:
      optimizer_dict['gradient_transformers'] = gradient_transformers

    if use_legacy_optimizer:
      optimizer = LEGACY_OPTIMIZERS_CLS[self._optimizer_type](**optimizer_dict)
    else:
      if 'decay' in optimizer_dict:
        raise ValueError(
            '`decay` is deprecated in new Keras optimizer, please reflect the '
            'decay logic in `lr` or set `use_legacy_optimizer=True` to use '
            'the legacy optimizer.')
      optimizer = NEW_OPTIMIZERS_CLS[self._optimizer_type](**optimizer_dict)

    if self._use_ema:
      if not use_legacy_optimizer:
        raise ValueError(
            'EMA can only work with the legacy optimizer, please set '
            '`use_legacy_optimizer=True`.')
      optimizer = ema_optimizer.ExponentialMovingAverage(
          optimizer, **self._ema_config.as_dict())
    if postprocessor:
      optimizer = postprocessor(optimizer)
    if isinstance(optimizer, tf.keras.optimizers.Optimizer):
      return optimizer
    # The following check makes sure the function won't break in older TF
    # versions because of missing the experimental/legacy package.
    if hasattr(tf.keras.optimizers, 'experimental'):
      if isinstance(optimizer, tf.keras.optimizers.experimental.Optimizer):
        return optimizer
    if hasattr(tf.keras.optimizers, 'legacy'):
      if isinstance(optimizer, tf.keras.optimizers.legacy.Optimizer):
        return optimizer
    raise TypeError('OptimizerFactory.build_optimizer returning a '
                    'non-optimizer object: {}'.format(optimizer))
10,475
38.235955
81
py
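As a usage note, the factory is normally driven end to end from a nested config dict. The sketch below simply instantiates the example from the OptimizerFactory docstring above; the module paths assume the repository layout shown in this dump, and the hyperparameter values are illustrative, not recommendations.

```
from official.modeling.optimization import optimizer_factory
from official.modeling.optimization.configs import optimization_config as opt_cfg

# SGD with momentum, a stepwise learning rate schedule, and a linear warmup,
# mirroring the docstring example.
params = {
    'optimizer': {
        'type': 'sgd',
        'sgd': {'momentum': 0.9}
    },
    'learning_rate': {
        'type': 'stepwise',
        'stepwise': {'boundaries': [10000, 20000],
                     'values': [0.1, 0.01, 0.001]}
    },
    'warmup': {
        'type': 'linear',
        'linear': {'warmup_steps': 500, 'warmup_learning_rate': 0.01}
    }
}
opt_config = opt_cfg.OptimizationConfig(params)
opt_factory = optimizer_factory.OptimizerFactory(opt_config)

# Build the schedule first, then the optimizer that consumes it.
lr = opt_factory.build_learning_rate()
optimizer = opt_factory.build_optimizer(lr, use_legacy_optimizer=True)
```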