repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
models | models-master/official/projects/pointpillars/modeling/featurizers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backbones."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.pointpillars.modeling import featurizers
class FeaturizerTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([32, 32], [16, 4, 2], 4, 2, 1),
([32, 16], [1, 3, 1], 2, 2, 3),
)
def test_network_creation(self, image_size, pillars_size, train_batch_size,
eval_batch_size, num_blocks):
num_channels = 3
h, w = image_size
n, _, _ = pillars_size
featurizer = featurizers.Featurizer(image_size, pillars_size,
train_batch_size, eval_batch_size,
num_blocks, num_channels)
# Train mode.
pillars = tf.keras.Input(shape=pillars_size, batch_size=train_batch_size)
indices = tf.keras.Input(
shape=[n, 2], batch_size=train_batch_size, dtype=tf.int32)
image = featurizer(pillars, indices, training=True)
self.assertAllEqual([train_batch_size, h, w, num_channels],
image.shape.as_list())
# Evaluation mode.
pillars = tf.keras.Input(shape=pillars_size, batch_size=eval_batch_size)
indices = tf.keras.Input(
shape=[n, 2], batch_size=eval_batch_size, dtype=tf.int32)
image = featurizer(pillars, indices, training=False)
self.assertAllEqual([eval_batch_size, h, w, num_channels],
image.shape.as_list())
# Test mode, batch size must be 1.
pillars = tf.keras.Input(shape=pillars_size, batch_size=1)
indices = tf.keras.Input(
shape=[n, 2], batch_size=1, dtype=tf.int32)
image = featurizer(pillars, indices, training=None)
self.assertAllEqual([1, h, w, num_channels],
image.shape.as_list())
def test_serialization(self):
kwargs = dict(
image_size=[4, 4],
pillars_size=[4, 5, 6],
train_batch_size=4,
eval_batch_size=2,
num_blocks=3,
num_channels=4,
kernel_regularizer=None,
)
net = featurizers.Featurizer(**kwargs)
expected_config = kwargs
self.assertEqual(net.get_config(), expected_config)
new_net = featurizers.Featurizer.from_config(net.get_config())
self.assertAllEqual(net.get_config(), new_net.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,974 | 35.280488 | 77 | py |
models | models-master/official/projects/pointpillars/tasks/pointpillars.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars task definition."""
import functools
from typing import Any, List, Mapping, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.core import base_task
from official.core import task_factory
from official.projects.pointpillars.configs import pointpillars as cfg
from official.projects.pointpillars.dataloaders import decoders
from official.projects.pointpillars.dataloaders import parsers
from official.projects.pointpillars.modeling import factory
from official.projects.pointpillars.utils import utils
from official.vision.dataloaders import input_reader_factory
from official.vision.losses import focal_loss
from official.vision.losses import loss_utils
def pick_dataset_fn(file_type: str) -> Any:
if file_type == 'tfrecord':
return tf.data.TFRecordDataset
if file_type == 'tfrecord_compressed':
return functools.partial(tf.data.TFRecordDataset, compression_type='GZIP')
raise ValueError('Unrecognized file_type: {}'.format(file_type))
def get_batch_size_per_replica(global_batch_size: int) -> int:
"""Get batch size per accelerator replica."""
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
if global_batch_size < num_replicas:
logging.warning('Global batch size is smaller than num replicas. '
'Set batch size per replica to 1.')
return 1
if global_batch_size % num_replicas != 0:
raise ValueError(
'global_batch_size {} is not a multiple of num_replicas {}'
.format(global_batch_size, num_replicas))
batch_size = int(global_batch_size / num_replicas)
return batch_size
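# Illustrative sketch (not part of the original task code): how the global
# batch size above maps to a per-replica batch size. The replica counts are
# hypothetical examples, not values taken from this codebase.
#   num_replicas=8, global_batch_size=64 -> 8 per replica
#   num_replicas=8, global_batch_size=4  -> 1 per replica (with a warning)
#   num_replicas=8, global_batch_size=65 -> ValueError (not a multiple of 8)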
@task_factory.register_task_cls(cfg.PointPillarsTask)
class PointPillarsTask(base_task.Task):
"""A single-replica view of training procedure."""
def __init__(self,
params: cfg.PointPillarsTask,
logging_dir: Optional[str] = None,
name: Optional[str] = None):
super().__init__(params, logging_dir, name)
self._model = None
self._attribute_heads = self.task_config.model.head.attribute_heads
def build_model(self) -> tf.keras.Model:
# Create only one model instance if this function is called multiple times.
if self._model is not None:
return self._model
pillars_config = self.task_config.model.pillars
input_specs = {
'pillars':
tf.keras.layers.InputSpec(
shape=(None, pillars_config.num_pillars,
pillars_config.num_points_per_pillar,
pillars_config.num_features_per_point)),
'indices':
tf.keras.layers.InputSpec(
shape=(None, pillars_config.num_pillars, 2), dtype='int32'),
}
train_batch_size = get_batch_size_per_replica(
self.task_config.train_data.global_batch_size)
eval_batch_size = get_batch_size_per_replica(
self.task_config.validation_data.global_batch_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
self._model = factory.build_pointpillars(
input_specs=input_specs,
model_config=self.task_config.model,
train_batch_size=train_batch_size,
eval_batch_size=eval_batch_size,
l2_regularizer=l2_regularizer)
return self._model
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(
self,
params: cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Build input dataset."""
model_config = self.task_config.model
if (model_config.classes != 'all' and
model_config.num_classes != 2):
raise ValueError('Model num_classes must be 2 when not for all classes.')
decoder = decoders.ExampleDecoder(model_config.image, model_config.pillars)
image_size = [model_config.image.height, model_config.image.width]
anchor_sizes = [(a.length, a.width) for a in model_config.anchors]
anchor_labeler_config = model_config.anchor_labeler
parser = parsers.Parser(
classes=model_config.classes,
min_level=model_config.min_level,
max_level=model_config.max_level,
image_size=image_size,
anchor_sizes=anchor_sizes,
match_threshold=anchor_labeler_config.match_threshold,
unmatched_threshold=anchor_labeler_config.unmatched_threshold,
max_num_detections=model_config.detection_generator
.max_num_detections,
dtype=params.dtype,
)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def compute_attribute_losses(
self,
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
box_sample_weight: tf.Tensor) -> Mapping[str, float]:
"""Computes attribute loss."""
att_loss_fn = tf.keras.losses.Huber(
self.task_config.losses.huber_loss_delta,
reduction=tf.keras.losses.Reduction.SUM)
losses = {}
total_loss = 0.0
for head in self._attribute_heads:
if head.type != 'regression':
raise ValueError(f'Attribute type {head.type} not supported.')
y_true_att = loss_utils.multi_level_flatten(
labels['attribute_targets'][head.name], last_dim=head.size)
y_pred_att = loss_utils.multi_level_flatten(
outputs['attribute_outputs'][head.name], last_dim=head.size)
if head.name == 'heading':
# Direction aware loss, wrap the delta angle to [-pi, pi].
# Otherwise for a loss that is symmetric to direction (i.e., heading 0
# and pi are the same), we use a tf.sin transform.
delta = utils.wrap_angle_rad(y_pred_att - y_true_att)
loss = att_loss_fn(
y_true=tf.zeros_like(delta),
y_pred=delta,
sample_weight=box_sample_weight)
else:
loss = att_loss_fn(
y_true=y_true_att,
y_pred=y_pred_att,
sample_weight=box_sample_weight)
total_loss += loss
losses[head.name] = loss
losses['total'] = total_loss
return losses
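  # Illustrative note (not part of the original task code): because the heading
  # delta is passed through utils.wrap_angle_rad above, a prediction of
  # pi - 0.1 against a target of -pi + 0.1 yields a wrapped delta of about
  # -0.2 (magnitude 0.2) rather than ~2*pi - 0.2, so the Huber loss stays small
  # for nearly identical headings across the +/-pi boundary.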
def compute_losses(
self,
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
aux_losses: Optional[Any] = None) -> Mapping[str, float]:
"""Build losses."""
params = self.task_config
cls_loss_fn = focal_loss.FocalLoss(
alpha=params.losses.focal_loss_alpha,
gamma=params.losses.focal_loss_gamma,
reduction=tf.keras.losses.Reduction.SUM)
box_loss_fn = tf.keras.losses.Huber(
params.losses.huber_loss_delta,
reduction=tf.keras.losses.Reduction.SUM)
# Sums all positives in a batch for normalization and avoids zero
# num_positives_sum, which would lead to inf loss during training
cls_sample_weight = labels['cls_weights']
box_sample_weight = labels['box_weights']
num_positives = tf.reduce_sum(box_sample_weight) + 1.0
cls_sample_weight = cls_sample_weight / num_positives
box_sample_weight = box_sample_weight / num_positives
y_true_cls = loss_utils.multi_level_flatten(
labels['cls_targets'], last_dim=None)
y_true_cls = tf.one_hot(y_true_cls, params.model.num_classes)
y_pred_cls = loss_utils.multi_level_flatten(
outputs['cls_outputs'], last_dim=params.model.num_classes)
y_true_box = loss_utils.multi_level_flatten(
labels['box_targets'], last_dim=4)
y_pred_box = loss_utils.multi_level_flatten(
outputs['box_outputs'], last_dim=4)
cls_loss = cls_loss_fn(
y_true=y_true_cls, y_pred=y_pred_cls, sample_weight=cls_sample_weight)
box_loss = box_loss_fn(
y_true=y_true_box, y_pred=y_pred_box, sample_weight=box_sample_weight)
attribute_losses = self.compute_attribute_losses(outputs, labels,
box_sample_weight)
model_loss = (
cls_loss + box_loss * params.losses.box_loss_weight +
attribute_losses['total'] * params.losses.attribute_loss_weight)
total_loss = model_loss
if aux_losses:
reg_loss = tf.reduce_sum(aux_losses)
total_loss += reg_loss
total_loss = params.losses.loss_weight * total_loss
losses = {
'class_loss': cls_loss,
'box_loss': box_loss,
'attribute_loss': attribute_losses['total'],
'model_loss': model_loss,
'total_loss': total_loss,
}
for head in self._attribute_heads:
losses[head.name + '_loss'] = attribute_losses[head.name]
return losses
def build_metrics(self, training: bool = True) -> List[tf.metrics.Metric]:
"""Define metrics and how to calculate them."""
# train/validation loss metrics
loss_names = [
'class_loss', 'box_loss', 'attribute_loss', 'model_loss', 'total_loss'
]
for head in self._attribute_heads:
loss_names.append(head.name + '_loss')
metrics = []
for name in loss_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
# Use a separate metric for WOD validation.
if not training:
if self.task_config.use_wod_metrics:
# To use Waymo open dataset metrics, please install one of the pip
# package `waymo-open-dataset-tf-*` from
# https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md#use-pre-compiled-pippip3-packages-for-linux
        # Note that the package is built against a specific tensorflow version
        # and will produce an error if it does not match the tf version that
        # is currently in use.
try:
from official.projects.pointpillars.utils import wod_detection_evaluator # pylint: disable=g-import-not-at-top
except ModuleNotFoundError:
logging.error('waymo-open-dataset should be installed to enable Waymo'
' evaluator.')
raise
self._wod_metric = wod_detection_evaluator.create_evaluator(
self.task_config.model)
return metrics
def train_step(
self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[tf.metrics.Metric]] = None) -> Mapping[str, Any]:
"""Does forward and backward."""
features, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(pillars=features['pillars'],
indices=features['indices'],
training=True)
losses = self.compute_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
# Computes per-replica loss.
scaled_loss = losses['total_loss'] / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
# For updating trainer.train_loss
logs = {self.loss: losses['total_loss']}
# For updating trainer.train_metrics
if metrics:
for m in metrics:
m.update_state(losses[m.name])
return logs
def validation_step(
self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[tf.metrics.Metric]] = None) -> Mapping[str, Any]:
"""Validatation step."""
features, labels = inputs
outputs = model(pillars=features['pillars'],
indices=features['indices'],
image_shape=labels['image_shape'],
anchor_boxes=labels['anchor_boxes'],
training=False)
losses = self.compute_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
# For updating trainer.validation_loss
logs = {self.loss: losses['total_loss']}
# For updating trainer.validation_metrics
if metrics:
for m in metrics:
m.update_state(losses[m.name])
if self.task_config.use_wod_metrics:
logs.update(
{self._wod_metric.name: (labels['groundtruths'], outputs)})
return logs
def aggregate_logs(self,
state: Any = None,
step_outputs: Any = None) -> Any:
"""Called after each validation_step to update metrics."""
logging.log_every_n(logging.INFO,
'Aggregating metrics after one evaluation step.', 1000)
if self.task_config.use_wod_metrics:
if state is None:
self._wod_metric.reset_states()
self._wod_metric.update_state(step_outputs[self._wod_metric.name][0],
step_outputs[self._wod_metric.name][1])
if state is None:
state = True
return state
def reduce_aggregated_logs(self,
aggregated_logs: Any,
global_step: Optional[tf.Tensor] = None) -> Any:
"""Called after eval_end to calculate metrics."""
logging.info('Reducing aggregated metrics after one evaluation cycle.')
logs = {}
if self.task_config.use_wod_metrics:
logs.update(self._wod_metric.result())
return logs
| 15,302 | 38.440722 | 138 | py |
models | models-master/official/projects/pix2seq/modeling/pix2seq_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements A Language Modeling Framework for Object Detection.
Model paper: https://arxiv.org/abs/2109.10852
This module does not support Keras de/serialization. Please use
tf.train.Checkpoint for object-based saving and loading and tf.saved_model.save
for graph serialization.
"""
import math
from typing import Any, List, Mapping, Optional, Union
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.pix2seq.modeling import transformer
def get_shape(x):
static = x.shape.as_list()
dynamic = tf.shape(x)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def get_variable_initializer(name=None):
if name is None:
return tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.02)
def add_seq_pos_emb(
self, pos_encoding, max_seq_len, dim, name_prefix=None, initializer=None
):
"""Add seq_pos_emb variable/tensor to model instance referenced by `self`."""
if name_prefix is None:
name_prefix = self.name
if initializer is None:
initializer = get_variable_initializer()
if pos_encoding == "learned":
self.seq_pos_emb = self.add_weight(
shape=(max_seq_len + 1, dim),
initializer=initializer,
name="%s/seq_pos_embedding" % name_prefix,
)
# (gunho) currently only 'learned' positional encoding is supported
elif pos_encoding == "sin_cos":
self.seq_pos_emb = None
else:
raise ValueError("Unknown pos encoding %s" % pos_encoding)
def add_vocab_token_emb(
self,
vocab_size,
dim,
shared_embedding,
output_bias,
name_prefix=None,
initializer=None,
):
"""Add token_embedding variable to model instance referenced by `self`."""
if name_prefix is None:
name_prefix = self.name
if initializer is None:
initializer = get_variable_initializer()
if shared_embedding:
self.token_embedding = self.add_weight(
shape=[vocab_size, dim],
initializer=initializer,
name="%s/token_embedding" % name_prefix,
)
else:
self.inp_token_embedding = self.add_weight(
shape=[vocab_size, dim],
initializer=initializer,
name="%s/inp_token_embedding" % name_prefix,
)
self.outp_token_embedding = self.add_weight(
shape=[vocab_size, dim],
initializer=initializer,
name="%s/outp_token_embedding" % name_prefix,
)
if output_bias:
self.outp_bias = self.add_weight(
shape=[vocab_size],
initializer=initializer,
name="%s/outp_bias" % name_prefix,
)
def get_ar_mask(seq_len, dtype=tf.float32):
"""Get autoregressive causal mask so the model cannot attends to the future.
Args:
    seq_len: an `int` or `int` tensor specifying the sequence length.
dtype: tf data type for the return tensor.
Returns:
tensor of shape [1, 1, seq_len, seq_len] with ones for locations to be
masked out.
"""
valid_locs = tf.linalg.band_part(
tf.ones([seq_len, seq_len], dtype=dtype), -1, 0
)
valid_locs = tf.reshape(valid_locs, [1, 1, seq_len, seq_len])
return 1.0 - valid_locs
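# Illustrative sketch (not part of the original file): for seq_len=3 the mask
# marks future positions with 1.0 (masked out) and visible ones with 0.0:
#   get_ar_mask(3)[0, 0] == [[0., 1., 1.],
#                            [0., 0., 1.],
#                            [0., 0., 0.]]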
def position_embedding_sine(
attention_mask,
num_pos_features=256,
temperature=10000.0,
normalize=True,
scale=2 * math.pi,
):
"""Sine-based positional embeddings for 2D images.
Args:
attention_mask: a `bool` Tensor specifying the size of the input image to
the Transformer and which elements are padded, of size [batch_size,
height, width]
    num_pos_features: an `int` specifying the number of positional features;
      should be equal to the hidden size of the Transformer network.
temperature: a `float` specifying the temperature of the positional
embedding. Any type that is converted to a `float` can also be accepted.
normalize: a `bool` determining whether the positional embeddings should be
normalized between [0, scale] before application of the sine and cos
functions.
    scale: a `float` used to scale the embeddings before the sine and cos
      functions are applied, when `normalize` is True.
Returns:
    embeddings: a `float` tensor of shape [batch_size, height, width,
      num_pos_features] containing the positional embeddings based on sine
      features.
"""
if num_pos_features % 2 != 0:
raise ValueError(
"Number of embedding features (num_pos_features) must be even when "
"column and row embeddings are concatenated."
)
num_pos_features = num_pos_features // 2
# Produce row and column embeddings based on total size of the image
# <tf.float>[batch_size, height, width]
attention_mask = tf.cast(attention_mask, tf.float32)
row_embedding = tf.cumsum(attention_mask, 1)
col_embedding = tf.cumsum(attention_mask, 2)
if normalize:
eps = 1e-6
row_embedding = row_embedding / (row_embedding[:, -1:, :] + eps) * scale
col_embedding = col_embedding / (col_embedding[:, :, -1:] + eps) * scale
dim_t = tf.range(num_pos_features, dtype=row_embedding.dtype)
dim_t = tf.pow(temperature, 2 * (dim_t // 2) / num_pos_features)
# Creates positional embeddings for each row and column position
# <tf.float>[batch_size, height, width, num_pos_features]
pos_row = tf.expand_dims(row_embedding, -1) / dim_t
pos_col = tf.expand_dims(col_embedding, -1) / dim_t
pos_row = tf.stack(
[tf.sin(pos_row[:, :, :, 0::2]), tf.cos(pos_row[:, :, :, 1::2])], axis=4
)
pos_col = tf.stack(
[tf.sin(pos_col[:, :, :, 0::2]), tf.cos(pos_col[:, :, :, 1::2])], axis=4
)
final_shape = tf_utils.get_shape_list(pos_row)[:3] + [-1]
pos_row = tf.reshape(pos_row, final_shape)
pos_col = tf.reshape(pos_col, final_shape)
output = tf.concat([pos_row, pos_col], -1)
embeddings = tf.cast(output, tf.float32)
return embeddings
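# Illustrative usage sketch (not part of the original file): sine embeddings
# for an unpadded batch of 2 feature maps of size 4x4. The shapes follow the
# docstring above; num_pos_features matches the transformer hidden size.
#   mask = tf.ones([2, 4, 4])
#   pos = position_embedding_sine(mask, num_pos_features=256)
#   # pos.shape == [2, 4, 4, 256]; Pix2Seq.call later reshapes it to
#   # [batch_size, height * width, hidden_size] before adding it to features.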
def top_logits(
logits: tf.Tensor, k: int = 0, p: float = 1.0, mask: float = -1e10
) -> tf.Tensor:
"""Remove low probability logits via masking.
Args:
logits: class logits in shape of (batch size, total_classes).
k: specifying top k largest logits to keep.
p: specifying a probability for finding a minimum set of largest logits to
keep, where their cumulative probability is no less than p (actually in
the following version, it is "...cumulative probability is the largest but
no more than p").
    mask: a value used to replace logits that don't satisfy the keep
conditions.
Returns:
logits where low probability ones are replaced with mask.
"""
mask = tf.ones_like(logits) * mask
if k > 0:
min_logits = tf.nn.top_k(logits, k=k)[0][:, -1:]
logits = tf.where(logits < min_logits, mask, logits)
if p < 1.0:
sorted_logits = tf.sort(logits, direction="DESCENDING", axis=-1)
cum_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
min_logits = -tf.reduce_max(
tf.where(cum_probs <= p, -sorted_logits, mask), -1, keepdims=True
)
min_logits = tf.minimum(min_logits, sorted_logits[:, :1])
logits = tf.where(logits < min_logits, mask, logits)
return logits
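# Illustrative sketch (not part of the original file): top-k truncation with
# the default mask value. With k=2, logits below the 2nd-largest are masked.
#   logits = tf.constant([[2.0, 0.5, 1.0, -1.0]])
#   top_logits(logits, k=2)  # -> [[2.0, -1e10, 1.0, -1e10]]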
class Pix2Seq(tf.keras.Model):
"""Pix2Seq model with Keras.
Pix2Seq consists of backbone, input token embedding, Pix2SeqTransformer.
"""
def __init__(
self,
backbone,
backbone_endpoint_name,
max_seq_len,
vocab_size,
hidden_size,
num_encoder_layers=6,
num_decoder_layers=6,
drop_path=0.1,
drop_units=0.1,
drop_att=0.0,
**kwargs
):
super().__init__(**kwargs)
self._backbone = backbone
self._backbone_endpoint_name = backbone_endpoint_name
self._max_seq_len = max_seq_len
self._vocab_size = vocab_size
self._hidden_size = hidden_size
self._num_encoder_layers = num_encoder_layers
self._num_decoder_layers = num_decoder_layers
self._drop_path = drop_path
self._drop_units = drop_units
self._drop_att = drop_att
if hidden_size % 2 != 0:
raise ValueError("hidden_size must be a multiple of 2.")
self._dropout = tf.keras.layers.Dropout(self._drop_units)
self._stem_projection = tf.keras.layers.Dense(
self._hidden_size, name="stem_projection"
)
self._stem_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name="stem_ln"
)
self._transformer = Pix2SeqTransformer(
max_seq_len=self._max_seq_len,
vocab_size=self._vocab_size,
hidden_size=self._hidden_size,
pos_encoding="learned",
num_encoder_layers=self._num_encoder_layers,
num_decoder_layers=self._num_decoder_layers,
drop_path=self._drop_path,
drop_units=self._drop_units,
drop_att=self._drop_att,
)
@property
def backbone(self) -> tf.keras.Model:
return self._backbone
@property
def transformer(self) -> tf.keras.Model:
return self._transformer
def get_config(self):
return {
"backbone": self._backbone,
"backbone_endpoint_name": self._backbone_endpoint_name,
"max_seq_len": self._max_seq_len,
"vocab_size": self._vocab_size,
"hidden_size": self._hidden_size,
"num_encoder_layers": self._num_encoder_layers,
"num_decoder_layers": self._num_decoder_layers,
"drop_path": self._drop_path,
"drop_units": self._drop_units,
"drop_att": self._drop_att,
}
@classmethod
def from_config(cls, config):
return cls(**config)
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(backbone=self.backbone, transformer=self.transformer)
return items
def _generate_image_mask(
self, inputs: tf.Tensor, target_shape: tf.Tensor
) -> tf.Tensor:
"""Generates image mask from input image."""
mask = tf.expand_dims(
tf.cast(
tf.not_equal(tf.reduce_sum(inputs, axis=-1), 0.3), inputs.dtype
),
axis=-1,
)
mask = tf.image.resize(
mask, target_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
)
return mask
def call(
self,
inputs: tf.Tensor,
targets: Optional[tf.Tensor] = None,
training: bool = None,
) -> List[Any]:
features = self._backbone(inputs)[self._backbone_endpoint_name]
mask = tf.ones_like(features)
batch_size, h, w, num_channels = get_shape(features)
features = tf.reshape(features, [batch_size, h * w, num_channels])
features = self._stem_ln(
self._stem_projection(self._dropout(features, training)))
pos_emb = position_embedding_sine(
mask[:, :, :, 0], num_pos_features=self._hidden_size
)
pos_emb = tf.reshape(pos_emb, [batch_size, -1, self._hidden_size])
pos_emb = tf.cast(pos_emb, features.dtype)
tokens = None
if training:
logits = self._transformer(
{
"inputs": features,
"tokens": targets,
"pos_emb": pos_emb,
},
training,
)
else:
tokens, logits = self._transformer.infer({
"inputs": features,
"tokens": targets,
"pos_emb": pos_emb,
})
return [tokens, logits]
class Pix2SeqTransformer(tf.keras.layers.Layer):
"""Encoder and Decoder of Pix2Seq."""
def __init__(
self,
max_seq_len,
vocab_size,
hidden_size,
pos_encoding="learned",
num_encoder_layers=6,
num_decoder_layers=6,
drop_path=0.1,
drop_units=0.1,
drop_att=0.0,
shared_embedding=True,
output_bias=True,
num_heads=8,
**kwargs
):
super().__init__(**kwargs)
self._max_seq_len = max_seq_len
self._vocab_size = vocab_size
self._hidden_size = hidden_size
self._pos_encoding = pos_encoding
self._num_encoder_layers = num_encoder_layers
self._num_decoder_layers = num_decoder_layers
self._drop_path = drop_path
self._drop_units = drop_units
self._drop_att = drop_att
self._shared_embedding = shared_embedding
self._output_bias = output_bias
self._num_heads = num_heads
add_seq_pos_emb(
self, self._pos_encoding, self._max_seq_len, self._hidden_size
)
add_vocab_token_emb(
self,
self._vocab_size,
self._hidden_size,
self._shared_embedding,
self._output_bias,
)
if self._num_encoder_layers > 0:
self._encoder = transformer.TransformerEncoder(
num_layers=self._num_encoder_layers,
dim=self._hidden_size,
mlp_ratio=4,
num_heads=self._num_heads,
drop_path=self._drop_path,
drop_units=self._drop_units,
drop_att=self._drop_att,
)
else:
self._encoder = None
self._output_ln_enc = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name="output_ln_enc"
)
self._proj = tf.keras.layers.Dense(self._hidden_size, name="proj/linear")
self._proj_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name="proj/ln"
)
self._proj_mlp = transformer.MLP(
num_layers=1,
dim=self._hidden_size,
mlp_ratio=4,
drop_path=self._drop_path,
drop_units=self._drop_units,
name="proj/mlp",
)
self._decoder = transformer.TransformerDecoder(
num_layers=self._num_decoder_layers,
dim=self._hidden_size,
mlp_ratio=4,
num_heads=self._num_heads,
drop_path=self._drop_path,
drop_units=self._drop_units,
drop_att=self._drop_att,
)
self._output_ln_dec = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name="output_ln_dec"
)
def get_config(self):
return {
"max_seq_len": self._max_seq_len,
"vocab_size": self._vocab_size,
"hidden_size": self._hidden_size,
"pos_encoding": self._pos_encoding,
"num_encoder_layers": self._num_encoder_layers,
"num_decoder_layers": self._num_decoder_layers,
"drop_path": self._drop_path,
"drop_units": self._drop_units,
"drop_att": self._drop_att,
"shared_embedding": self._shared_embedding,
"output_bias": self._output_bias,
"num_heads": self._num_heads,
}
def call(self, inputs: tf.Tensor, training: bool = None):
sources = inputs["inputs"]
targets = inputs["tokens"]
mem_pos_embed = inputs["pos_emb"]
sources = sources + mem_pos_embed
if self._encoder is not None:
encoded = self._encoder(sources, None, training=training, ret_list=False)
else:
encoded = sources
encoded = self._output_ln_enc(encoded)
encoded = self._proj_ln(self._proj(encoded))
encoded = encoded + mem_pos_embed
encoded = self._proj_mlp(encoded, training=training)
seq_len = tf.shape(targets)[1]
seq_pos_emb = tf.expand_dims(self.seq_pos_emb[:seq_len], 0)
inp_embedding = outp_embedding = self.token_embedding
target_emb = tf.gather(inp_embedding, targets) + seq_pos_emb
self_attention_mask = 1.0 - get_ar_mask(seq_len, target_emb.dtype)
decoded, _ = self._decoder(
target_emb, encoded, None, self_attention_mask, None, training)
decoded = self._output_ln_dec(decoded)
decoded = tf.cast(decoded, seq_pos_emb.dtype)
outp_embedding = tf.cast(outp_embedding, seq_pos_emb.dtype)
logits = tf.matmul(decoded, outp_embedding, transpose_b=True)
if self._output_bias:
logits = tf.nn.bias_add(logits, self.outp_bias)
return logits
def infer(
self,
inputs: tf.Tensor,
max_seq_len=None,
temperature=1.0,
top_k=0,
top_p=0.4,
sampling_callback=None,
):
"""Autoregressive (without teacher-forcing) prediction.
Note: the autoregressive sampling/inference time can be further optimized by
caching *transformed* key / value inside multi-head attention for the
`encoded` and previously generated tokens, but this may make the code less
readable.
Args:
inputs: prompt - `int` tokens with shape of (bsz, prompt_len). encoded -
`float` encoded representations for conditioning with shape of (bsz,
size, dim). This can be optional in case of pure decoder.
max_seq_len: `int` of max generated sequence length (including prompt).
temperature: `float` scalar for scaling the logits before sampling.
top_k: `int` scalar for truncating top-k tokens according to logits before
token sampling.
top_p: `float` scalar specifying the threshold of cumulative probablity
for truncating tokens before token sampling.
    sampling_callback: a callback `function` that takes `next_logits` and
      returns `next_token`. This is used when users need specific logic for
      sampling. Defaults to `None` with standard free-form sampling.
Returns:
sampled tokens with shape of (bsz, max_seq_len-prompt_len).
logits (temperature-scaled) associated with sampled token, in shape of
(bsz, max_seq_len-prompt_len, vocab_size).
"""
sources = inputs["inputs"]
prompt = inputs["tokens"]
mem_pos_embed = inputs["pos_emb"]
sources = sources + mem_pos_embed
if self._encoder is not None:
encoded = self._encoder(sources, None, training=False, ret_list=False)
else:
encoded = sources
encoded = self._output_ln_enc(encoded)
encoded = self._proj_ln(self._proj(encoded))
encoded = encoded + mem_pos_embed
encoded = self._proj_mlp(encoded, training=False)
bsz = tf.shape(prompt)[0]
prompt_len = tf.shape(prompt)[1]
seq_len = self._max_seq_len if max_seq_len is None else max_seq_len
# (gunho) 500 (self._max_seq_len) -> 501 for prompt seq
seq_len = seq_len + 1
seq_pos_emb = tf.expand_dims(self.seq_pos_emb, 0)
inp_embedding = self.token_embedding
outp_embedding = inp_embedding
# Each step reads caches[:step] and tokens[step:next_step] and updates
# tokens[next_step], logits[next_step] and caches[step:next_step].
# On the first step, step=0, next_step=prompt_len. On subsequent steps
# next_step = step + 1.
def loop_body(step, caches, tokens, logits, is_prompt=False):
if is_prompt:
assert step == 0
x = tf.gather(inp_embedding, tf.transpose(tokens[:prompt_len]))
input_pos_embed = seq_pos_emb[:, :prompt_len]
x += input_pos_embed
self_attention_mask = 1.0 - get_ar_mask(prompt_len, x.dtype)
caches_in = None
else:
x = tf.gather(inp_embedding, tf.transpose(tokens[step]))
input_pos_embed = seq_pos_emb[:, step]
x += input_pos_embed
x = tf.expand_dims(x, 1) # (bsz, 1, d)
self_attention_mask = tf.ones([1, 1, 1, 1])
caches_in = tf.transpose(caches[:step], [1, 2, 0, 3])
decoded, caches_out = self._decoder(
x, encoded, caches_in, self_attention_mask, None, training=False)
decoded = self._output_ln_dec(decoded)
# (gunho) transformer.py uses tf.float32 for numeric stability.
decoded = tf.cast(decoded, seq_pos_emb.dtype)
next_logits = tf.matmul( # only take the last for sampling next token.
decoded, outp_embedding, transpose_b=True
)[:, -1]
if self._output_bias:
next_logits = tf.nn.bias_add(next_logits, self.outp_bias)
      # Scale and truncate logits and sample the next token.
if sampling_callback:
next_token = sampling_callback(
next_logits, step, temperature, top_k, top_p
)
else:
sampling_logits = next_logits / tf.cast(temperature, tf.float32)
sampling_logits = top_logits(sampling_logits, k=top_k, p=top_p)
next_token = tf.random.categorical(
sampling_logits, num_samples=1, dtype=tf.int32
)[:, 0]
# Update internal states.
next_step = step + (prompt_len if is_prompt else 1)
caches_out = tf.transpose(caches_out, [2, 0, 1, 3])
caches = tf.tensor_scatter_nd_update(caches, [[step]], caches_out)
tokens = tf.tensor_scatter_nd_update(tokens, [[next_step]], [next_token])
logits = tf.tensor_scatter_nd_update(logits, [[next_step]], [next_logits])
return (next_step, caches, tokens, logits)
def cond(step, caches, tokens, logits):
del caches
del tokens
del logits
return tf.less(step, seq_len - 1)
caches_var = tf.zeros(
[seq_len-1, self._num_decoder_layers, bsz, self._hidden_size])
tokens_var = tf.zeros([seq_len, bsz], dtype=tf.int64)
logits_var = tf.zeros([seq_len, bsz, self._vocab_size], dtype=tf.float32)
indices = tf.expand_dims(tf.range(prompt_len), -1)
tokens_var = tf.tensor_scatter_nd_update(
tokens_var, indices, tf.transpose(prompt, [1, 0])
)
step = 0
step, caches_var, tokens_var, logits_var = loop_body(
step, caches_var, tokens_var, logits_var, is_prompt=True
)
if seq_len > prompt_len:
step, caches_var, tokens_var, logits_var = tf.while_loop(
cond=cond,
body=loop_body,
loop_vars=[step, caches_var, tokens_var, logits_var]
)
sampled_tokens = tf.transpose(tokens_var[prompt_len:], [1, 0])
sampled_tokens_logits = tf.transpose(logits_var[prompt_len:], [1, 0, 2])
sampled_tokens_logits = tf.reshape(
sampled_tokens_logits, [bsz, self._max_seq_len, self._vocab_size]
)
# sampled_tokens_logits : [bsz, max_seq_len-prompt_len, vocab_size]
return sampled_tokens, sampled_tokens_logits
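# Illustrative usage sketch (not part of the original file): autoregressive
# inference with the transformer defined above. All tensors and sizes here are
# placeholder assumptions, not values from this module.
#   xformer = Pix2SeqTransformer(
#       max_seq_len=500, vocab_size=3000, hidden_size=256)
#   tokens, logits = xformer.infer({
#       "inputs": encoded_features,  # (bsz, seq', hidden_size)
#       "tokens": prompt_tokens,     # (bsz, prompt_len), integer prompt
#       "pos_emb": pos_emb,          # (bsz, seq', hidden_size)
#   }, temperature=1.0, top_k=0, top_p=0.4)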
| 22,027 | 32.941448 | 80 | py |
models | models-master/official/projects/pix2seq/modeling/transformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specialized Transformers for Pix2Seq.
The position embeddings are added to the query and key for every self- and
cross-attention layer.
"""
import tensorflow as tf
class TransformerEncoder(tf.keras.layers.Layer):
"""Transformer encoder."""
def __init__(
self,
num_layers,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.0,
self_attention=True,
use_ffn_ln=False,
ln_scale_shift=True,
**kwargs
):
super().__init__(**kwargs)
self._num_layers = num_layers
self._dim = dim
self._mlp_ratio = mlp_ratio
self._num_heads = num_heads
self._drop_path = drop_path
self._drop_units = drop_units
self._drop_att = drop_att
self._self_attention = self_attention
self._use_ffn_ln = use_ffn_ln
self._ln_scale_shift = ln_scale_shift
self.enc_layers = [
TransformerEncoderLayer( # pylint: disable=g-complex-comprehension
dim,
mlp_ratio,
num_heads,
drop_path,
drop_units,
drop_att,
self_attention=self_attention,
use_ffn_ln=use_ffn_ln,
ln_scale_shift=ln_scale_shift,
name='transformer_encoder' + suffix_id(i),
)
for i in range(num_layers)
]
def call(self, x, mask, training, ret_list=False):
x_list = [x]
for i in range(self._num_layers):
x = self.enc_layers[i](x, mask, training)
x_list.append(x)
return (x, x_list) if ret_list else x
def get_config(self):
config = super().get_config()
updates = {
'num_layers': self._num_layers,
'dim': self._dim,
'mlp_ratio': self._mlp_ratio,
'num_heads': self._num_heads,
'drop_path': self._drop_path,
'drop_units': self._drop_units,
'drop_att': self._drop_att,
'self_attention': self._self_attention,
'use_ffn_ln': self._use_ffn_ln,
'ln_scale_shift': self._ln_scale_shift,
}
config.update(updates)
return config
class TransformerEncoderLayer(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(
self,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.0,
self_attention=True,
use_ffn_ln=False,
ln_scale_shift=True,
**kwargs
):
super().__init__(**kwargs)
self._dim = dim
self._mlp_ratio = mlp_ratio
self._num_heads = num_heads
self._drop_path = drop_path
self._drop_units = drop_units
self._drop_att = drop_att
self.self_attention = self_attention
self._use_ffn_ln = use_ffn_ln
self._ln_scale_shift = ln_scale_shift
if self_attention:
self.mha_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6,
center=ln_scale_shift,
scale=ln_scale_shift,
name='mha/ln',
)
self.mha = tf.keras.layers.MultiHeadAttention(
num_heads, dim // num_heads, dropout=drop_att, name='mha'
)
self.mlp = MLP(
1,
dim,
mlp_ratio,
drop_path,
drop_units,
use_ffn_ln=use_ffn_ln,
ln_scale_shift=ln_scale_shift,
name='mlp',
)
self.dropp = DropPath(drop_path)
def call(self, x, mask, training):
# x shape (bsz, seq_len, dim_att), mask shape (bsz, seq_len, seq_len).
if self.self_attention:
x_ln = self.mha_ln(x)
x_residual = self.mha(x_ln, x_ln, x_ln, mask, training=training)
x = x + self.dropp(x_residual, training)
x = self.mlp(x, training)
return x
def get_config(self):
config = super().get_config()
updates = {
'dim': self._dim,
'mlp_ratio': self._mlp_ratio,
'num_heads': self._num_heads,
'drop_path': self._drop_path,
'drop_units': self._drop_units,
'drop_att': self._drop_att,
'self_attention': self._self_attention,
'use_ffn_ln': self._use_ffn_ln,
'ln_scale_shift': self._ln_scale_shift,
}
config.update(updates)
return config
def suffix_id(i):
"""Return suffix id for layer/variable name."""
return '' if i == 0 else '_%d' % i
class DropPath(tf.keras.layers.Layer):
"""For stochastic depth."""
def __init__(self, drop_rate=0.0, **kwargs):
"""Initializes a drop path layer."""
super().__init__(**kwargs)
self._drop_rate = drop_rate
if self._drop_rate < 0 or self._drop_rate >= 1.0:
raise ValueError('drop_rate {} is outside [0, 1)'.format(self._drop_rate))
def call(self, x, training=False):
"""Performs a forward pass.
Args:
x: An input tensor of type tf.Tensor with shape [batch, height, width,
channels].
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
The output tensor.
"""
if self._drop_rate == 0.0 or not training:
return x
keep_rate = 1.0 - self._drop_rate
xshape = tf.shape(x)
drop_mask_shape = [xshape[0]] + [1] * (len(xshape) - 1)
drop_mask = keep_rate + tf.random.uniform(drop_mask_shape, dtype=x.dtype)
drop_mask = tf.math.divide(tf.floor(drop_mask), keep_rate)
return x * drop_mask
def get_config(self):
config = super().get_config()
updates = {
'drop_rate': self._drop_rate,
}
config.update(updates)
return config
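# Illustrative sketch (not part of the original file): DropPath (stochastic
# depth) behavior. At inference it is the identity; during training each
# example in the batch is either zeroed or rescaled by 1/keep_rate.
#   drop = DropPath(drop_rate=0.1)
#   x = tf.ones([8, 16, 256])
#   tf.reduce_all(drop(x, training=False) == x)  # True: no-op at eval time
#   y = drop(x, training=True)  # each example is all 0 or all 1/0.9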
class FeedForwardLayer(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(
self,
dim_att=256,
dim_mlp=1024,
drop_units=0.1,
use_ln=False,
ln_scale_shift=False,
**kwargs
):
super().__init__(**kwargs)
self._dim_att = dim_att
self._dim_mlp = dim_mlp
self._drop_units = drop_units
self._use_ln = use_ln
self._ln_scale_shift = ln_scale_shift
self.dense1 = tf.keras.layers.Dense(
dim_mlp, activation=tf.nn.gelu, name='dense1'
)
self.dropout = tf.keras.layers.Dropout(drop_units)
self.dense2 = tf.keras.layers.Dense(dim_att, name='dense2')
if use_ln:
self.ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6,
center=ln_scale_shift,
scale=ln_scale_shift,
name='mlp_ln',
)
else:
self.ln = lambda x: x
def call(self, x, training):
return self.dense2(self.dropout(self.ln(self.dense1(x)), training=training))
def get_config(self):
config = super().get_config()
updates = {
'dim_att': self._dim_att,
'dim_mlp': self._dim_mlp,
'drop_units': self._drop_units,
'use_ln': self._use_ln,
'ln_scale_shift': self._ln_scale_shift,
}
config.update(updates)
return config
class MLP(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(
self,
num_layers,
dim,
mlp_ratio,
drop_path=0.1,
drop_units=0.0,
use_ffn_ln=False,
ln_scale_shift=True,
**kwargs
):
super().__init__(**kwargs)
self._num_layers = num_layers
self._dim = dim
self._mlp_ratio = mlp_ratio
self._drop_path = drop_path
self._drop_units = drop_units
self._use_ffn_ln = use_ffn_ln
self._ln_scale_shift = ln_scale_shift
self.mlp_layers = []
self.layernorms = []
for i in range(num_layers):
self.mlp_layers.append(
FeedForwardLayer(
dim,
dim * mlp_ratio,
drop_units,
use_ln=use_ffn_ln,
ln_scale_shift=ln_scale_shift,
name='ffn' + suffix_id(i),
)
)
self.layernorms.append(
tf.keras.layers.LayerNormalization(
epsilon=1e-6,
center=ln_scale_shift,
scale=ln_scale_shift,
name='ffn/ln' + suffix_id(i),
)
)
self.dropp = DropPath(drop_path)
def call(self, x, training, ret_list=False):
x_list = [x]
for i in range(self._num_layers):
x_residual = self.mlp_layers[i](self.layernorms[i](x), training)
x = x + self.dropp(x_residual, training)
x_list.append(x)
return (x, x_list) if ret_list else x
def get_config(self):
config = super().get_config()
updates = {
'num_layers': self._num_layers,
'dim': self._dim,
'mlp_ratio': self._mlp_ratio,
'drop_path': self._drop_path,
'drop_units': self._drop_units,
'use_ffn_ln': self._use_ffn_ln,
'ln_scale_shift': self._ln_scale_shift,
}
config.update(updates)
return config
class TransformerDecoderLayer(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(
self,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.0,
dim_x_att=None,
self_attention=True,
cross_attention=True,
use_mlp=True,
use_enc_ln=False,
use_ffn_ln=False,
ln_scale_shift=True,
**kwargs
):
super().__init__(**kwargs)
self._dim = dim
self._mlp_ratio = mlp_ratio
self._num_heads = num_heads
self._drop_path = drop_path
self._drop_units = drop_units
self._drop_att = drop_att
self._dim_x_att = dim_x_att
self._self_attention = self_attention
self._cross_attention = cross_attention
self._use_mlp = use_mlp
self._use_enc_ln = use_enc_ln
self._use_ffn_ln = use_ffn_ln
self._ln_scale_shift = ln_scale_shift
if self_attention:
self.self_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6,
center=ln_scale_shift,
scale=ln_scale_shift,
name='self_mha/ln',
)
self.self_mha = tf.keras.layers.MultiHeadAttention(
num_heads, dim // num_heads, dropout=drop_att, name='self_mha'
)
if cross_attention:
self.cross_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6,
center=ln_scale_shift,
scale=ln_scale_shift,
name='cross_mha/ln',
)
if use_enc_ln:
self.enc_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6,
center=ln_scale_shift,
scale=ln_scale_shift,
name='cross_mha/enc_ln',
)
else:
self.enc_ln = lambda x: x
dim_x_att = dim if dim_x_att is None else dim_x_att
self.cross_mha = tf.keras.layers.MultiHeadAttention(
num_heads, dim_x_att // num_heads, dropout=drop_att, name='cross_mha'
)
if use_mlp:
self.mlp = MLP(
1,
dim,
mlp_ratio,
drop_path,
drop_units,
use_ffn_ln=use_ffn_ln,
ln_scale_shift=ln_scale_shift,
name='mlp',
)
self.dropp = DropPath(drop_path)
def call(self, x, enc, cache, mask_self, mask_cross, training):
"""x in (bsz, seq, d), enc in (bsz, seq', d)."""
x_for_cache = []
if self._self_attention:
x_for_cache = x_ln = kv_ln = self.self_ln(x)
if cache is not None: # Augment kv_ln with cache in (bsz, c_size, d).
q_size, k_size = tf.shape(x)[1], tf.shape(cache)[1]
mask_self = tf.concat([tf.ones([1, 1, q_size, k_size]), mask_self], -1)
kv_ln = tf.concat([cache, x_ln], axis=1)
x_res = self.self_mha(x_ln, kv_ln, kv_ln, mask_self, training=training)
x = x + self.dropp(x_res, training)
if self._cross_attention:
x_ln = self.cross_ln(x)
enc = self.enc_ln(enc)
x_res = self.cross_mha(x_ln, enc, enc, mask_cross, training=training)
x = x + self.dropp(x_res, training)
if self._use_mlp:
x = self.mlp(x, training)
return x, x_for_cache
def get_config(self):
config = super().get_config()
updates = {
'dim': self._dim,
'mlp_ratio': self._mlp_ratio,
'num_heads': self._num_heads,
'drop_path': self._drop_path,
'drop_units': self._drop_units,
'drop_att': self._drop_att,
'dim_x_att': self._dim_x_att,
'self_attention': self._self_attention,
'cross_attention': self._cross_attention,
'use_mlp': self._use_mlp,
'use_enc_ln': self._use_enc_ln,
'use_ffn_ln': self._use_ffn_ln,
'ln_scale_shift': self._ln_scale_shift,
}
config.update(updates)
return config
class TransformerDecoder(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(
self,
num_layers,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.0,
dim_x_att=None,
self_attention=True,
cross_attention=True,
use_mlp=True,
use_enc_ln=False,
use_ffn_ln=False,
ln_scale_shift=True,
**kwargs
):
super().__init__(**kwargs)
self._num_layers = num_layers
self._dim = dim
self._mlp_ratio = mlp_ratio
self._num_heads = num_heads
self._drop_path = drop_path
self._drop_units = drop_units
self._drop_att = drop_att
self._dim_x_att = dim_x_att
self._self_attention = self_attention
self._cross_attention = cross_attention
self._use_mlp = use_mlp
self._use_enc_ln = use_enc_ln
self._use_ffn_ln = use_ffn_ln
self._ln_scale_shift = ln_scale_shift
self.dec_layers = [
TransformerDecoderLayer( # pylint: disable=g-complex-comprehension
dim,
mlp_ratio,
num_heads,
drop_path,
drop_units,
drop_att,
dim_x_att=dim_x_att,
self_attention=self_attention,
cross_attention=cross_attention,
use_mlp=use_mlp,
use_enc_ln=use_enc_ln,
use_ffn_ln=use_ffn_ln,
ln_scale_shift=ln_scale_shift,
name='transformer_decoder_layer' + suffix_id(i),
)
for i in range(num_layers)
]
def call(self, x, enc, caches, mask_self, mask_cross, training):
"""x in (bsz, seq, d), enc in (bsz, seq', d)."""
presents = []
for i in range(self._num_layers):
cache = None if caches is None else caches[i]
x, x_for_cache = self.dec_layers[i](
x, enc, cache, mask_self, mask_cross, training
)
presents.append(x_for_cache)
return x, tf.stack(presents)
def get_config(self):
config = super().get_config()
updates = {
'num_layers': self._num_layers,
'dim': self._dim,
'mlp_ratio': self._mlp_ratio,
'num_heads': self._num_heads,
'drop_path': self._drop_path,
'drop_units': self._drop_units,
'drop_att': self._drop_att,
'dim_x_att': self._dim_x_att,
'self_attention': self._self_attention,
'cross_attention': self._cross_attention,
'use_mlp': self._use_mlp,
'use_enc_ln': self._use_enc_ln,
'use_ffn_ln': self._use_ffn_ln,
'ln_scale_shift': self._ln_scale_shift,
}
config.update(updates)
return config
| 15,516 | 27.895717 | 90 | py |
models | models-master/official/projects/pix2seq/tasks/pix2seq_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pix2Seq detection task definition."""
from typing import Optional
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.projects.pix2seq import utils
from official.projects.pix2seq.configs import pix2seq as pix2seq_cfg
from official.projects.pix2seq.dataloaders import pix2seq_input
from official.projects.pix2seq.modeling import pix2seq_model
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import tf_example_decoder
from official.vision.dataloaders import tfds_factory
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.modeling import backbones
@task_factory.register_task_cls(pix2seq_cfg.Pix2SeqTask)
class Pix2SeqTask(base_task.Task):
"""A single-replica view of training procedure.
  Pix2Seq task provides artifacts for training/evaluation procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss,
post-processing, and customized metrics with reduction.
"""
def build_model(self):
"""Build Pix2Seq model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self._task_config.model.input_size
)
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=self._task_config.model.backbone,
norm_activation_config=self._task_config.model.norm_activation,
)
model = pix2seq_model.Pix2Seq(
backbone,
self._task_config.model.backbone_endpoint_name,
self._task_config.model.max_num_instances * 5,
self._task_config.model.vocab_size,
self._task_config.model.hidden_size,
self._task_config.model.num_encoder_layers,
self._task_config.model.num_decoder_layers,
self._task_config.model.drop_path,
self._task_config.model.drop_units,
self._task_config.model.drop_att,
)
return model
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self._task_config.init_checkpoint:
return
ckpt_dir_or_file = self._task_config.init_checkpoint
# Restoring checkpoint.
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if self._task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.restore(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self._task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.restore(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info(
'Finished loading pretrained checkpoint from %s', ckpt_dir_or_file
)
def build_inputs(
self, params, input_context: Optional[tf.distribute.InputContext] = None
):
"""Build input dataset."""
if params.tfds_name:
decoder = tfds_factory.get_detection_decoder(params.tfds_name)
else:
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = tf_example_decoder.TfExampleDecoder(
regenerate_source_id=decoder_cfg.regenerate_source_id
)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
regenerate_source_id=decoder_cfg.regenerate_source_id,
)
else:
raise ValueError(
'Unknown decoder type: {}!'.format(params.decoder.type)
)
parser = pix2seq_input.Parser(
eos_token_weight=self._task_config.losses.eos_token_weight,
output_size=self._task_config.model.input_size[:2],
max_num_boxes=self._task_config.model.max_num_instances,
coord_vocab_shift=self._task_config.coord_vocab_shift,
quantization_bins=self._task_config.quantization_bins,
aug_scale_min=params.aug_scale_min,
aug_scale_max=params.aug_scale_max,
aug_color_jitter_strength=params.aug_color_jitter_strength,
label_shift=params.label_shift,
)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training),
)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self, outputs, labels, aux_losses=None):
"""Builds DETR losses."""
targets = labels['targets']
weights = labels['weights']
targets = tf.one_hot(targets, self._task_config.model.vocab_size)
loss = tf.keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)(targets, outputs)
weights = tf.cast(weights, loss.dtype)
loss = tf.reduce_sum(loss * weights) / tf.reduce_sum(weights)
aux_losses = tf.add_n(aux_losses) if aux_losses else 0.0
total_loss = loss + aux_losses
return total_loss
def build_metrics(self, training=True):
"""Builds detection metrics."""
metrics = []
metric_names = ['loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if not training:
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=self._task_config.annotation_file,
include_mask=False,
need_rescale_bboxes=False,
per_category_metrics=self._task_config.per_category_metrics,
)
return metrics
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
_, outputs = model(features, labels['inputs'], training=True)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
loss = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses
)
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
# Trainer class handles loss metric for you.
logs = {self.loss: loss}
all_losses = {
'loss': loss,
}
# Metric results will be added to logs for you.
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
tokens, logits = model(features, labels['prompt'], training=False)
# loss = self.build_losses(
# outputs=outputs, labels=labels, aux_losses=model.losses)
loss = 0.0
# Multiply for logging.
# Since we expect the gradient replica sum to happen in the optimizer,
# the loss is scaled with global num_boxes and weights.
# To have it more interpretable/comparable we scale it back when logging.
num_replicas_in_sync = tf.distribute.get_strategy().num_replicas_in_sync
loss *= num_replicas_in_sync
# Evaluator class handles loss metric for you.
logs = {self.loss: loss}
outputs = utils.decode_object_seq_to_bbox(
logits,
tokens,
self._task_config.quantization_bins,
self._task_config.coord_vocab_shift,
)
pred_classes, pred_bboxes, scores, pred_num = outputs
image_size = features.shape[1:3].as_list()
# scale points to original image size during eval.
scale = utils.tf_float32(image_size)[tf.newaxis, :] / utils.tf_float32(
labels['image_info'][:, 1:2, :]
)
scale = scale * utils.tf_float32(labels['image_info'][:, 0:1, :])
pred_bboxes = utils.scale_points(pred_bboxes, scale)
predictions = {
'detection_boxes': pred_bboxes,
'detection_scores': scores,
'detection_classes': pred_classes,
'num_detections': pred_num,
'source_id': labels['id'],
'image_info': labels['image_info'],
}
ground_truths = {
'source_id': labels['id'],
'height': labels['image_info'][:, 0:1, 0],
'width': labels['image_info'][:, 0:1, 1],
'num_detections': tf.reduce_sum(
tf.cast(tf.math.greater(labels['classes'], 0), tf.int32), axis=-1
),
'boxes': labels['gt_boxes'],
'classes': labels['classes'],
'is_crowds': labels['is_crowd'],
}
logs.update({'predictions': predictions, 'ground_truths': ground_truths})
all_losses = {
'loss': loss,
}
# Metric results will be added to logs for you.
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if state is None:
self.coco_metric.reset_states()
state = self.coco_metric
state.update_state(
step_outputs['ground_truths'], step_outputs['predictions']
)
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
return aggregated_logs.result()
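# Illustrative sketch (added for exposition; not called by the task above):
# the masked weighted-mean reduction used in build_losses. All tensor values
# here are hypothetical.
def _example_weighted_loss_reduction():
  per_element_loss = tf.constant([1.0, 2.0, 3.0, 4.0])
  weights = tf.constant([1.0, 1.0, 0.0, 0.0])
  # Only the first two elements carry weight, so the result is (1 + 2) / 2.
  return tf.reduce_sum(per_element_loss * weights) / tf.reduce_sum(weights)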
| 10,915 | 33.875399 | 80 | py |
models | models-master/official/projects/assemblenet/modeling/assemblenet_plus.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for the AssembleNet++ [2] models (without object input).
Requires the AssembleNet++ architecture to be specified in
FLAGS.model_structure (and optionally FLAGS.model_edge_weights). This is
identical to the form described in assemblenet.py for the AssembleNet. Please
check assemblenet.py for the detailed format of the model strings.
AssembleNet++ adds `peer-attention' to the basic AssembleNet, which allows each
conv. block connection to be conditioned differently based on another block [2].
It is a form of channel-wise attention. Note that we learn to apply attention
independently for each frame.
The `peer-attention' implementation in this file is the version that enables
one-shot differentiable search of attention connectivity (Fig. 2 in [2]), using
a softmax weighted summation of possible attention vectors.
[2] Michael S. Ryoo, AJ Piergiovanni, Juhana Kangaspunta, Anelia Angelova,
AssembleNet++: Assembling Modality Representations via Attention
Connections. ECCV 2020
https://arxiv.org/abs/2008.08072
In order to take advantage of object inputs, one will need to set the flag
FLAGS.use_object_input as True, and provide the list of input tensors as an
input to the network, as shown in run_asn_with_object.py. This will require a
pre-processed object data stream.
It uses (2+1)D convolutions for video representations. The main AssembleNet++
takes a 4-D (N*T)HWC tensor as an input (i.e., the batch dim and time dim are
mixed), and it reshapes a tensor to NT(H*W)C whenever a 1-D temporal conv. is
necessary. This is to run this on TPU efficiently.
"""
import functools
from typing import Any, Dict, List, Mapping, Optional
from absl import logging
import numpy as np
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.assemblenet.configs import assemblenet as cfg
from official.projects.assemblenet.modeling import assemblenet as asn
from official.projects.assemblenet.modeling import rep_flow_2d_layer as rf
from official.vision.modeling import factory_3d as model_factory
from official.vision.modeling.backbones import factory as backbone_factory
layers = tf.keras.layers
def softmax_merge_peer_attentions(peers):
"""Merge multiple peer-attention vectors with softmax weighted sum.
Summation weights are to be learned.
Args:
peers: A list of `Tensors` of size `[batch*time, channels]`.
Returns:
    The output `Tensor` of size `[batch*time, channels]`.
"""
data_format = tf.keras.backend.image_data_format()
dtype = peers[0].dtype
assert data_format == 'channels_last'
initial_attn_weights = tf.keras.initializers.TruncatedNormal(stddev=0.01)(
[len(peers)])
attn_weights = tf.cast(tf.nn.softmax(initial_attn_weights), dtype)
weighted_peers = []
for i, peer in enumerate(peers):
weighted_peers.append(attn_weights[i] * peer)
return tf.add_n(weighted_peers)
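# Illustrative sketch (hypothetical shapes; not called by the model code):
# merging three per-frame peer-attention vectors of shape [batch*time,
# channels]. The softmax keeps the learned summation weights positive and
# normalized.
def _example_softmax_merge_peer_attentions():
  peers = [tf.random.uniform([4, 16]) for _ in range(3)]
  merged = softmax_merge_peer_attentions(peers)
  assert merged.shape.as_list() == [4, 16]
  return merged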
def apply_attention(inputs,
attention_mode=None,
attention_in=None,
use_5d_mode=False):
"""Applies peer-attention or self-attention to the input tensor.
Depending on the attention_mode, this function either applies channel-wise
self-attention or peer-attention. For the peer-attention, the function
combines multiple candidate attention vectors (given as attention_in), by
learning softmax-sum weights described in the AssembleNet++ paper. Note that
the attention is applied individually for each frame, which showed better
accuracies than using video-level attention.
Args:
inputs: A `Tensor`. Either 4D or 5D, depending of use_5d_mode.
attention_mode: `str` specifying mode. If not `peer', does self-attention.
attention_in: A list of `Tensors' of size [batch*time, channels].
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
Returns:
The output `Tensor` after concatenation.
"""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
if use_5d_mode:
h_channel_loc = 2
else:
h_channel_loc = 1
if attention_mode == 'peer':
attn = softmax_merge_peer_attentions(attention_in)
else:
attn = tf.math.reduce_mean(inputs, [h_channel_loc, h_channel_loc + 1])
attn = tf.keras.layers.Dense(
units=inputs.shape[-1],
kernel_initializer=tf.random_normal_initializer(stddev=.01))(
inputs=attn)
attn = tf.math.sigmoid(attn)
channel_attn = tf.expand_dims(
tf.expand_dims(attn, h_channel_loc), h_channel_loc)
inputs = tf.math.multiply(inputs, channel_attn)
return inputs
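# Illustrative sketch (hypothetical shapes; not called by the model code):
# channel-wise self-attention on a 4D [batch*time, height, width, channels]
# tensor. Each frame is gated by a per-channel sigmoid vector, so the output
# keeps the input shape.
def _example_apply_self_attention():
  x = tf.random.uniform([4, 8, 8, 32])
  gated = apply_attention(x, attention_mode='self')
  assert gated.shape.as_list() == [4, 8, 8, 32]
  return gated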
class _ApplyEdgeWeight(layers.Layer):
"""Multiply weight on each input tensor.
A weight is assigned for each connection (i.e., each input tensor). This layer
is used by the fusion_with_peer_attention to compute the weighted inputs.
"""
def __init__(self,
weights_shape,
index: Optional[int] = None,
use_5d_mode: bool = False,
model_edge_weights: Optional[List[Any]] = None,
num_object_classes: Optional[int] = None,
**kwargs):
"""Constructor.
Args:
      weights_shape: A list of integers. Each element means number of edges.
index: `int` index of the block within the AssembleNet architecture. Used
for summation weight initial loading.
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
model_edge_weights: AssembleNet++ model structure connection weights in
the string format.
      num_object_classes: AssembleNet++ structures can use object inputs, so
        this specifies the number of object classes in the dataset being used
        (e.g. 151 classes for ADE-20k).
**kwargs: pass through arguments.
Returns:
The output `Tensor` after concatenation.
"""
super(_ApplyEdgeWeight, self).__init__(**kwargs)
self._weights_shape = weights_shape
self._index = index
self._use_5d_mode = use_5d_mode
self._model_edge_weights = model_edge_weights
self._num_object_classes = num_object_classes
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
def get_config(self):
config = {
'weights_shape': self._weights_shape,
'index': self._index,
'use_5d_mode': self._use_5d_mode,
'model_edge_weights': self._model_edge_weights,
'num_object_classes': self._num_object_classes
}
base_config = super(_ApplyEdgeWeight, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape: tf.TensorShape):
if self._weights_shape[0] == 1:
self._edge_weights = 1.0
return
if self._index is None or not self._model_edge_weights:
self._edge_weights = self.add_weight(
shape=self._weights_shape,
initializer=tf.keras.initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
trainable=True,
name='agg_weights')
else:
initial_weights_after_sigmoid = np.asarray(
self._model_edge_weights[self._index][0]).astype('float32')
# Initial_weights_after_sigmoid is never 0, as the initial weights are
      # based on the results of a successful connectivity search.
initial_weights = -np.log(1. / initial_weights_after_sigmoid - 1.)
self._edge_weights = self.add_weight(
shape=self._weights_shape,
initializer=tf.constant_initializer(initial_weights),
trainable=False,
name='agg_weights')
def call(self,
inputs: List[tf.Tensor],
training: Optional[bool] = None) -> Mapping[Any, List[tf.Tensor]]:
use_5d_mode = self._use_5d_mode
dtype = inputs[0].dtype
assert len(inputs) > 1
if use_5d_mode:
h_channel_loc = 2
else:
h_channel_loc = 1
# get smallest spatial size and largest channels
sm_size = [10000, 10000]
lg_channel = 0
for inp in inputs:
# assume batch X height x width x channels
sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])
sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc + 1])
# Note that, when using object inputs, object channel sizes are usually
# big. Since we do not want the object channel size to increase the number
# of parameters for every fusion, we exclude it when computing lg_channel.
if inp.shape[-1] > lg_channel and inp.shape[-1] != self._num_object_classes: # pylint: disable=line-too-long
lg_channel = inp.shape[3]
# loads or creates weight variables to fuse multiple inputs
weights = tf.math.sigmoid(tf.cast(self._edge_weights, dtype))
# Compute weighted inputs. We group inputs with the same channels.
per_channel_inps = dict({0: []})
for i, inp in enumerate(inputs):
if inp.shape[h_channel_loc] != sm_size[0] or inp.shape[h_channel_loc + 1] != sm_size[1]: # pylint: disable=line-too-long
assert sm_size[0] != 0
ratio = (inp.shape[h_channel_loc] + 1) // sm_size[0]
if use_5d_mode:
inp = tf.keras.layers.MaxPool3D([1, ratio, ratio], [1, ratio, ratio],
padding='same')(
inp)
else:
inp = tf.keras.layers.MaxPool2D([ratio, ratio], ratio,
padding='same')(
inp)
weights = tf.cast(weights, inp.dtype)
if inp.shape[-1] in per_channel_inps:
per_channel_inps[inp.shape[-1]].append(weights[i] * inp)
else:
per_channel_inps.update({inp.shape[-1]: [weights[i] * inp]})
return per_channel_inps
def fusion_with_peer_attention(inputs: List[tf.Tensor],
index: Optional[int] = None,
attention_mode: Optional[str] = None,
attention_in: Optional[List[tf.Tensor]] = None,
use_5d_mode: bool = False,
model_edge_weights: Optional[List[Any]] = None,
num_object_classes: Optional[int] = None):
"""Weighted summation of multiple tensors, while using peer-attention.
Summation weights are to be learned. Uses spatial max pooling and 1x1 conv.
to match their sizes. Before the summation, each connection (i.e., each input)
itself is scaled with channel-wise peer-attention. Notice that attention is
applied for each connection, conditioned based on attention_in.
Args:
inputs: A list of `Tensors`. Either 4D or 5D, depending of use_5d_mode.
index: `int` index of the block within the AssembleNet architecture. Used
for summation weight initial loading.
attention_mode: `str` specifying mode. If not `peer', does self-attention.
attention_in: A list of `Tensors' of size [batch*time, channels].
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
model_edge_weights: AssembleNet model structure connection weights in the
string format.
    num_object_classes: AssembleNet++ structures can use object inputs, so this
      specifies the number of object classes in the dataset being used (e.g.
      151 classes for ADE-20k).
Returns:
The output `Tensor` after concatenation.
"""
if use_5d_mode:
h_channel_loc = 2
conv_function = asn.conv3d_same_padding
else:
h_channel_loc = 1
conv_function = asn.conv2d_fixed_padding
# If only 1 input.
if len(inputs) == 1:
inputs[0] = apply_attention(inputs[0], attention_mode, attention_in,
use_5d_mode)
return inputs[0]
# get smallest spatial size and largest channels
sm_size = [10000, 10000]
lg_channel = 0
for inp in inputs:
# assume batch X height x width x channels
sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])
sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc + 1])
# Note that, when using object inputs, object channel sizes are usually big.
# Since we do not want the object channel size to increase the number of
# parameters for every fusion, we exclude it when computing lg_channel.
if inp.shape[-1] > lg_channel and inp.shape[-1] != num_object_classes: # pylint: disable=line-too-long
lg_channel = inp.shape[3]
per_channel_inps = _ApplyEdgeWeight(
weights_shape=[len(inputs)],
index=index,
use_5d_mode=use_5d_mode,
model_edge_weights=model_edge_weights)(
inputs)
# Implementation of connectivity with peer-attention
if attention_mode:
for key, channel_inps in per_channel_inps.items():
for idx in range(len(channel_inps)):
with tf.name_scope('Connection_' + str(key) + '_' + str(idx)):
channel_inps[idx] = apply_attention(channel_inps[idx], attention_mode,
attention_in, use_5d_mode)
# Adding 1x1 conv layers (to match channel size) and fusing all inputs.
# We add inputs with the same channels first before applying 1x1 conv to save
# memory.
inps = []
for key, channel_inps in per_channel_inps.items():
if len(channel_inps) < 1:
continue
if len(channel_inps) == 1:
if key == lg_channel:
inp = channel_inps[0]
else:
inp = conv_function(
channel_inps[0], lg_channel, kernel_size=1, strides=1)
inps.append(inp)
else:
if key == lg_channel:
inp = tf.add_n(channel_inps)
else:
        inp = conv_function(
            tf.add_n(channel_inps), lg_channel, kernel_size=1, strides=1)
inps.append(inp)
return tf.add_n(inps)
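# Illustrative sketch (hypothetical shapes; not called by the model code):
# fusing two streams of different spatial resolution with peer-attention.
# Both streams use 64 channels here so the peer vectors broadcast cleanly.
def _example_fusion_with_peer_attention():
  streams = [tf.random.uniform([4, 16, 16, 64]),
             tf.random.uniform([4, 8, 8, 64])]
  peers = [tf.random.uniform([4, 64]) for _ in range(2)]
  fused = fusion_with_peer_attention(
      streams, index=None, attention_mode='peer', attention_in=peers)
  # The larger stream is max-pooled down to the smallest spatial size.
  assert fused.shape.as_list() == [4, 8, 8, 64]
  return fused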
def object_conv_stem(inputs):
"""Layers for an object input stem.
It expects its input tensor to have a separate channel for each object class.
  Each channel should correspond to one object class.
Args:
inputs: A `Tensor`.
Returns:
The output `Tensor`.
"""
inputs = tf.keras.layers.MaxPool2D(
pool_size=4, strides=4, padding='SAME')(
inputs=inputs)
inputs = tf.identity(inputs, 'initial_max_pool')
return inputs
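# Illustrative sketch (hypothetical shapes; not called by the model code):
# the object stem only spatially downsamples a per-class segmentation input,
# keeping one channel per object class (151 is assumed for ADE-20k).
def _example_object_conv_stem():
  obj = tf.random.uniform([4, 64, 64, 151])
  stem = object_conv_stem(obj)
  assert stem.shape.as_list() == [4, 16, 16, 151]
  return stem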
class AssembleNetPlus(tf.keras.Model):
"""AssembleNet++ backbone."""
def __init__(self,
block_fn,
num_blocks: List[int],
num_frames: int,
model_structure: List[Any],
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, None, None, 3]),
model_edge_weights: Optional[List[Any]] = None,
use_object_input: bool = False,
attention_mode: str = 'peer',
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False,
**kwargs):
"""Generator for AssembleNet++ models.
Args:
block_fn: `function` for the block to use within the model. Currently only
        has `bottleneck_block_interleave` as its option.
num_blocks: list of 4 `int`s denoting the number of blocks to include in
each of the 4 block groups. Each group consists of blocks that take
inputs of the same resolution.
num_frames: the number of frames in the input tensor.
model_structure: AssembleNetPlus model structure in the string format.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
Dimension should be `[batch*time, height, width, channels]`.
model_edge_weights: AssembleNet model structure connection weight in the
string format.
      use_object_input: `bool` whether to use object inputs.
      attention_mode: `str` attention mode; use 'peer' for peer-attention,
        otherwise channel-wise self-attention is applied.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
**kwargs: pass through arguments.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the AssembleNetPlus model.
"""
data_format = tf.keras.backend.image_data_format()
# Creation of the model graph.
    logging.info('model_structure=%r', model_structure)
logging.info('model_edge_weights=%r', model_edge_weights)
structure = model_structure
if use_object_input:
original_inputs = tf.keras.Input(shape=input_specs[0].shape[1:])
object_inputs = tf.keras.Input(shape=input_specs[1].shape[1:])
input_specs = input_specs[0]
else:
original_inputs = tf.keras.Input(shape=input_specs.shape[1:])
object_inputs = None
original_num_frames = num_frames
assert num_frames > 0, f'Invalid num_frames {num_frames}'
grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}
for i in range(len(structure)):
grouping[structure[i][0]].append(i)
stem_count = len(grouping[-3]) + len(grouping[-2]) + len(grouping[-1])
assert stem_count != 0
stem_filters = 128 // stem_count
if len(input_specs.shape) == 5:
first_dim = (
input_specs.shape[0] * input_specs.shape[1]
if input_specs.shape[0] and input_specs.shape[1] else -1)
reshape_inputs = tf.reshape(original_inputs,
(first_dim,) + input_specs.shape[2:])
elif len(input_specs.shape) == 4:
reshape_inputs = original_inputs
else:
raise ValueError(
f'Expect input spec to be 4 or 5 dimensions {input_specs.shape}')
if grouping[-2]:
# Instead of loading optical flows as inputs from data pipeline, we are
# applying the "Representation Flow" to RGB frames so that we can compute
# the flow within TPU/GPU on fly. It's essentially optical flow since we
# do it with RGBs.
axis = 3 if data_format == 'channels_last' else 1
flow_inputs = rf.RepresentationFlow(
original_num_frames,
depth=reshape_inputs.shape.as_list()[axis],
num_iter=40,
bottleneck=1)(
reshape_inputs)
streams = []
for i in range(len(structure)):
with tf.name_scope('Node_' + str(i)):
if structure[i][0] == -1:
inputs = asn.rgb_conv_stem(
reshape_inputs,
original_num_frames,
stem_filters,
temporal_dilation=structure[i][1],
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
streams.append(inputs)
elif structure[i][0] == -2:
inputs = asn.flow_conv_stem(
flow_inputs,
stem_filters,
temporal_dilation=structure[i][1],
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
streams.append(inputs)
elif structure[i][0] == -3:
# In order to use the object inputs, you need to feed your object
# input tensor here.
inputs = object_conv_stem(object_inputs)
streams.append(inputs)
else:
block_number = structure[i][0]
combined_inputs = [
streams[structure[i][1][j]]
for j in range(0, len(structure[i][1]))
]
logging.info(grouping)
nodes_below = []
for k in range(-3, structure[i][0]):
nodes_below = nodes_below + grouping[k]
peers = []
if attention_mode:
lg_channel = -1
            # Log nodes_below to show the nodes that can provide attention.
logging.info(nodes_below)
for k in nodes_below:
logging.info(streams[k].shape)
lg_channel = max(streams[k].shape[3], lg_channel)
for node_index in nodes_below:
attn = tf.reduce_mean(streams[node_index], [1, 2])
attn = tf.keras.layers.Dense(
units=lg_channel,
kernel_initializer=tf.random_normal_initializer(stddev=.01))(
inputs=attn)
peers.append(attn)
combined_inputs = fusion_with_peer_attention(
combined_inputs,
index=i,
attention_mode=attention_mode,
attention_in=peers,
use_5d_mode=False)
graph = asn.block_group(
inputs=combined_inputs,
filters=structure[i][2],
block_fn=block_fn,
blocks=num_blocks[block_number],
strides=structure[i][4],
name='block_group' + str(i),
block_level=structure[i][0],
num_frames=num_frames,
temporal_dilation=structure[i][3])
streams.append(graph)
if use_object_input:
inputs = [original_inputs, object_inputs]
else:
inputs = original_inputs
super(AssembleNetPlus, self).__init__(
inputs=inputs, outputs=streams, **kwargs)
@tf.keras.utils.register_keras_serializable(package='Vision')
class AssembleNetPlusModel(tf.keras.Model):
"""An AssembleNet++ model builder."""
def __init__(self,
backbone,
num_classes,
num_frames: int,
model_structure: List[Any],
input_specs: Optional[Dict[str,
tf.keras.layers.InputSpec]] = None,
max_pool_predictions: bool = False,
use_object_input: bool = False,
**kwargs):
if not input_specs:
input_specs = {
'image': layers.InputSpec(shape=[None, None, None, None, 3])
}
if use_object_input and 'object' not in input_specs:
input_specs['object'] = layers.InputSpec(shape=[None, None, None, None])
self._self_setattr_tracking = False
self._config_dict = {
'backbone': backbone,
'num_classes': num_classes,
'num_frames': num_frames,
'input_specs': input_specs,
'model_structure': model_structure,
}
self._input_specs = input_specs
self._backbone = backbone
grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}
for i in range(len(model_structure)):
grouping[model_structure[i][0]].append(i)
inputs = {
k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items()
}
if use_object_input:
streams = self._backbone(inputs=[inputs['image'], inputs['object']])
else:
streams = self._backbone(inputs=inputs['image'])
outputs = asn.multi_stream_heads(
streams,
grouping[3],
num_frames,
num_classes,
max_pool_predictions=max_pool_predictions)
super(AssembleNetPlusModel, self).__init__(
inputs=inputs, outputs=outputs, **kwargs)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone)
@property
def backbone(self):
return self._backbone
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def assemblenet_plus(assemblenet_depth: int,
num_classes: int,
num_frames: int,
model_structure: List[Any],
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, None, None, 3]),
model_edge_weights: Optional[List[Any]] = None,
use_object_input: bool = False,
attention_mode: Optional[str] = None,
max_pool_predictions: bool = False,
**kwargs):
"""Returns the AssembleNet++ model for a given size and number of output classes."""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
if assemblenet_depth not in asn.ASSEMBLENET_SPECS:
raise ValueError('Not a valid assemblenet_depth:', assemblenet_depth)
if use_object_input:
    # assuming input_specs = [video, obj] when use_object_input = True
input_specs_dict = {'image': input_specs[0], 'object': input_specs[1]}
else:
input_specs_dict = {'image': input_specs}
params = asn.ASSEMBLENET_SPECS[assemblenet_depth]
backbone = AssembleNetPlus(
block_fn=params['block'],
num_blocks=params['num_blocks'],
num_frames=num_frames,
model_structure=model_structure,
input_specs=input_specs,
model_edge_weights=model_edge_weights,
use_object_input=use_object_input,
attention_mode=attention_mode,
**kwargs)
return AssembleNetPlusModel(
backbone,
num_classes=num_classes,
num_frames=num_frames,
model_structure=model_structure,
input_specs=input_specs_dict,
use_object_input=use_object_input,
max_pool_predictions=max_pool_predictions,
**kwargs)
@backbone_factory.register_backbone_builder('assemblenet_plus')
def build_assemblenet_plus(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds assemblenet++ backbone."""
del l2_regularizer
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'assemblenet_plus'
assemblenet_depth = int(backbone_cfg.model_id)
if assemblenet_depth not in asn.ASSEMBLENET_SPECS:
raise ValueError('Not a valid assemblenet_depth:', assemblenet_depth)
model_structure, model_edge_weights = cfg.blocks_to_flat_lists(
backbone_cfg.blocks)
params = asn.ASSEMBLENET_SPECS[assemblenet_depth]
block_fn = functools.partial(
params['block'],
use_sync_bn=norm_activation_config.use_sync_bn,
bn_decay=norm_activation_config.norm_momentum,
bn_epsilon=norm_activation_config.norm_epsilon)
backbone = AssembleNetPlus(
block_fn=block_fn,
num_blocks=params['num_blocks'],
num_frames=backbone_cfg.num_frames,
model_structure=model_structure,
input_specs=input_specs,
model_edge_weights=model_edge_weights,
use_object_input=backbone_cfg.use_object_input,
attention_mode=backbone_cfg.attention_mode,
use_sync_bn=norm_activation_config.use_sync_bn,
bn_decay=norm_activation_config.norm_momentum,
bn_epsilon=norm_activation_config.norm_epsilon)
logging.info('Number of parameters in AssembleNet++ backbone: %f M.',
backbone.count_params() / 10.**6)
return backbone
@model_factory.register_model_builder('assemblenet_plus')
def build_assemblenet_plus_model(
input_specs: tf.keras.layers.InputSpec,
model_config: cfg.AssembleNetPlusModel,
num_classes: int,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
"""Builds assemblenet++ model."""
input_specs_dict = {'image': input_specs}
backbone = build_assemblenet_plus(input_specs, model_config.backbone,
model_config.norm_activation,
l2_regularizer)
backbone_cfg = model_config.backbone.get()
model_structure, _ = cfg.blocks_to_flat_lists(backbone_cfg.blocks)
model = AssembleNetPlusModel(
backbone,
num_classes=num_classes,
num_frames=backbone_cfg.num_frames,
model_structure=model_structure,
input_specs=input_specs_dict,
max_pool_predictions=model_config.max_pool_predictions,
use_object_input=backbone_cfg.use_object_input)
return model
| 28,838 | 37.400799 | 127 | py |
models | models-master/official/projects/assemblenet/modeling/assemblenet_plus_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for assemblenet++ network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.assemblenet.configs import assemblenet as asn_config
from official.projects.assemblenet.modeling import assemblenet_plus as asnp
class AssembleNetPlusTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters((50, True, ''), (50, False, ''),
(50, False, 'peer'), (50, True, 'peer'),
(50, True, 'self'), (50, False, 'self'))
def test_network_creation(self, depth, use_object_input, attention_mode):
batch_size = 2
num_frames = 32
img_size = 64
num_classes = 101 # ufc-101
num_object_classes = 151 # 151 is for ADE-20k
if use_object_input:
vid_input = (batch_size * num_frames, img_size, img_size, 3)
obj_input = (batch_size * num_frames, img_size, img_size,
num_object_classes)
input_specs = (tf.keras.layers.InputSpec(shape=(vid_input)),
tf.keras.layers.InputSpec(shape=(obj_input)))
vid_inputs = np.random.rand(batch_size * num_frames, img_size, img_size,
3)
obj_inputs = np.random.rand(batch_size * num_frames, img_size, img_size,
num_object_classes)
inputs = [vid_inputs, obj_inputs]
# We are using the full_asnp50_structure, since we feed both video and
# object.
model_structure = asn_config.full_asnp50_structure # Uses object input.
edge_weights = asn_config.full_asnp_structure_weights
else:
# video input: (batch_size, FLAGS.num_frames, image_size, image_size, 3)
input_specs = tf.keras.layers.InputSpec(
shape=(batch_size, num_frames, img_size, img_size, 3))
inputs = np.random.rand(batch_size, num_frames, img_size, img_size, 3)
# Here, we are using model_structures.asn50_structure for AssembleNet++
# instead of full_asnp50_structure. By using asn50_structure, it
# essentially becomes AssembleNet++ without objects, only requiring RGB
# inputs (and optical flow to be computed inside the model).
model_structure = asn_config.asn50_structure
edge_weights = asn_config.asn_structure_weights
model = asnp.assemblenet_plus(
assemblenet_depth=depth,
num_classes=num_classes,
num_frames=num_frames,
model_structure=model_structure,
model_edge_weights=edge_weights,
input_specs=input_specs,
use_object_input=use_object_input,
attention_mode=attention_mode,
)
outputs = model(inputs)
self.assertAllEqual(outputs.shape.as_list(), [batch_size, num_classes])
if __name__ == '__main__':
tf.test.main()
| 3,389 | 39.843373 | 78 | py |
models | models-master/official/projects/assemblenet/modeling/rep_flow_2d_layer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for 'Representation Flow' layer [1].
Representation flow layer is a generalization of optical flow extraction; the
layer could be inserted anywhere within a CNN to capture feature movements. This
is the version taking 4D tensor with the shape [batch*time, height, width,
channels], to make this run on TPU.
[1] AJ Piergiovanni and Michael S. Ryoo,
Representation Flow for Action Recognition. CVPR 2019.
"""
import numpy as np
import tensorflow as tf
layers = tf.keras.layers
BATCH_NORM_DECAY = 0.99
BATCH_NORM_EPSILON = 1e-5
def build_batch_norm(init_zero: bool = False,
bn_decay: float = BATCH_NORM_DECAY,
bn_epsilon: float = BATCH_NORM_EPSILON,
use_sync_bn: bool = False):
"""Performs a batch normalization followed by a ReLU.
Args:
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0 instead of 1 (default).
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
Returns:
A normalized `Tensor` with the same `data_format`.
"""
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
if data_format == 'channels_first':
axis = 1
else:
axis = -1
if use_sync_bn:
batch_norm = layers.experimental.SyncBatchNormalization(
axis=axis,
momentum=bn_decay,
epsilon=bn_epsilon,
gamma_initializer=gamma_initializer)
else:
batch_norm = layers.BatchNormalization(
axis=axis,
momentum=bn_decay,
epsilon=bn_epsilon,
fused=True,
gamma_initializer=gamma_initializer)
return batch_norm
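# Illustrative sketch (hypothetical shapes; not called by the layer code):
# build_batch_norm returns a configured Keras layer. With init_zero=True the
# scale starts at 0, so a residual branch normalized by it is silent at
# initialization.
def _example_build_batch_norm():
  bn = build_batch_norm(init_zero=True)
  x = tf.random.uniform([4, 8, 8, 32])
  y = bn(x, training=False)
  assert y.shape.as_list() == [4, 8, 8, 32]
  return y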
def divergence(p1, p2, f_grad_x, f_grad_y, name):
"""Computes the divergence value used with TV-L1 optical flow algorithm.
Args:
p1: 'Tensor' input.
p2: 'Tensor' input in the next frame.
f_grad_x: 'Tensor' x gradient of F value used in TV-L1.
f_grad_y: 'Tensor' y gradient of F value used in TV-L1.
name: 'str' name for the variable scope.
Returns:
A `Tensor` with the same `data_format` and shape as input.
"""
data_format = tf.keras.backend.image_data_format()
df = 'NHWC' if data_format == 'channels_last' else 'NCHW'
with tf.name_scope('divergence_' + name):
if data_format == 'channels_last':
p1 = tf.pad(p1[:, :, :-1, :], [[0, 0], [0, 0], [1, 0], [0, 0]])
p2 = tf.pad(p2[:, :-1, :, :], [[0, 0], [1, 0], [0, 0], [0, 0]])
else:
p1 = tf.pad(p1[:, :, :, :-1], [[0, 0], [0, 0], [0, 0], [1, 0]])
p2 = tf.pad(p2[:, :, :-1, :], [[0, 0], [0, 0], [1, 0], [0, 0]])
grad_x = tf.nn.conv2d(p1, f_grad_x, [1, 1, 1, 1], 'SAME', data_format=df)
grad_y = tf.nn.conv2d(p2, f_grad_y, [1, 1, 1, 1], 'SAME', data_format=df)
return grad_x + grad_y
def forward_grad(x, f_grad_x, f_grad_y, name):
data_format = tf.keras.backend.image_data_format()
with tf.name_scope('forward_grad_' + name):
df = 'NHWC' if data_format == 'channels_last' else 'NCHW'
grad_x = tf.nn.conv2d(x, f_grad_x, [1, 1, 1, 1], 'SAME', data_format=df)
grad_y = tf.nn.conv2d(x, f_grad_y, [1, 1, 1, 1], 'SAME', data_format=df)
return grad_x, grad_y
def norm_img(x):
mx = tf.reduce_max(x)
mn = tf.reduce_min(x)
if mx == mn:
return x
else:
return 255 * (x - mn) / (mx - mn)
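# Illustrative sketch (hypothetical values; not called by the layer code):
# norm_img rescales a feature map into the [0, 255] range (and returns it
# unchanged when it is constant), mimicking image intensities for the
# TV-L1-style iterations below.
def _example_norm_img():
  x = tf.constant([[0.0, 0.5], [1.0, 2.0]])
  y = norm_img(x)
  assert abs(float(tf.reduce_max(y)) - 255.0) < 1e-3
  assert abs(float(tf.reduce_min(y))) < 1e-3
  return y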
class RepresentationFlow(layers.Layer):
"""Computes the representation flow motivated by TV-L1 optical flow."""
def __init__(self,
time: int,
depth: int,
num_iter: int = 20,
bottleneck: int = 32,
train_feature_grad: bool = False,
train_divergence: bool = False,
train_flow_grad: bool = False,
train_hyper: bool = False,
**kwargs):
"""Constructor.
Args:
time: 'int' number of frames in the input tensor.
depth: channel depth of the input tensor.
num_iter: 'int' number of iterations to use for the flow computation.
bottleneck: 'int' number of filters to be used for the flow computation.
train_feature_grad: Train image grad params.
train_divergence: train divergence params
train_flow_grad: train flow grad params.
train_hyper: train rep flow hyperparams.
**kwargs: keyword arguments to be passed to the parent constructor.
Returns:
A `Tensor` with the same `data_format` and shape as input.
"""
super(RepresentationFlow, self).__init__(**kwargs)
self._time = time
self._depth = depth
self._num_iter = num_iter
self._bottleneck = bottleneck
self._train_feature_grad = train_feature_grad
self._train_divergence = train_divergence
self._train_flow_grad = train_flow_grad
self._train_hyper = train_hyper
def get_config(self):
config = {
        'time': self._time,
        'depth': self._depth,
'num_iter': self._num_iter,
'bottleneck': self._bottleneck,
'train_feature_grad': self._train_feature_grad,
'train_divergence': self._train_divergence,
'train_flow_grad': self._train_flow_grad,
'train_hyper': self._train_hyper,
}
base_config = super(RepresentationFlow, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape: tf.TensorShape):
img_grad = np.array([-0.5, 0, 0.5], dtype='float32')
img_grad_x = np.repeat(
np.reshape(img_grad, (1, 3, 1, 1)), self._bottleneck, axis=2) * np.eye(
self._bottleneck, dtype='float32')
self.img_grad_x = self.add_weight(
shape=img_grad_x.shape,
initializer=tf.constant_initializer(img_grad_x),
trainable=self._train_feature_grad,
name='img_grad_x')
img_grad_y = np.repeat(
np.reshape(img_grad, (3, 1, 1, 1)), self._bottleneck, axis=2) * np.eye(
self._bottleneck, dtype='float32')
self.img_grad_y = self.add_weight(
shape=img_grad_y.shape,
initializer=tf.constant_initializer(img_grad_y),
trainable=self._train_feature_grad,
name='img_grad_y')
f_grad = np.array([-1, 1], dtype='float32')
f_grad_x = np.repeat(
np.reshape(f_grad, (1, 2, 1, 1)), self._bottleneck, axis=2) * np.eye(
self._bottleneck, dtype='float32')
self.f_grad_x = self.add_weight(
shape=f_grad_x.shape,
initializer=tf.constant_initializer(f_grad_x),
trainable=self._train_divergence,
name='f_grad_x')
f_grad_y = np.repeat(
np.reshape(f_grad, (2, 1, 1, 1)), self._bottleneck, axis=2) * np.eye(
self._bottleneck, dtype='float32')
self.f_grad_y = self.add_weight(
shape=f_grad_y.shape,
initializer=tf.constant_initializer(f_grad_y),
trainable=self._train_divergence,
name='f_grad_y')
f_grad_x2 = np.repeat(
np.reshape(f_grad, (1, 2, 1, 1)), self._bottleneck, axis=2) * np.eye(
self._bottleneck, dtype='float32')
self.f_grad_x2 = self.add_weight(
shape=f_grad_x2.shape,
initializer=tf.constant_initializer(f_grad_x2),
trainable=self._train_flow_grad,
name='f_grad_x2')
f_grad_y2 = np.repeat(
np.reshape(f_grad, (2, 1, 1, 1)), self._bottleneck, axis=2) * np.eye(
self._bottleneck, dtype='float32')
self.f_grad_y2 = self.add_weight(
shape=f_grad_y2.shape,
initializer=tf.constant_initializer(f_grad_y2),
trainable=self._train_flow_grad,
name='f_grad_y2')
self.t = self.add_weight(
name='theta',
initializer=tf.constant_initializer(0.3),
trainable=self._train_hyper)
self.l = self.add_weight(
name='lambda',
initializer=tf.constant_initializer(0.15),
trainable=self._train_hyper)
self.a = self.add_weight(
name='tau',
initializer=tf.constant_initializer(0.25),
trainable=self._train_hyper)
self.t = tf.abs(self.t) + 1e-12
self.l_t = self.l * self.t
self.taut = self.a / self.t
    self._bottleneck_conv1 = None
    self._bottleneck_conv2 = None
if self._bottleneck > 1:
self._bottleneck_conv1 = layers.Conv2D(
filters=self._bottleneck,
kernel_size=1,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=tf.keras.initializers.VarianceScaling(),
name='rf/bottleneck1')
self._bottleneck_conv2 = layers.Conv2D(
filters=self._depth,
kernel_size=1,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=tf.keras.initializers.VarianceScaling(),
name='rf/bottleneck2')
self._batch_norm = build_batch_norm(init_zero=True)
def call(self, inputs: tf.Tensor, training: bool = None) -> tf.Tensor:
"""Perform representation flows.
Args:
      inputs: A `Tensor` of shape `[batch*time, height, width, channels]`.
training: True for training phase.
Returns:
A tensor of the same shape as the inputs.
"""
data_format = tf.keras.backend.image_data_format()
df = 'NHWC' if data_format == 'channels_last' else 'NCHW'
axis = 3 if data_format == 'channels_last' else 1 # channel axis
dtype = inputs.dtype
residual = inputs
depth = inputs.shape.as_list()[axis]
# assert depth == self._depth, f'rep_flow {depth} != {self._depth}'
if self._bottleneck == 1:
inputs = tf.reduce_mean(inputs, axis=axis)
inputs = tf.expand_dims(inputs, -1)
elif depth != self._bottleneck:
inputs = self._bottleneck_conv1(inputs)
input_shape = inputs.shape.as_list()
inp = norm_img(inputs)
inp = tf.reshape(
inp,
(-1, self._time, inputs.shape[1], inputs.shape[2], inputs.shape[3]))
inp = tf.ensure_shape(
inp, (None, self._time, input_shape[1], input_shape[2], input_shape[3]))
img1 = tf.reshape(
inp[:, :-1], (-1, tf.shape(inp)[2], tf.shape(inp)[3], tf.shape(inp)[4]))
img2 = tf.reshape(
inp[:, 1:], (-1, tf.shape(inp)[2], tf.shape(inp)[3], tf.shape(inp)[4]))
img1 = tf.ensure_shape(
img1, (None, inputs.shape[1], inputs.shape[2], inputs.shape[3]))
img2 = tf.ensure_shape(
img2, (None, inputs.shape[1], inputs.shape[2], inputs.shape[3]))
u1 = tf.zeros_like(img1, dtype=dtype)
u2 = tf.zeros_like(img2, dtype=dtype)
l_t = self.l_t
taut = self.taut
grad2_x = tf.nn.conv2d(
img2, self.img_grad_x, [1, 1, 1, 1], 'SAME', data_format=df)
grad2_y = tf.nn.conv2d(
img2, self.img_grad_y, [1, 1, 1, 1], 'SAME', data_format=df)
p11 = tf.zeros_like(img1, dtype=dtype)
p12 = tf.zeros_like(img1, dtype=dtype)
p21 = tf.zeros_like(img1, dtype=dtype)
p22 = tf.zeros_like(img1, dtype=dtype)
gsqx = grad2_x**2
gsqy = grad2_y**2
grad = gsqx + gsqy + 1e-12
rho_c = img2 - grad2_x * u1 - grad2_y * u2 - img1
for _ in range(self._num_iter):
rho = rho_c + grad2_x * u1 + grad2_y * u2 + 1e-12
v1 = tf.zeros_like(img1, dtype=dtype)
v2 = tf.zeros_like(img2, dtype=dtype)
mask1 = rho < -l_t * grad
tmp11 = tf.where(mask1, l_t * grad2_x,
tf.zeros_like(grad2_x, dtype=dtype))
tmp12 = tf.where(mask1, l_t * grad2_y,
tf.zeros_like(grad2_y, dtype=dtype))
mask2 = rho > l_t * grad
tmp21 = tf.where(mask2, -l_t * grad2_x,
tf.zeros_like(grad2_x, dtype=dtype))
tmp22 = tf.where(mask2, -l_t * grad2_y,
tf.zeros_like(grad2_y, dtype=dtype))
mask3 = (~mask1) & (~mask2) & (grad > 1e-12)
tmp31 = tf.where(mask3, (-rho / grad) * grad2_x,
tf.zeros_like(grad2_x, dtype=dtype))
tmp32 = tf.where(mask3, (-rho / grad) * grad2_y,
tf.zeros_like(grad2_y, dtype=dtype))
v1 = tmp11 + tmp21 + tmp31 + u1
v2 = tmp12 + tmp22 + tmp32 + u2
u1 = v1 + self.t * divergence(p11, p12, self.f_grad_x, self.f_grad_y,
'div_p1')
u2 = v2 + self.t * divergence(p21, p22, self.f_grad_x, self.f_grad_y,
'div_p2')
u1x, u1y = forward_grad(u1, self.f_grad_x2, self.f_grad_y2, 'u1')
u2x, u2y = forward_grad(u2, self.f_grad_x2, self.f_grad_y2, 'u2')
p11 = (p11 + taut * u1x) / (1. + taut * tf.sqrt(u1x**2 + u1y**2 + 1e-12))
p12 = (p12 + taut * u1y) / (1. + taut * tf.sqrt(u1x**2 + u1y**2 + 1e-12))
p21 = (p21 + taut * u2x) / (1. + taut * tf.sqrt(u2x**2 + u2y**2 + 1e-12))
p22 = (p22 + taut * u2y) / (1. + taut * tf.sqrt(u2x**2 + u2y**2 + 1e-12))
u1 = tf.reshape(u1, (-1, self._time - 1, tf.shape(u1)[1],
tf.shape(u1)[2], tf.shape(u1)[3]))
u2 = tf.reshape(u2, (-1, self._time - 1, tf.shape(u2)[1],
tf.shape(u2)[2], tf.shape(u2)[3]))
flow = tf.concat([u1, u2], axis=axis + 1)
flow = tf.concat([
flow,
tf.reshape(
flow[:, -1, :, :, :],
(-1, 1, tf.shape(u1)[2], tf.shape(u1)[3], tf.shape(u1)[4] * 2))
],
axis=1)
    # pad by repeating the last frame's flow:
    # [bs, t-1, w, h, 2*c] -> [bs, t, w, h, 2*c]
# flow is [bs, t, w, h, 2*c]
flow = tf.reshape(
flow, (-1, tf.shape(u1)[2], tf.shape(u2)[3], tf.shape(u1)[4] * 2))
    # flow is [bs*t, w, h, 2*c]
if self._bottleneck == 1:
output_shape = residual.shape.as_list()
output_shape[-1] = self._bottleneck * 2
flow = tf.ensure_shape(flow, output_shape)
return flow
else:
flow = self._bottleneck_conv2(flow)
flow = self._batch_norm(flow)
flow = tf.ensure_shape(flow, residual.shape)
return tf.nn.relu(flow + residual)
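# Illustrative sketch (hypothetical shapes; not part of the layer code):
# using the flow layer on 8 frames of 16x16x32 features flattened into the
# (batch*time) leading dimension that the layer expects. A small num_iter and
# bottleneck keep the example cheap.
def _example_representation_flow():
  layer = RepresentationFlow(time=8, depth=32, num_iter=2, bottleneck=8)
  x = tf.random.uniform([2 * 8, 16, 16, 32])
  y = layer(x, training=False)
  # The residual connection keeps the input shape.
  assert y.shape.as_list() == [2 * 8, 16, 16, 32]
  return y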
| 14,584 | 35.012346 | 80 | py |
models | models-master/official/projects/assemblenet/modeling/assemblenet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for the AssembleNet [1] models.
Requires the AssembleNet architecture to be specified in
FLAGS.model_structure (and optionally FLAGS.model_edge_weights).
This structure is a list corresponding to a graph representation of the
network, where a node is a convolutional block and an edge specifies a
connection from one block to another as described in [1].
Each node itself (in the structure list) is a list with the following format:
[block_level, [list_of_input_blocks], number_filter, temporal_dilation,
spatial_stride]. [list_of_input_blocks] should be the list of node indexes whose
values are less than the index of the node itself. The 'stems' of the network
directly taking raw inputs follow a different node format:
[stem_type, temporal_dilation]. The stem_type is -1 for RGB stem and is -2 for
optical flow stem.
Also note that the code in this file could be used for one-shot differentiable
connection search by (1) giving an overly connected structure as
FLAGS.model_structure and by (2) setting FLAGS.model_edge_weights to be '[]'.
The 'agg_weights' variables will specify which connections are needed and which
are not, once trained.
[1] Michael S. Ryoo, AJ Piergiovanni, Mingxing Tan, Anelia Angelova,
AssembleNet: Searching for Multi-Stream Neural Connectivity in Video
Architectures. ICLR 2020
https://arxiv.org/abs/1905.13209
It uses (2+1)D convolutions for video representations. The main AssembleNet
takes a 4-D (N*T)HWC tensor as an input (i.e., the batch dim and time dim are
mixed), and it reshapes a tensor to NT(H*W)C whenever a 1-D temporal conv. is
necessary. This is to run this on TPU efficiently.
"""
import functools
import math
from typing import Any, Callable, List, Mapping, Optional
from absl import logging
import numpy as np
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.assemblenet.configs import assemblenet as cfg
from official.projects.assemblenet.modeling import rep_flow_2d_layer as rf
from official.vision.modeling import factory_3d as model_factory
from official.vision.modeling.backbones import factory as backbone_factory
layers = tf.keras.layers
intermediate_channel_size = [64, 128, 256, 512]
def fixed_padding(inputs, kernel_size):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]` or `[batch,
height, width, channels]` depending on `data_format`.
    kernel_size: `int` kernel size to be used for `conv2d` or `max_pool2d`
operations. Should be a positive integer.
Returns:
A padded `Tensor` of the same `data_format` with size either intact
(if `kernel_size == 1`) or padded (if `kernel_size > 1`).
"""
data_format = tf.keras.backend.image_data_format()
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(
inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(
inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return padded_inputs
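# Illustrative sketch (hypothetical shapes; not called by the model code):
# padding depends only on the kernel size, so a 3x3 kernel always adds two
# rows and two columns in total, split one-before / one-after.
def _example_fixed_padding():
  x = tf.zeros([2, 8, 8, 16])
  padded = fixed_padding(x, kernel_size=3)
  assert padded.shape.as_list() == [2, 10, 10, 16]
  return padded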
def reshape_temporal_conv1d_bn(inputs: tf.Tensor,
filters: int,
kernel_size: int,
num_frames: int = 32,
temporal_dilation: int = 1,
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False):
"""Performs 1D temporal conv.
followed by batch normalization with reshaping.
Args:
inputs: `Tensor` of size `[batch*time, height, width, channels]`. Only
supports 'channels_last' as the data format.
filters: `int` number of filters in the convolution.
    kernel_size: `int` kernel size to be used for `conv2d` or `max_pool2d`
operations. Should be a positive integer.
num_frames: `int` number of frames in the input tensor.
    temporal_dilation: `int` temporal dilation size for the 1D conv.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
Returns:
A padded `Tensor` of the same `data_format` with size either intact
(if `kernel_size == 1`) or padded (if `kernel_size > 1`).
"""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
feature_shape = inputs.shape
inputs = tf.reshape(
inputs,
[-1, num_frames, feature_shape[1] * feature_shape[2], feature_shape[3]])
if temporal_dilation == 1:
inputs = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=(kernel_size, 1),
strides=1,
padding='SAME',
use_bias=False,
kernel_initializer=tf.keras.initializers.VarianceScaling())(
inputs=inputs)
else:
inputs = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=(kernel_size, 1),
strides=1,
padding='SAME',
dilation_rate=(temporal_dilation, 1),
use_bias=False,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=math.sqrt(2.0 / (kernel_size * feature_shape[3]))))(
inputs=inputs)
num_channel = inputs.shape[3]
inputs = tf.reshape(inputs,
[-1, feature_shape[1], feature_shape[2], num_channel])
inputs = rf.build_batch_norm(
bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(
inputs)
inputs = tf.nn.relu(inputs)
return inputs
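# Illustrative sketch (hypothetical shapes; not called by the model code):
# the (2+1)D trick reshapes [batch*time, h, w, c] to [batch, time, h*w, c] so
# the temporal 1D convolution can run as a 2D convolution, then reshapes back.
def _example_reshape_temporal_conv1d_bn():
  x = tf.random.uniform([2 * 8, 8, 8, 16])
  y = reshape_temporal_conv1d_bn(x, filters=32, kernel_size=3, num_frames=8)
  assert y.shape.as_list() == [2 * 8, 8, 8, 32]
  return y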
def conv2d_fixed_padding(inputs: tf.Tensor, filters: int, kernel_size: int,
strides: int):
"""Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.keras.layers.Conv2D` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
Returns:
A `Tensor` of shape `[batch, filters, height_out, width_out]`.
"""
if strides > 1:
inputs = fixed_padding(inputs, kernel_size)
return tf.keras.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
use_bias=False,
kernel_initializer=tf.keras.initializers.VarianceScaling())(
inputs=inputs)
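# Illustrative sketch (hypothetical shapes; not called by the model code):
# with stride 2 the input is explicitly padded first, so the output spatial
# size is ceil(input_size / stride) regardless of kernel-edge effects.
def _example_conv2d_fixed_padding():
  x = tf.random.uniform([2, 9, 9, 8])
  y = conv2d_fixed_padding(x, filters=16, kernel_size=3, strides=2)
  assert y.shape.as_list() == [2, 5, 5, 16]
  return y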
def conv3d_same_padding(inputs: tf.Tensor,
filters: int,
kernel_size: int,
strides: int,
temporal_dilation: int = 1,
do_2d_conv: bool = False):
"""3D convolution layer wrapper.
Uses conv3d function.
Args:
inputs: 5D `Tensor` following the data_format.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
    temporal_dilation: `int` temporal dilation size for the 1D conv.
do_2d_conv: `bool` indicating whether to do 2d conv. If false, do 3D conv.
Returns:
A `Tensor` of shape `[batch, time_in, height_in, width_in, channels]`.
"""
if isinstance(kernel_size, int):
if do_2d_conv:
kernel_size = [1, kernel_size, kernel_size]
else:
kernel_size = [kernel_size, kernel_size, kernel_size]
return tf.keras.layers.Conv3D(
filters=filters,
kernel_size=kernel_size,
strides=[1, strides, strides],
padding='SAME',
dilation_rate=[temporal_dilation, 1, 1],
use_bias=False,
kernel_initializer=tf.keras.initializers.VarianceScaling())(
inputs=inputs)
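# Illustrative sketch (hypothetical shapes; not called by the model code):
# the 3D convolution only strides the spatial dimensions, so the temporal
# length of a [batch, time, h, w, c] input is preserved.
def _example_conv3d_same_padding():
  x = tf.random.uniform([2, 8, 16, 16, 8])
  y = conv3d_same_padding(x, filters=16, kernel_size=3, strides=2)
  assert y.shape.as_list() == [2, 8, 8, 8, 16]
  return y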
def bottleneck_block_interleave(inputs: tf.Tensor,
filters: int,
inter_filters: int,
strides: int,
use_projection: bool = False,
num_frames: int = 32,
temporal_dilation: int = 1,
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False,
step=1):
"""Interleaves a standard 2D residual module and (2+1)D residual module.
Bottleneck block variant for residual networks with BN after convolutions.
Args:
inputs: `Tensor` of size `[batch*time, channels, height, width]`.
filters: `int` number of filters for the first conv. layer. The last conv.
layer will use 4 times as many filters.
inter_filters: `int` number of filters for the second conv. layer.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input spatially.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
num_frames: `int` number of frames in the input tensor.
    temporal_dilation: `int` temporal dilation size for the 1D conv.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
step: `int` to decide whether to put 2D module or (2+1)D module.
Returns:
The output `Tensor` of the block.
"""
if strides > 1 and not use_projection:
    raise ValueError('strides > 1 requires use_projection=True, otherwise the '
'inputs and shortcut will have shape mismatch')
shortcut = inputs
if use_projection:
# Projection shortcut only in first block within a group. Bottleneck blocks
# end with 4 times the number of filters.
filters_out = 4 * filters
shortcut = conv2d_fixed_padding(
inputs=inputs, filters=filters_out, kernel_size=1, strides=strides)
shortcut = rf.build_batch_norm(
bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(
shortcut)
if step % 2 == 1:
k = 3
inputs = reshape_temporal_conv1d_bn(
inputs=inputs,
filters=filters,
kernel_size=k,
num_frames=num_frames,
temporal_dilation=temporal_dilation,
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
else:
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=1, strides=1)
inputs = rf.build_batch_norm(
bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(
inputs)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=inter_filters, kernel_size=3, strides=strides)
inputs = rf.build_batch_norm(
bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(
inputs)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=4 * filters, kernel_size=1, strides=1)
inputs = rf.build_batch_norm(
init_zero=True,
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)(
inputs)
return tf.nn.relu(inputs + shortcut)
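# Illustrative sketch (hypothetical shapes; not called by the model code):
# one interleaved bottleneck block with a projection shortcut, which both
# downsamples spatially (stride 2) and expands channels to 4 * filters.
# step=0 selects the 2D variant; odd steps would use the (2+1)D variant.
def _example_bottleneck_block_interleave():
  x = tf.random.uniform([2 * 8, 16, 16, 64])
  y = bottleneck_block_interleave(
      x, filters=64, inter_filters=64, strides=2, use_projection=True,
      num_frames=8, step=0)
  assert y.shape.as_list() == [2 * 8, 8, 8, 256]
  return y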
def block_group(inputs: tf.Tensor,
filters: int,
block_fn: Callable[..., tf.Tensor],
blocks: int,
strides: int,
name,
block_level,
num_frames=32,
temporal_dilation=1):
"""Creates one group of blocks for the AssembleNett model.
Args:
inputs: `Tensor` of size `[batch*time, channels, height, width]`.
filters: `int` number of filters for the first convolution of the layer.
block_fn: `function` for the block to use within the model
blocks: `int` number of blocks contained in the layer.
strides: `int` stride to use for the first convolution of the layer. If
greater than 1, this layer will downsample the input.
name: `str` name for the Tensor output of the block layer.
block_level: `int` block level in AssembleNet.
num_frames: `int` number of frames in the input tensor.
    temporal_dilation: `int` temporal dilation size for the 1D conv.
Returns:
The output `Tensor` of the block layer.
"""
# Only the first block per block_group uses projection shortcut and strides.
inputs = block_fn(
inputs,
filters,
intermediate_channel_size[block_level],
strides,
use_projection=True,
num_frames=num_frames,
temporal_dilation=temporal_dilation,
step=0)
for i in range(1, blocks):
inputs = block_fn(
inputs,
filters,
intermediate_channel_size[block_level],
1,
num_frames=num_frames,
temporal_dilation=temporal_dilation,
step=i)
return tf.identity(inputs, name)
def spatial_resize_and_concat(inputs):
"""Concatenates multiple different sized tensors channel-wise.
Args:
inputs: A list of `Tensors` of size `[batch*time, channels, height, width]`.
Returns:
The output `Tensor` after concatenation.
"""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
# Do nothing if only 1 input
if len(inputs) == 1:
return inputs[0]
if data_format != 'channels_last':
return inputs
# get smallest spatial size and largest channels
sm_size = [1000, 1000]
for inp in inputs:
# assume batch X height x width x channels
sm_size[0] = min(sm_size[0], inp.shape[1])
sm_size[1] = min(sm_size[1], inp.shape[2])
for i in range(len(inputs)):
if inputs[i].shape[1] != sm_size[0] or inputs[i].shape[2] != sm_size[1]:
ratio = (inputs[i].shape[1] + 1) // sm_size[0]
inputs[i] = tf.keras.layers.MaxPool2D([ratio, ratio],
ratio,
padding='same')(
inputs[i])
return tf.concat(inputs, 3)
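# Illustrative sketch (hypothetical shapes; not called by the model code):
# the larger stream is max-pooled to the smallest spatial size before the
# channel-wise concatenation.
def _example_spatial_resize_and_concat():
  a = tf.random.uniform([4, 16, 16, 8])
  b = tf.random.uniform([4, 8, 8, 16])
  y = spatial_resize_and_concat([a, b])
  assert y.shape.as_list() == [4, 8, 8, 24]
  return y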
class _ApplyEdgeWeight(layers.Layer):
"""Multiply weight on each input tensor.
A weight is assigned for each connection (i.e., each input tensor). This layer
is used by the multi_connection_fusion to compute the weighted inputs.
"""
def __init__(self,
weights_shape,
index: Optional[int] = None,
use_5d_mode: bool = False,
model_edge_weights: Optional[List[Any]] = None,
**kwargs):
"""Constructor.
Args:
      weights_shape: shape of the weights. Should equal [len(inputs)].
index: `int` index of the block within the AssembleNet architecture. Used
for summation weight initial loading.
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
model_edge_weights: AssembleNet model structure connection weights in the
string format.
**kwargs: pass through arguments.
"""
super(_ApplyEdgeWeight, self).__init__(**kwargs)
self._weights_shape = weights_shape
self._index = index
self._use_5d_mode = use_5d_mode
self._model_edge_weights = model_edge_weights
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
def get_config(self):
config = {
'weights_shape': self._weights_shape,
'index': self._index,
'use_5d_mode': self._use_5d_mode,
'model_edge_weights': self._model_edge_weights,
}
base_config = super(_ApplyEdgeWeight, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape: tf.TensorShape):
if self._weights_shape[0] == 1:
self._edge_weights = 1.0
return
if self._index is None or not self._model_edge_weights:
self._edge_weights = self.add_weight(
shape=self._weights_shape,
initializer=tf.keras.initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
trainable=True,
name='agg_weights')
else:
initial_weights_after_sigmoid = np.asarray(
self._model_edge_weights[self._index][0]).astype('float32')
# Initial_weights_after_sigmoid is never 0, as the initial weights are
      # based on the results of a successful connectivity search.
initial_weights = -np.log(1. / initial_weights_after_sigmoid - 1.)
self._edge_weights = self.add_weight(
shape=self._weights_shape,
initializer=tf.constant_initializer(initial_weights),
trainable=False,
name='agg_weights')
def call(self,
inputs: List[tf.Tensor],
training: Optional[bool] = None) -> Mapping[Any, List[tf.Tensor]]:
use_5d_mode = self._use_5d_mode
dtype = inputs[0].dtype
assert len(inputs) > 1
if use_5d_mode:
h_channel_loc = 2
else:
h_channel_loc = 1
# get smallest spatial size and largest channels
sm_size = [10000, 10000]
lg_channel = 0
for inp in inputs:
# assume batch X height x width x channels
sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])
sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc + 1])
lg_channel = max(lg_channel, inp.shape[-1])
# loads or creates weight variables to fuse multiple inputs
weights = tf.math.sigmoid(tf.cast(self._edge_weights, dtype))
# Compute weighted inputs. We group inputs with the same channels.
per_channel_inps = dict({0: []})
for i, inp in enumerate(inputs):
if inp.shape[h_channel_loc] != sm_size[0] or inp.shape[h_channel_loc + 1] != sm_size[1]: # pylint: disable=line-too-long
assert sm_size[0] != 0
ratio = (inp.shape[h_channel_loc] + 1) // sm_size[0]
if use_5d_mode:
inp = tf.keras.layers.MaxPool3D([1, ratio, ratio], [1, ratio, ratio],
padding='same')(
inp)
else:
inp = tf.keras.layers.MaxPool2D([ratio, ratio], ratio,
padding='same')(
inp)
weights = tf.cast(weights, inp.dtype)
if inp.shape[-1] in per_channel_inps:
per_channel_inps[inp.shape[-1]].append(weights[i] * inp)
else:
per_channel_inps.update({inp.shape[-1]: [weights[i] * inp]})
return per_channel_inps
def multi_connection_fusion(inputs: List[tf.Tensor],
index: Optional[int] = None,
use_5d_mode: bool = False,
model_edge_weights: Optional[List[Any]] = None):
"""Do weighted summation of multiple different sized tensors.
A weight is assigned for each connection (i.e., each input tensor), and their
  summation weights are learned. Spatial max pooling and 1x1 convolutions
  are used to match their sizes.
Args:
    inputs: A list of `Tensors`, each either 4D or 5D depending on
      use_5d_mode.
index: `int` index of the block within the AssembleNet architecture. Used
for summation weight initial loading.
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
model_edge_weights: AssembleNet model structure connection weights in the
string format.
Returns:
    The output `Tensor` after weighted summation.
"""
if use_5d_mode:
h_channel_loc = 2
conv_function = conv3d_same_padding
else:
h_channel_loc = 1
conv_function = conv2d_fixed_padding
# If only 1 input.
if len(inputs) == 1:
return inputs[0]
# get smallest spatial size and largest channels
sm_size = [10000, 10000]
lg_channel = 0
for inp in inputs:
# assume batch X height x width x channels
sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])
sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc + 1])
lg_channel = max(lg_channel, inp.shape[-1])
per_channel_inps = _ApplyEdgeWeight(
weights_shape=[len(inputs)],
index=index,
use_5d_mode=use_5d_mode,
model_edge_weights=model_edge_weights)(
inputs)
# Adding 1x1 conv layers (to match channel size) and fusing all inputs.
# We add inputs with the same channels first before applying 1x1 conv to save
# memory.
inps = []
for key, channel_inps in per_channel_inps.items():
if len(channel_inps) < 1:
continue
if len(channel_inps) == 1:
if key == lg_channel:
inp = channel_inps[0]
else:
inp = conv_function(
channel_inps[0], lg_channel, kernel_size=1, strides=1)
inps.append(inp)
else:
if key == lg_channel:
inp = tf.add_n(channel_inps)
else:
inp = conv_function(
tf.add_n(channel_inps), lg_channel, kernel_size=1, strides=1)
inps.append(inp)
return tf.add_n(inps)
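# Illustrative sketch (not part of the original module): weighted fusion of
# two hypothetical streams. Spatial sizes are matched with max pooling and
# channel sizes with 1x1 convolutions, so the fused output takes the smallest
# spatial size and the largest channel count of its inputs:
#
#   a = tf.zeros([2, 56, 56, 64])
#   b = tf.zeros([2, 28, 28, 128])
#   fused = multi_connection_fusion([a, b])  # -> [2, 28, 28, 128]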
def rgb_conv_stem(inputs,
num_frames,
filters,
temporal_dilation,
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False):
"""Layers for a RGB stem.
Args:
inputs: A `Tensor` of size `[batch*time, height, width, channels]`.
num_frames: `int` number of frames in the input tensor.
filters: `int` number of filters in the convolution.
    temporal_dilation: `int` temporal dilation size for the 1D conv.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
Returns:
The output `Tensor`.
"""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
if temporal_dilation < 1:
temporal_dilation = 1
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=7, strides=2)
inputs = tf.identity(inputs, 'initial_conv')
inputs = rf.build_batch_norm(
bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(
inputs)
inputs = tf.nn.relu(inputs)
inputs = reshape_temporal_conv1d_bn(
inputs=inputs,
filters=filters,
kernel_size=5,
num_frames=num_frames,
temporal_dilation=temporal_dilation,
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
inputs = tf.keras.layers.MaxPool2D(
pool_size=3, strides=2, padding='SAME')(
inputs=inputs)
inputs = tf.identity(inputs, 'initial_max_pool')
return inputs
def flow_conv_stem(inputs,
filters,
temporal_dilation,
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False):
"""Layers for an optical flow stem.
Args:
inputs: A `Tensor` of size `[batch*time, height, width, channels]`.
filters: `int` number of filters in the convolution.
    temporal_dilation: `int` temporal dilation size for the 1D conv.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
Returns:
The output `Tensor`.
"""
if temporal_dilation < 1:
temporal_dilation = 1
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=7, strides=2)
inputs = tf.identity(inputs, 'initial_conv')
inputs = rf.build_batch_norm(
bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(
inputs)
inputs = tf.nn.relu(inputs)
inputs = tf.keras.layers.MaxPool2D(
pool_size=2, strides=2, padding='SAME')(
inputs=inputs)
inputs = tf.identity(inputs, 'initial_max_pool')
return inputs
def multi_stream_heads(streams,
final_nodes,
num_frames,
num_classes,
max_pool_predictions: bool = False):
"""Layers for the classification heads.
Args:
streams: A list of 4D `Tensors` following the data_format.
final_nodes: A list of `int` where classification heads will be added.
num_frames: `int` number of frames in the input tensor.
num_classes: `int` number of possible classes for video classification.
max_pool_predictions: Use max-pooling on predictions instead of mean
pooling on features. It helps if you have more than 32 frames.
Returns:
The output `Tensor`.
"""
inputs = streams[final_nodes[0]]
num_channels = inputs.shape[-1]
def _pool_and_reshape(net):
# The activation is 7x7 so this is a global average pool.
net = tf.keras.layers.GlobalAveragePooling2D()(inputs=net)
net = tf.identity(net, 'final_avg_pool0')
net = tf.reshape(net, [-1, num_frames, num_channels])
if not max_pool_predictions:
net = tf.reduce_mean(net, 1)
return net
outputs = _pool_and_reshape(inputs)
for i in range(1, len(final_nodes)):
inputs = streams[final_nodes[i]]
inputs = _pool_and_reshape(inputs)
outputs = outputs + inputs
if len(final_nodes) > 1:
outputs = outputs / len(final_nodes)
outputs = tf.keras.layers.Dense(
units=num_classes,
kernel_initializer=tf.random_normal_initializer(stddev=.01))(
inputs=outputs)
outputs = tf.identity(outputs, 'final_dense0')
if max_pool_predictions:
pre_logits = outputs / np.sqrt(num_frames)
acts = tf.nn.softmax(pre_logits, axis=1)
outputs = tf.math.multiply(outputs, acts)
outputs = tf.reduce_sum(outputs, 1)
return outputs
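# Shape walk-through (illustrative, not part of the original module), assuming
# 7x7 final feature maps: a stream of shape [batch * num_frames, 7, 7, C] is
# globally average-pooled to [batch * num_frames, C] and reshaped to
# [batch, num_frames, C]. With mean pooling the frame axis is averaged before
# the dense classifier; with max_pool_predictions=True the classifier is
# applied per frame and a softmax-weighted sum over frames yields the final
# [batch, num_classes] logits.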
class AssembleNet(tf.keras.Model):
"""AssembleNet backbone."""
def __init__(
self,
block_fn,
num_blocks: List[int],
num_frames: int,
model_structure: List[Any],
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, None, None, 3]),
model_edge_weights: Optional[List[Any]] = None,
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False,
combine_method: str = 'sigmoid',
**kwargs):
"""Generator for AssembleNet v1 models.
Args:
block_fn: `function` for the block to use within the model. Currently only
      has `bottleneck_block_interleave` as its option.
num_blocks: list of 4 `int`s denoting the number of blocks to include in
each of the 4 block groups. Each group consists of blocks that take
inputs of the same resolution.
num_frames: the number of frames in the input tensor.
model_structure: AssembleNet model structure in the string format.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
Dimension should be `[batch*time, height, width, channels]`.
model_edge_weights: AssembleNet model structure connection weights in the
string format.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
    combine_method: `str` specifying how block inputs are fused: 'sigmoid' for
      learned weighted summation or 'concat' for concatenation.
**kwargs: pass through arguments.
"""
inputs = tf.keras.Input(shape=input_specs.shape[1:])
data_format = tf.keras.backend.image_data_format()
# Creation of the model graph.
    logging.info('model_structure=%r', model_structure)
logging.info('model_edge_weights=%r', model_edge_weights)
structure = model_structure
original_num_frames = num_frames
assert num_frames > 0, f'Invalid num_frames {num_frames}'
grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}
for i in range(len(structure)):
grouping[structure[i][0]].append(i)
stem_count = len(grouping[-3]) + len(grouping[-2]) + len(grouping[-1])
assert stem_count != 0
stem_filters = 128 // stem_count
original_inputs = inputs
if len(input_specs.shape) == 5:
first_dim = (
input_specs.shape[0] * input_specs.shape[1]
if input_specs.shape[0] and input_specs.shape[1] else -1)
reshape_inputs = tf.reshape(inputs, (first_dim,) + input_specs.shape[2:])
elif len(input_specs.shape) == 4:
reshape_inputs = original_inputs
else:
raise ValueError(
f'Expect input spec to be 4 or 5 dimensions {input_specs.shape}')
if grouping[-2]:
# Instead of loading optical flows as inputs from data pipeline, we are
# applying the "Representation Flow" to RGB frames so that we can compute
# the flow within TPU/GPU on fly. It's essentially optical flow since we
# do it with RGBs.
axis = 3 if data_format == 'channels_last' else 1
flow_inputs = rf.RepresentationFlow(
original_num_frames,
depth=reshape_inputs.shape.as_list()[axis],
num_iter=40,
bottleneck=1)(
reshape_inputs)
streams = []
for i in range(len(structure)):
with tf.name_scope('Node_' + str(i)):
if structure[i][0] == -1:
inputs = rgb_conv_stem(
reshape_inputs,
original_num_frames,
stem_filters,
temporal_dilation=structure[i][1],
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
streams.append(inputs)
elif structure[i][0] == -2:
inputs = flow_conv_stem(
flow_inputs,
stem_filters,
temporal_dilation=structure[i][1],
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
streams.append(inputs)
else:
num_frames = original_num_frames
block_number = structure[i][0]
combined_inputs = []
if combine_method == 'concat':
combined_inputs = [
streams[structure[i][1][j]]
for j in range(0, len(structure[i][1]))
]
combined_inputs = spatial_resize_and_concat(combined_inputs)
else:
combined_inputs = [
streams[structure[i][1][j]]
for j in range(0, len(structure[i][1]))
]
combined_inputs = multi_connection_fusion(
combined_inputs, index=i, model_edge_weights=model_edge_weights)
graph = block_group(
inputs=combined_inputs,
filters=structure[i][2],
block_fn=block_fn,
blocks=num_blocks[block_number],
strides=structure[i][4],
name='block_group' + str(i),
block_level=structure[i][0],
num_frames=num_frames,
temporal_dilation=structure[i][3])
streams.append(graph)
super(AssembleNet, self).__init__(
inputs=original_inputs, outputs=streams, **kwargs)
class AssembleNetModel(tf.keras.Model):
"""An AssembleNet model builder."""
def __init__(self,
backbone,
num_classes,
num_frames: int,
model_structure: List[Any],
input_specs: Optional[Mapping[str,
tf.keras.layers.InputSpec]] = None,
max_pool_predictions: bool = False,
**kwargs):
if not input_specs:
input_specs = {
'image': layers.InputSpec(shape=[None, None, None, None, 3])
}
self._self_setattr_tracking = False
self._config_dict = {
'backbone': backbone,
'num_classes': num_classes,
'num_frames': num_frames,
'input_specs': input_specs,
'model_structure': model_structure,
}
self._input_specs = input_specs
self._backbone = backbone
grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}
for i in range(len(model_structure)):
grouping[model_structure[i][0]].append(i)
inputs = {
k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items()
}
streams = self._backbone(inputs['image'])
outputs = multi_stream_heads(
streams,
grouping[3],
num_frames,
num_classes,
max_pool_predictions=max_pool_predictions)
super(AssembleNetModel, self).__init__(
inputs=inputs, outputs=outputs, **kwargs)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone)
@property
def backbone(self):
return self._backbone
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
ASSEMBLENET_SPECS = {
26: {
'block': bottleneck_block_interleave,
'num_blocks': [2, 2, 2, 2]
},
38: {
'block': bottleneck_block_interleave,
'num_blocks': [2, 4, 4, 2]
},
50: {
'block': bottleneck_block_interleave,
'num_blocks': [3, 4, 6, 3]
},
68: {
'block': bottleneck_block_interleave,
'num_blocks': [3, 4, 12, 3]
},
77: {
'block': bottleneck_block_interleave,
'num_blocks': [3, 4, 15, 3]
},
101: {
'block': bottleneck_block_interleave,
'num_blocks': [3, 4, 23, 3]
},
}
def assemblenet_v1(assemblenet_depth: int,
num_classes: int,
num_frames: int,
model_structure: List[Any],
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, None, None, 3]),
model_edge_weights: Optional[List[Any]] = None,
max_pool_predictions: bool = False,
combine_method: str = 'sigmoid',
**kwargs):
"""Returns the AssembleNet model for a given size and number of output classes."""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
if assemblenet_depth not in ASSEMBLENET_SPECS:
raise ValueError('Not a valid assemblenet_depth:', assemblenet_depth)
input_specs_dict = {'image': input_specs}
params = ASSEMBLENET_SPECS[assemblenet_depth]
backbone = AssembleNet(
block_fn=params['block'],
num_blocks=params['num_blocks'],
num_frames=num_frames,
model_structure=model_structure,
input_specs=input_specs,
model_edge_weights=model_edge_weights,
combine_method=combine_method,
**kwargs)
return AssembleNetModel(
backbone,
num_classes=num_classes,
num_frames=num_frames,
model_structure=model_structure,
input_specs=input_specs_dict,
max_pool_predictions=max_pool_predictions,
**kwargs)
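# Illustrative usage sketch (not part of the original module). `my_structure`
# is a placeholder name for a flat-list model structure, assumed to come from
# the AssembleNet configs (see cfg.blocks_to_flat_lists):
#
#   model = assemblenet_v1(
#       assemblenet_depth=50,
#       num_classes=157,
#       num_frames=32,
#       model_structure=my_structure,
#       input_specs=layers.InputSpec(shape=[None, 32, 224, 224, 3]))
#   logits = model(tf.zeros([2, 32, 224, 224, 3]))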
@backbone_factory.register_backbone_builder('assemblenet')
def build_assemblenet_v1(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds assemblenet backbone."""
del l2_regularizer
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert 'assemblenet' in backbone_type
assemblenet_depth = int(backbone_cfg.model_id)
if assemblenet_depth not in ASSEMBLENET_SPECS:
raise ValueError('Not a valid assemblenet_depth:', assemblenet_depth)
model_structure, model_edge_weights = cfg.blocks_to_flat_lists(
backbone_cfg.blocks)
params = ASSEMBLENET_SPECS[assemblenet_depth]
block_fn = functools.partial(
params['block'],
use_sync_bn=norm_activation_config.use_sync_bn,
bn_decay=norm_activation_config.norm_momentum,
bn_epsilon=norm_activation_config.norm_epsilon)
backbone = AssembleNet(
block_fn=block_fn,
num_blocks=params['num_blocks'],
num_frames=backbone_cfg.num_frames,
model_structure=model_structure,
input_specs=input_specs,
model_edge_weights=model_edge_weights,
combine_method=backbone_cfg.combine_method,
use_sync_bn=norm_activation_config.use_sync_bn,
bn_decay=norm_activation_config.norm_momentum,
bn_epsilon=norm_activation_config.norm_epsilon)
logging.info('Number of parameters in AssembleNet backbone: %f M.',
backbone.count_params() / 10.**6)
return backbone
@model_factory.register_model_builder('assemblenet')
def build_assemblenet_model(
input_specs: tf.keras.layers.InputSpec,
model_config: cfg.AssembleNetModel,
num_classes: int,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
"""Builds assemblenet model."""
input_specs_dict = {'image': input_specs}
backbone = build_assemblenet_v1(input_specs, model_config.backbone,
model_config.norm_activation, l2_regularizer)
backbone_cfg = model_config.backbone.get()
model_structure, _ = cfg.blocks_to_flat_lists(backbone_cfg.blocks)
model = AssembleNetModel(
backbone,
num_classes=num_classes,
num_frames=backbone_cfg.num_frames,
model_structure=model_structure,
input_specs=input_specs_dict,
max_pool_predictions=model_config.max_pool_predictions)
return model
| 38,298 | 34.593866 | 127 | py |
models | models-master/official/projects/text_classification_example/classification_example.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classifcation Task Showcase."""
import dataclasses
from typing import List, Mapping, Text
from seqeval import metrics as seqeval_metrics
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.modeling import models
from official.nlp.tasks import utils
from official.projects.text_classification_example import classification_data_loader
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A base span labeler configuration."""
encoder: encoders.EncoderConfig = dataclasses.field(
default_factory=encoders.EncoderConfig
)
head_dropout: float = 0.1
head_initializer_range: float = 0.02
@dataclasses.dataclass
class ClassificationExampleConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can be specified.
init_checkpoint: str = ''
hub_module_url: str = ''
  model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
num_classes = 2
class_names = ['A', 'B']
train_data: cfg.DataConfig = dataclasses.field(
default_factory=classification_data_loader.ClassificationExampleDataConfig
)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=classification_data_loader.ClassificationExampleDataConfig
)
class ClassificationExampleTask(base_task.Task):
"""Task object for classification."""
def build_model(self) -> tf.keras.Model:
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
return models.BertClassifier(
network=encoder_network,
num_classes=len(self.task_config.class_names),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=self.task_config.model.head_initializer_range),
dropout_rate=self.task_config.model.head_dropout)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, tf.cast(model_outputs, tf.float32), from_logits=True)
return tf_utils.safe_mean(loss)
def build_inputs(self,
params: cfg.DataConfig,
input_context=None) -> tf.data.Dataset:
"""Returns tf.data.Dataset for sentence_prediction task."""
loader = classification_data_loader.ClassificationDataLoader(params)
return loader.load(input_context)
def inference_step(self, inputs,
model: tf.keras.Model) -> Mapping[str, tf.Tensor]:
"""Performs the forward step."""
logits = model(inputs, training=False)
return {
'logits': logits,
'predict_ids': tf.argmax(logits, axis=-1, output_type=tf.int32)
}
def validation_step(self,
inputs,
model: tf.keras.Model,
metrics=None) -> Mapping[str, tf.Tensor]:
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = self.inference_step(features, model)
loss = self.build_losses(labels=labels, model_outputs=outputs['logits'])
# Negative label ids are padding labels which should be ignored.
real_label_index = tf.where(tf.greater_equal(labels, 0))
predict_ids = tf.gather_nd(outputs['predict_ids'], real_label_index)
label_ids = tf.gather_nd(labels, real_label_index)
return {
self.loss: loss,
'predict_ids': predict_ids,
'label_ids': label_ids,
}
def aggregate_logs(self,
state=None,
step_outputs=None) -> Mapping[Text, List[List[Text]]]:
"""Aggregates over logs returned from a validation step."""
if state is None:
state = {'predict_class': [], 'label_class': []}
def id_to_class_name(batched_ids):
class_names = []
for per_example_ids in batched_ids:
class_names.append([])
for per_token_id in per_example_ids.numpy().tolist():
class_names[-1].append(self.task_config.class_names[per_token_id])
return class_names
# Convert id to class names, because `seqeval_metrics` relies on the class
# name to decide IOB tags.
state['predict_class'].extend(id_to_class_name(step_outputs['predict_ids']))
state['label_class'].extend(id_to_class_name(step_outputs['label_ids']))
return state
def reduce_aggregated_logs(self,
aggregated_logs,
global_step=None) -> Mapping[Text, float]:
"""Reduces aggregated logs over validation steps."""
label_class = aggregated_logs['label_class']
predict_class = aggregated_logs['predict_class']
return {
'f1':
seqeval_metrics.f1_score(label_class, predict_class),
'precision':
seqeval_metrics.precision_score(label_class, predict_class),
'recall':
seqeval_metrics.recall_score(label_class, predict_class),
'accuracy':
seqeval_metrics.accuracy_score(label_class, predict_class),
}
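# Illustrative sketch (not part of the original module): the aggregated state
# holds per-example lists of class-name strings, e.g.
#
#   aggregated_logs = {
#       'label_class': [['A', 'A', 'B'], ['B', 'B']],
#       'predict_class': [['A', 'B', 'B'], ['B', 'B']],
#   }
#
# which reduce_aggregated_logs turns into scalar f1/precision/recall/accuracy
# values via seqeval.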
@exp_factory.register_config_factory('example_bert_classification_example')
def bert_classification_example() -> cfg.ExperimentConfig:
"""Return a minimum experiment config for Bert token classification."""
return cfg.ExperimentConfig(
task=ClassificationExampleConfig(),
trainer=cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
},
'learning_rate': {
'type': 'polynomial',
},
'warmup': {
'type': 'polynomial'
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
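# Illustrative usage sketch (not part of the original module): once the
# factory registration above has been imported, the experiment can be
# instantiated by name, e.g.
#
#   config = exp_factory.get_exp_config('example_bert_classification_example')
#   task = ClassificationExampleTask(config.task)
#   model = task.build_model()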
| 7,073 | 35.84375 | 84 | py |
models | models-master/official/projects/text_classification_example/classification_example_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nlp.projects.example.classification_example."""
import tensorflow as tf
from official.core import config_definitions as cfg
from official.nlp.configs import encoders
from official.projects.text_classification_example import classification_data_loader
from official.projects.text_classification_example import classification_example
class ClassificationExampleTest(tf.test.TestCase):
def get_model_config(self):
return classification_example.ModelConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=2)))
def get_dummy_dataset(self, params: cfg.DataConfig):
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
x = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
y = tf.zeros((1, 1), dtype=tf.int32)
return (x, y)
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def test_task_with_dummy_data(self):
train_data_config = (
classification_data_loader.ClassificationExampleDataConfig(
input_path='dummy', seq_length=128, global_batch_size=1))
task_config = classification_example.ClassificationExampleConfig(
model=self.get_model_config(),)
task = classification_example.ClassificationExampleTask(task_config)
task.build_inputs = self.get_dummy_dataset
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(train_data_config)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.initialize(model)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
if __name__ == '__main__':
tf.test.main()
| 2,496 | 34.671429 | 84 | py |
models | models-master/official/projects/labse/export_tfhub.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Exports the LaBSE model and its preprocessing as SavedModels for TF Hub.
Example usage:
# Point this variable to your training results.
# Note that flag --do_lower_case is inferred from the name.
LaBSE_DIR=<Your LaBSE model dir>
# Step 1: export the core LaBSE model.
python3 ./export_tfhub.py \
--bert_config_file ${LaBSE_DIR:?}/bert_config.json \
--model_checkpoint_path ${LaBSE_DIR:?}/labse_model.ckpt \
--vocab_file ${LaBSE_DIR:?}/vocab.txt \
--export_type model --export_path /tmp/labse_model
# Step 2: export matching preprocessing (be sure to use same flags).
python3 ./export_tfhub.py \
--vocab_file ${LaBSE_DIR:?}/vocab.txt \
--export_type preprocessing --export_path /tmp/labse_preprocessing
"""
from typing import Text
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.legacy.bert import bert_models
from official.legacy.bert import configs
from official.nlp.modeling import models
from official.nlp.tasks import utils
from official.nlp.tools import export_tfhub_lib
FLAGS = flags.FLAGS
flags.DEFINE_enum("export_type", "model", ["model", "preprocessing"],
"The type of model to export")
flags.DEFINE_string("export_path", None, "TF-Hub SavedModel destination path.")
flags.DEFINE_string(
"bert_tfhub_module", None,
"Bert tfhub module to define core bert layers. Needed for --export_type "
"model.")
flags.DEFINE_string(
"bert_config_file", None,
"Bert configuration file to define core bert layers. It will not be used "
"if bert_tfhub_module is set. Needed for --export_type model.")
flags.DEFINE_string(
"model_checkpoint_path", None, "File path to TF model checkpoint. "
"Needed for --export_type model.")
flags.DEFINE_string(
"vocab_file", None,
"The vocabulary file that the BERT model was trained on. "
"Needed for both --export_type model and preprocessing.")
flags.DEFINE_bool(
"do_lower_case", None,
"Whether to lowercase before tokenization. If left as None, "
"do_lower_case will be enabled if 'uncased' appears in the "
"name of --vocab_file. "
"Needed for both --export_type model and preprocessing.")
flags.DEFINE_integer(
"default_seq_length", 128,
"The sequence length of preprocessing results from "
"top-level preprocess method. This is also the default "
"sequence length for the bert_pack_inputs subobject."
"Needed for --export_type preprocessing.")
flags.DEFINE_bool(
"tokenize_with_offsets", False, # TODO(b/181866850)
"Whether to export a .tokenize_with_offsets subobject for "
"--export_type preprocessing.")
flags.DEFINE_bool(
"normalize", True,
"Parameter of DualEncoder model, normalize the embedding (pooled_output) "
"if set to True.")
def _get_do_lower_case(do_lower_case, vocab_file):
"""Returns do_lower_case, replacing None by a guess from vocab file name."""
if do_lower_case is None:
do_lower_case = "uncased" in vocab_file
logging.info("Using do_lower_case=%s based on name of vocab_file=%s",
do_lower_case, vocab_file)
return do_lower_case
def create_labse_model(bert_tfhub_module: Text,
bert_config: configs.BertConfig,
normalize: bool) -> tf.keras.Model:
"""Creates a LaBSE keras core model from BERT configuration.
Args:
bert_tfhub_module: The bert tfhub module path. The LaBSE will be built upon
the tfhub module if it is not empty.
bert_config: A `BertConfig` to create the core model. Used if
bert_tfhub_module is empty.
normalize: Parameter of DualEncoder model, normalize the embedding (
pooled_output) if set to True.
Returns:
    A (labse_model, encoder_network) tuple of `tf.keras.Model`s.
"""
if bert_tfhub_module:
encoder_network = utils.get_encoder_from_hub(bert_tfhub_module)
else:
encoder_network = bert_models.get_transformer_encoder(
bert_config, sequence_length=None)
labse_model = models.DualEncoder(
network=encoder_network,
max_seq_length=None,
normalize=normalize,
output="predictions")
return labse_model, encoder_network # pytype: disable=bad-return-type # typed-keras
def export_labse_model(bert_tfhub_module: Text, bert_config: configs.BertConfig,
model_checkpoint_path: Text, hub_destination: Text,
vocab_file: Text, do_lower_case: bool, normalize: bool):
"""Restores a tf.keras.Model and saves for TF-Hub."""
core_model, encoder = create_labse_model(
bert_tfhub_module, bert_config, normalize)
checkpoint = tf.train.Checkpoint(encoder=encoder)
checkpoint.restore(model_checkpoint_path).assert_existing_objects_matched()
core_model.vocab_file = tf.saved_model.Asset(vocab_file)
core_model.do_lower_case = tf.Variable(do_lower_case, trainable=False)
core_model.save(hub_destination, include_optimizer=False, save_format="tf")
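# Illustrative usage sketch (not part of the original module): the exported
# SavedModel can be consumed as a hub.KerasLayer (tensorflow_hub is assumed to
# be available at load time):
#
#   encoder = hub.KerasLayer('/tmp/labse_model', trainable=False)
#   outputs = encoder([input_word_ids, input_mask, input_type_ids])
#   embeddings = outputs['pooled_output']  # normalized if --normalize=True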
def main(_):
do_lower_case = export_tfhub_lib.get_do_lower_case(FLAGS.do_lower_case,
FLAGS.vocab_file)
if FLAGS.export_type == "model":
if FLAGS.bert_tfhub_module:
bert_config = None
else:
bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)
export_labse_model(FLAGS.bert_tfhub_module, bert_config,
FLAGS.model_checkpoint_path, FLAGS.export_path,
FLAGS.vocab_file, do_lower_case, FLAGS.normalize)
elif FLAGS.export_type == "preprocessing":
# LaBSE is still a BERT model, reuse the export_bert_preprocessing here.
export_tfhub_lib.export_bert_preprocessing(
FLAGS.export_path, FLAGS.vocab_file, do_lower_case,
FLAGS.default_seq_length, FLAGS.tokenize_with_offsets)
else:
    raise app.UsageError(
        "Unknown value '%s' for flag --export_type" % FLAGS.export_type)
if __name__ == "__main__":
app.run(main)
| 6,473 | 38.962963 | 87 | py |
models | models-master/official/projects/labse/export_tfhub_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests LaBSE's export_tfhub."""
import os
# Import libraries
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from official.legacy.bert import configs
from official.projects.labse import export_tfhub
class ExportModelTest(tf.test.TestCase):
def test_export_model(self):
# Exports a savedmodel for TF-Hub
hidden_size = 16
bert_config = configs.BertConfig(
vocab_size=100,
hidden_size=hidden_size,
intermediate_size=32,
max_position_embeddings=128,
num_attention_heads=2,
num_hidden_layers=1)
labse_model, encoder = export_tfhub.create_labse_model(
None, bert_config, normalize=True)
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint = tf.train.Checkpoint(encoder=encoder)
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)
vocab_file = os.path.join(self.get_temp_dir(), "uncased_vocab.txt")
with tf.io.gfile.GFile(vocab_file, "w") as f:
f.write("dummy content")
hub_destination = os.path.join(self.get_temp_dir(), "hub")
export_tfhub.export_labse_model(
None, # bert_tfhub_module
bert_config,
model_checkpoint_path,
hub_destination,
vocab_file,
do_lower_case=True,
normalize=True)
# Restores a hub KerasLayer.
hub_layer = hub.KerasLayer(hub_destination, trainable=True)
if hasattr(hub_layer, "resolved_object"):
# Checks meta attributes.
self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy())
with tf.io.gfile.GFile(
hub_layer.resolved_object.vocab_file.asset_path.numpy()) as f:
self.assertEqual("dummy content", f.read())
# Checks the hub KerasLayer.
for source_weight, hub_weight in zip(labse_model.trainable_weights,
hub_layer.trainable_weights):
self.assertAllClose(source_weight.numpy(), hub_weight.numpy())
seq_length = 10
dummy_ids = np.zeros((2, seq_length), dtype=np.int32)
hub_outputs = hub_layer([dummy_ids, dummy_ids, dummy_ids])
source_outputs = labse_model([dummy_ids, dummy_ids, dummy_ids])
self.assertEqual(hub_outputs["pooled_output"].shape, (2, hidden_size))
self.assertEqual(hub_outputs["sequence_output"].shape,
(2, seq_length, hidden_size))
for output_name in source_outputs:
self.assertAllClose(hub_outputs[output_name].numpy(),
hub_outputs[output_name].numpy())
# Test that training=True makes a difference (activates dropout).
def _dropout_mean_stddev(training, num_runs=20):
input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)
inputs = [input_ids, np.ones_like(input_ids), np.zeros_like(input_ids)]
outputs = np.concatenate([
hub_layer(inputs, training=training)["pooled_output"]
for _ in range(num_runs)
])
return np.mean(np.std(outputs, axis=0))
self.assertLess(_dropout_mean_stddev(training=False), 1e-6)
self.assertGreater(_dropout_mean_stddev(training=True), 1e-3)
# Test propagation of seq_length in shape inference.
input_word_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)
input_mask = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)
input_type_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)
outputs = hub_layer([input_word_ids, input_mask, input_type_ids])
self.assertEqual(outputs["pooled_output"].shape.as_list(),
[None, hidden_size])
self.assertEqual(outputs["sequence_output"].shape.as_list(),
[None, seq_length, hidden_size])
if __name__ == "__main__":
tf.test.main()
| 4,422 | 38.491071 | 79 | py |
models | models-master/official/projects/pruning/tasks/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification task definition."""
from absl import logging
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.core import task_factory
from official.projects.pruning.configs import image_classification as exp_cfg
from official.vision.modeling.backbones import mobilenet
from official.vision.modeling.layers import nn_blocks
from official.vision.tasks import image_classification
@task_factory.register_task_cls(exp_cfg.ImageClassificationTask)
class ImageClassificationTask(image_classification.ImageClassificationTask):
"""A task for image classification with pruning."""
_BLOCK_LAYER_SUFFIX_MAP = {
mobilenet.Conv2DBNBlock: ('conv2d/kernel:0',),
nn_blocks.BottleneckBlock: (
'conv2d/kernel:0',
'conv2d_1/kernel:0',
'conv2d_2/kernel:0',
'conv2d_3/kernel:0',
),
nn_blocks.InvertedBottleneckBlock: (
'conv2d/kernel:0',
'conv2d_1/kernel:0',
'conv2d_2/kernel:0',
'conv2d_3/kernel:0',
'depthwise_conv2d/depthwise_kernel:0',
),
nn_blocks.ResidualBlock: (
'conv2d/kernel:0',
'conv2d_1/kernel:0',
'conv2d_2/kernel:0',
),
}
def build_model(self) -> tf.keras.Model:
"""Builds classification model with pruning."""
model = super(ImageClassificationTask, self).build_model()
if self.task_config.pruning is None:
return model
pruning_cfg = self.task_config.pruning
prunable_model = tf.keras.models.clone_model(
model,
clone_function=self._make_block_prunable,
)
original_checkpoint = pruning_cfg.pretrained_original_checkpoint
if original_checkpoint is not None:
ckpt = tf.train.Checkpoint(model=prunable_model, **model.checkpoint_items)
status = ckpt.read(original_checkpoint)
status.expect_partial().assert_existing_objects_matched()
pruning_params = {}
if pruning_cfg.sparsity_m_by_n is not None:
pruning_params['sparsity_m_by_n'] = pruning_cfg.sparsity_m_by_n
if pruning_cfg.pruning_schedule == 'PolynomialDecay':
pruning_params['pruning_schedule'] = tfmot.sparsity.keras.PolynomialDecay(
initial_sparsity=pruning_cfg.initial_sparsity,
final_sparsity=pruning_cfg.final_sparsity,
begin_step=pruning_cfg.begin_step,
end_step=pruning_cfg.end_step,
frequency=pruning_cfg.frequency)
elif pruning_cfg.pruning_schedule == 'ConstantSparsity':
pruning_params[
'pruning_schedule'] = tfmot.sparsity.keras.ConstantSparsity(
target_sparsity=pruning_cfg.final_sparsity,
begin_step=pruning_cfg.begin_step,
frequency=pruning_cfg.frequency)
else:
raise NotImplementedError(
          'Only PolynomialDecay and ConstantSparsity are currently supported, got: %s'
% pruning_cfg.pruning_schedule)
pruned_model = tfmot.sparsity.keras.prune_low_magnitude(
prunable_model, **pruning_params)
# Print out prunable weights for debugging purpose.
prunable_layers = collect_prunable_layers(pruned_model)
pruned_weights = []
for layer in prunable_layers:
pruned_weights += [weight.name for weight, _, _ in layer.pruning_vars]
unpruned_weights = [
weight.name
for weight in pruned_model.weights
if weight.name not in pruned_weights
]
logging.info(
'%d / %d weights are pruned.\nPruned weights: [ \n%s \n],\n'
'Unpruned weights: [ \n%s \n],',
len(pruned_weights), len(model.weights), ', '.join(pruned_weights),
', '.join(unpruned_weights))
return pruned_model
def _make_block_prunable(
self, layer: tf.keras.layers.Layer) -> tf.keras.layers.Layer:
if isinstance(layer, tf.keras.Model):
return tf.keras.models.clone_model(
layer, input_tensors=None, clone_function=self._make_block_prunable)
if layer.__class__ not in self._BLOCK_LAYER_SUFFIX_MAP:
return layer
prunable_weights = []
for layer_suffix in self._BLOCK_LAYER_SUFFIX_MAP[layer.__class__]:
for weight in layer.weights:
if weight.name.endswith(layer_suffix):
prunable_weights.append(weight)
def get_prunable_weights():
return prunable_weights
layer.get_prunable_weights = get_prunable_weights
return layer
def collect_prunable_layers(model):
"""Recursively collect the prunable layers in the model."""
prunable_layers = []
for layer in model.layers:
if isinstance(layer, tf.keras.Model):
prunable_layers += collect_prunable_layers(layer)
if layer.__class__.__name__ == 'PruneLowMagnitude':
prunable_layers.append(layer)
return prunable_layers
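# Illustrative sketch (not part of the original module): inspecting which
# weights will be pruned after build_model wraps the cloned model with
# tfmot.sparsity.keras.prune_low_magnitude. `task_config` is an assumed
# exp_cfg.ImageClassificationTask instance with a pruning config:
#
#   task = ImageClassificationTask(task_config)
#   pruned_model = task.build_model()
#   for layer in collect_prunable_layers(pruned_model):
#     print([weight.name for weight, _, _ in layer.pruning_vars])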
| 5,362 | 35.236486 | 93 | py |
models | models-master/official/projects/detr/serving/export_module.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export module for DETR model."""
import tensorflow as tf
from official.projects.detr.modeling import detr
from official.vision.modeling import backbones
from official.vision.ops import preprocess_ops
from official.vision.serving import detection
class DETRModule(detection.DetectionModule):
"""DETR detection module."""
def _build_model(self) -> tf.keras.Model:
input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
self._input_image_size +
[self._num_channels])
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=self.params.task.model.backbone,
norm_activation_config=self.params.task.model.norm_activation)
model = detr.DETR(backbone, self.params.task.model.backbone_endpoint_name,
self.params.task.model.num_queries,
self.params.task.model.hidden_size,
self.params.task.model.num_classes,
self.params.task.model.num_encoder_layers,
self.params.task.model.num_decoder_layers)
model(tf.keras.Input(input_specs.shape[1:]))
return model
def _build_inputs(self, image: tf.Tensor) -> tuple[tf.Tensor, tf.Tensor]:
"""Builds detection model inputs for serving."""
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
image, image_info = preprocess_ops.resize_image(
image, size=self._input_image_size)
return image, image_info
def serve(self, images: tf.Tensor) -> dict[str, tf.Tensor]:
"""Cast image to float and run inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
Tensor holding classification output logits.
"""
# Skip image preprocessing when input_type is tflite so it is compatible
# with TFLite quantization.
image_info = None
if self._input_type != 'tflite':
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
images_spec = tf.TensorSpec(
shape=self._input_image_size + [3], dtype=tf.float32)
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs,
elems=images,
fn_output_signature=(images_spec, image_info_spec),
parallel_iterations=32))
outputs = self.inference_step(images)[-1]
outputs = {
'detection_boxes': outputs['detection_boxes'],
'detection_scores': outputs['detection_scores'],
'detection_classes': outputs['detection_classes'],
'num_detections': outputs['num_detections']
}
if image_info is not None:
outputs['detection_boxes'] = outputs['detection_boxes'] * tf.expand_dims(
tf.concat([
image_info[:, 1:2, 0], image_info[:, 1:2, 1],
image_info[:, 1:2, 0], image_info[:, 1:2, 1]
],
axis=1),
axis=1)
outputs.update({'image_info': image_info})
return outputs
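  # Note (illustrative, not part of the original module): `image_info` follows
  # the vision preprocessing convention of a [4, 2] tensor holding
  # [[original height, width], [desired height, width], [y scale, x scale],
  #  [y offset, x offset]]; row 1 (the desired size) is what rescales the
  # relative boxes above to absolute coordinates in the resized image.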
| 3,919 | 36.692308 | 79 | py |
models | models-master/official/projects/detr/modeling/detr.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements End-to-End Object Detection with Transformers.
Model paper: https://arxiv.org/abs/2005.12872
This module does not support Keras de/serialization. Please use
tf.train.Checkpoint for object based saving and loading and tf.saved_model.save
for graph serializaiton.
"""
import math
from typing import Any, List
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.detr.modeling import transformer
from official.vision.ops import box_ops
def position_embedding_sine(attention_mask,
num_pos_features=256,
temperature=10000.,
normalize=True,
scale=2 * math.pi):
"""Sine-based positional embeddings for 2D images.
Args:
attention_mask: a `bool` Tensor specifying the size of the input image to
the Transformer and which elements are padded, of size [batch_size,
height, width]
num_pos_features: a `int` specifying the number of positional features,
should be equal to the hidden size of the Transformer network
temperature: a `float` specifying the temperature of the positional
      embedding. Any type that can be converted to a `float` is also accepted.
normalize: a `bool` determining whether the positional embeddings should be
normalized between [0, scale] before application of the sine and cos
functions.
    scale: a `float`, used when normalize is True, that scales the embeddings
      before the sine and cosine functions are applied.
Returns:
    embeddings: a `float` tensor of shape [batch_size, height, width,
      num_pos_features] containing the positional embeddings based on sine
      features.
"""
if num_pos_features % 2 != 0:
raise ValueError(
"Number of embedding features (num_pos_features) must be even when "
"column and row embeddings are concatenated.")
num_pos_features = num_pos_features // 2
# Produce row and column embeddings based on total size of the image
# <tf.float>[batch_size, height, width]
attention_mask = tf.cast(attention_mask, tf.float32)
row_embedding = tf.cumsum(attention_mask, 1)
col_embedding = tf.cumsum(attention_mask, 2)
if normalize:
eps = 1e-6
row_embedding = row_embedding / (row_embedding[:, -1:, :] + eps) * scale
col_embedding = col_embedding / (col_embedding[:, :, -1:] + eps) * scale
dim_t = tf.range(num_pos_features, dtype=row_embedding.dtype)
dim_t = tf.pow(temperature, 2 * (dim_t // 2) / num_pos_features)
# Creates positional embeddings for each row and column position
# <tf.float>[batch_size, height, width, num_pos_features]
pos_row = tf.expand_dims(row_embedding, -1) / dim_t
pos_col = tf.expand_dims(col_embedding, -1) / dim_t
pos_row = tf.stack(
[tf.sin(pos_row[:, :, :, 0::2]),
tf.cos(pos_row[:, :, :, 1::2])], axis=4)
pos_col = tf.stack(
[tf.sin(pos_col[:, :, :, 0::2]),
tf.cos(pos_col[:, :, :, 1::2])], axis=4)
# final_shape = pos_row.shape.as_list()[:3] + [-1]
final_shape = tf_utils.get_shape_list(pos_row)[:3] + [-1]
pos_row = tf.reshape(pos_row, final_shape)
pos_col = tf.reshape(pos_col, final_shape)
output = tf.concat([pos_row, pos_col], -1)
embeddings = tf.cast(output, tf.float32)
return embeddings
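# Illustrative sketch (not part of the original module): for a fully-valid
# attention mask,
#
#   mask = tf.ones([2, 25, 38], dtype=tf.bool)
#   embed = position_embedding_sine(mask, num_pos_features=256)
#
# `embed` has shape [2, 25, 38, 256]; the first 128 features encode the
# normalized row position and the remaining 128 the column position, as
# interleaved sine/cosine pairs.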
def postprocess(outputs: dict[str, tf.Tensor]) -> dict[str, tf.Tensor]:
"""Performs post-processing on model output.
Args:
outputs: The raw model output.
Returns:
Postprocessed model output.
"""
predictions = {
"detection_boxes": # Box coordinates are relative values here.
box_ops.cycxhw_to_yxyx(outputs["box_outputs"]),
"detection_scores":
tf.math.reduce_max(
tf.nn.softmax(outputs["cls_outputs"])[:, :, 1:], axis=-1),
"detection_classes":
tf.math.argmax(outputs["cls_outputs"][:, :, 1:], axis=-1) + 1,
# Fix this. It's not being used at the moment.
"num_detections":
tf.reduce_sum(
tf.cast(
tf.math.greater(
tf.math.reduce_max(outputs["cls_outputs"], axis=-1), 0),
tf.int32),
axis=-1)
}
return predictions
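# Illustrative sketch (not part of the original module): with `cls_outputs` of
# shape [batch, num_queries, num_classes] and `box_outputs` of shape
# [batch, num_queries, 4] in (cy, cx, h, w) format,
#
#   preds = postprocess({'cls_outputs': logits, 'box_outputs': boxes})
#
# yields per-query boxes in (ymin, xmin, ymax, xmax) format under
# 'detection_boxes', the best non-background softmax score under
# 'detection_scores' and the matching class id under 'detection_classes'.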
class DETR(tf.keras.Model):
"""DETR model with Keras.
DETR consists of backbone, query embedding, DETRTransformer,
class and box heads.
"""
def __init__(self,
backbone,
backbone_endpoint_name,
num_queries,
hidden_size,
num_classes,
num_encoder_layers=6,
num_decoder_layers=6,
dropout_rate=0.1,
**kwargs):
super().__init__(**kwargs)
self._num_queries = num_queries
self._hidden_size = hidden_size
self._num_classes = num_classes
self._num_encoder_layers = num_encoder_layers
self._num_decoder_layers = num_decoder_layers
self._dropout_rate = dropout_rate
if hidden_size % 2 != 0:
raise ValueError("hidden_size must be a multiple of 2.")
self._backbone = backbone
self._backbone_endpoint_name = backbone_endpoint_name
def build(self, input_shape=None):
self._input_proj = tf.keras.layers.Conv2D(
self._hidden_size, 1, name="detr/conv2d")
self._build_detection_decoder()
super().build(input_shape)
def _build_detection_decoder(self):
"""Builds detection decoder."""
self._transformer = DETRTransformer(
num_encoder_layers=self._num_encoder_layers,
num_decoder_layers=self._num_decoder_layers,
dropout_rate=self._dropout_rate)
self._query_embeddings = self.add_weight(
"detr/query_embeddings",
shape=[self._num_queries, self._hidden_size],
initializer=tf.keras.initializers.RandomNormal(mean=0., stddev=1.),
dtype=tf.float32)
sqrt_k = math.sqrt(1.0 / self._hidden_size)
self._class_embed = tf.keras.layers.Dense(
self._num_classes,
kernel_initializer=tf.keras.initializers.RandomUniform(-sqrt_k, sqrt_k),
name="detr/cls_dense")
self._bbox_embed = [
tf.keras.layers.Dense(
self._hidden_size, activation="relu",
kernel_initializer=tf.keras.initializers.RandomUniform(
-sqrt_k, sqrt_k),
name="detr/box_dense_0"),
tf.keras.layers.Dense(
self._hidden_size, activation="relu",
kernel_initializer=tf.keras.initializers.RandomUniform(
-sqrt_k, sqrt_k),
name="detr/box_dense_1"),
tf.keras.layers.Dense(
4, kernel_initializer=tf.keras.initializers.RandomUniform(
-sqrt_k, sqrt_k),
name="detr/box_dense_2")]
self._sigmoid = tf.keras.layers.Activation("sigmoid")
@property
def backbone(self) -> tf.keras.Model:
return self._backbone
def get_config(self):
return {
"backbone": self._backbone,
"backbone_endpoint_name": self._backbone_endpoint_name,
"num_queries": self._num_queries,
"hidden_size": self._hidden_size,
"num_classes": self._num_classes,
"num_encoder_layers": self._num_encoder_layers,
"num_decoder_layers": self._num_decoder_layers,
"dropout_rate": self._dropout_rate,
}
@classmethod
def from_config(cls, config):
return cls(**config)
def _generate_image_mask(self, inputs: tf.Tensor,
target_shape: tf.Tensor) -> tf.Tensor:
"""Generates image mask from input image."""
mask = tf.expand_dims(
tf.cast(tf.not_equal(tf.reduce_sum(inputs, axis=-1), 0), inputs.dtype),
axis=-1)
mask = tf.image.resize(
mask, target_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return mask
def call(self, inputs: tf.Tensor, training: bool = None) -> List[Any]: # pytype: disable=signature-mismatch # overriding-parameter-count-checks
batch_size = tf.shape(inputs)[0]
features = self._backbone(inputs)[self._backbone_endpoint_name]
shape = tf.shape(features)
mask = self._generate_image_mask(inputs, shape[1: 3])
pos_embed = position_embedding_sine(
mask[:, :, :, 0], num_pos_features=self._hidden_size)
pos_embed = tf.reshape(pos_embed, [batch_size, -1, self._hidden_size])
features = tf.reshape(
self._input_proj(features), [batch_size, -1, self._hidden_size])
mask = tf.reshape(mask, [batch_size, -1])
decoded_list = self._transformer({
"inputs":
features,
"targets":
tf.tile(
tf.expand_dims(self._query_embeddings, axis=0),
(batch_size, 1, 1)),
"pos_embed": pos_embed,
"mask": mask,
})
out_list = []
for decoded in decoded_list:
decoded = tf.stack(decoded)
output_class = self._class_embed(decoded)
box_out = decoded
for layer in self._bbox_embed:
box_out = layer(box_out)
output_coord = self._sigmoid(box_out)
out = {"cls_outputs": output_class, "box_outputs": output_coord}
if not training:
out.update(postprocess(out))
out_list.append(out)
return out_list
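# Illustrative usage sketch (not part of the original module). `my_backbone`
# is an assumed vision backbone (e.g. a ResNet built with the vision backbones
# factory) exposing the named endpoint:
#
#   model = DETR(my_backbone, backbone_endpoint_name='5', num_queries=100,
#                hidden_size=256, num_classes=91)
#   outputs = model(tf.zeros([2, 640, 640, 3]), training=False)
#   detections = outputs[-1]  # predictions from the last decoder layer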
class DETRTransformer(tf.keras.layers.Layer):
"""Encoder and Decoder of DETR."""
def __init__(self, num_encoder_layers=6, num_decoder_layers=6,
dropout_rate=0.1, **kwargs):
super().__init__(**kwargs)
self._dropout_rate = dropout_rate
self._num_encoder_layers = num_encoder_layers
self._num_decoder_layers = num_decoder_layers
def build(self, input_shape=None):
if self._num_encoder_layers > 0:
self._encoder = transformer.TransformerEncoder(
attention_dropout_rate=self._dropout_rate,
dropout_rate=self._dropout_rate,
intermediate_dropout=self._dropout_rate,
norm_first=False,
num_layers=self._num_encoder_layers)
else:
self._encoder = None
self._decoder = transformer.TransformerDecoder(
attention_dropout_rate=self._dropout_rate,
dropout_rate=self._dropout_rate,
intermediate_dropout=self._dropout_rate,
norm_first=False,
num_layers=self._num_decoder_layers)
super().build(input_shape)
def get_config(self):
return {
"num_encoder_layers": self._num_encoder_layers,
"num_decoder_layers": self._num_decoder_layers,
"dropout_rate": self._dropout_rate,
}
def call(self, inputs):
sources = inputs["inputs"]
targets = inputs["targets"]
pos_embed = inputs["pos_embed"]
mask = inputs["mask"]
input_shape = tf_utils.get_shape_list(sources)
source_attention_mask = tf.tile(
tf.expand_dims(mask, axis=1), [1, input_shape[1], 1])
if self._encoder is not None:
memory = self._encoder(
sources, attention_mask=source_attention_mask, pos_embed=pos_embed)
else:
memory = sources
target_shape = tf_utils.get_shape_list(targets)
cross_attention_mask = tf.tile(
tf.expand_dims(mask, axis=1), [1, target_shape[1], 1])
target_shape = tf.shape(targets)
decoded = self._decoder(
tf.zeros_like(targets),
memory,
# TODO(b/199545430): self_attention_mask could be set to None when this
# bug is resolved. Passing ones for now.
self_attention_mask=tf.ones(
(target_shape[0], target_shape[1], target_shape[1])),
cross_attention_mask=cross_attention_mask,
return_all_decoder_outputs=True,
input_pos_embed=targets,
memory_pos_embed=pos_embed)
return decoded
| 12,128 | 35.643505 | 147 | py |
models | models-master/official/projects/detr/modeling/transformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specialized Transformers for DETR.
the position embeddings are added to the query and key for every self- and
cross-attention layer.
"""
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling import models
class TransformerEncoder(tf.keras.layers.Layer):
"""Transformer encoder.
Transformer encoder is made up of N identical layers. Each layer is composed
of the sublayers:
1. Self-attention layer
2. Feedforward network (which is 2 fully-connected layers)
"""
def __init__(self,
num_layers=6,
num_attention_heads=8,
intermediate_size=2048,
activation="relu",
dropout_rate=0.0,
attention_dropout_rate=0.0,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
intermediate_dropout=0.0,
**kwargs):
"""Initialize a Transformer encoder.
Args:
num_layers: Number of layers.
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate (Feedforward) layer.
activation: Activation for the intermediate layer.
dropout_rate: Dropout probability.
attention_dropout_rate: Dropout probability for attention layers.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
intermediate_dropout: Dropout probability for intermediate_dropout_layer.
      **kwargs: keyword arguments passed to tf.keras.layers.Layer.
"""
super(TransformerEncoder, self).__init__(**kwargs)
self.num_layers = num_layers
self.num_attention_heads = num_attention_heads
self._intermediate_size = intermediate_size
self._activation = activation
self._dropout_rate = dropout_rate
self._attention_dropout_rate = attention_dropout_rate
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._intermediate_dropout = intermediate_dropout
def build(self, input_shape):
"""Implements build() for the layer."""
self.encoder_layers = []
for i in range(self.num_layers):
self.encoder_layers.append(
TransformerEncoderBlock(
num_attention_heads=self.num_attention_heads,
inner_dim=self._intermediate_size,
inner_activation=self._activation,
output_dropout=self._dropout_rate,
attention_dropout=self._attention_dropout_rate,
use_bias=self._use_bias,
norm_first=self._norm_first,
norm_epsilon=self._norm_epsilon,
inner_dropout=self._intermediate_dropout,
attention_initializer=tf_utils.clone_initializer(
models.seq2seq_transformer.attention_initializer(
input_shape[2])),
name=("layer_%d" % i)))
self.output_normalization = tf.keras.layers.LayerNormalization(
epsilon=self._norm_epsilon, dtype="float32")
super(TransformerEncoder, self).build(input_shape)
def get_config(self):
config = {
"num_layers": self.num_layers,
"num_attention_heads": self.num_attention_heads,
"intermediate_size": self._intermediate_size,
"activation": self._activation,
"dropout_rate": self._dropout_rate,
"attention_dropout_rate": self._attention_dropout_rate,
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"intermediate_dropout": self._intermediate_dropout
}
base_config = super(TransformerEncoder, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, encoder_inputs, attention_mask=None, pos_embed=None):
"""Return the output of the encoder.
Args:
encoder_inputs: A tensor with shape `(batch_size, input_length,
hidden_size)`.
attention_mask: A mask for the encoder self-attention layer with shape
`(batch_size, input_length, input_length)`.
pos_embed: Position embedding to add to every encoder layer.
Returns:
Output of encoder which is a `float32` tensor with shape
`(batch_size, input_length, hidden_size)`.
"""
for layer_idx in range(self.num_layers):
encoder_inputs = self.encoder_layers[layer_idx](
[encoder_inputs, attention_mask, pos_embed])
output_tensor = encoder_inputs
output_tensor = self.output_normalization(output_tensor)
return output_tensor
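# The sketch below is an illustrative, non-authoritative usage example of
# `TransformerEncoder`. All sizes (batch, sequence length, hidden size) and the
# random inputs are assumptions made only for this sketch; in DETR the inputs
# come from flattened backbone features plus a position embedding.
def _transformer_encoder_usage_sketch():
  batch_size, length, hidden_size = 2, 49, 256
  encoder = TransformerEncoder(
      num_layers=2, num_attention_heads=8, intermediate_size=512)
  features = tf.random.uniform((batch_size, length, hidden_size))
  pos_embed = tf.random.uniform((batch_size, length, hidden_size))
  attention_mask = tf.ones((batch_size, length, length))
  # `pos_embed` is added to the query and key inside every encoder layer; the
  # output keeps the `(batch_size, length, hidden_size)` shape.
  return encoder(
      features, attention_mask=attention_mask, pos_embed=pos_embed)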
class TransformerEncoderBlock(tf.keras.layers.Layer):
"""TransformerEncoderBlock layer.
This layer implements the Transformer Encoder from
"Attention Is All You Need". (https://arxiv.org/abs/1706.03762),
which combines a `tf.keras.layers.MultiHeadAttention` layer with a
two-layer feedforward network. The only difference: position embedding is
added to the query and key of self-attention.
References:
[Attention Is All You Need](https://arxiv.org/abs/1706.03762)
[BERT: Pre-training of Deep Bidirectional Transformers for Language
Understanding](https://arxiv.org/abs/1810.04805)
"""
def __init__(self,
num_attention_heads,
inner_dim,
inner_activation,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
**kwargs):
"""Initializes `TransformerEncoderBlock`.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
      **kwargs: keyword arguments passed to tf.keras.layers.Layer.
"""
super().__init__(**kwargs)
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._attention_dropout = attention_dropout
self._attention_dropout_rate = attention_dropout
self._output_dropout = output_dropout
self._output_dropout_rate = output_dropout
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer)
self._attention_axes = attention_axes
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s" %
type(input_shape))
einsum_equation = "abc,cd->abd"
if len(input_tensor_shape.as_list()) > 3:
einsum_equation = "...bc,cd->...bd"
hidden_size = input_tensor_shape[-1]
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._attention_layer = tf.keras.layers.MultiHeadAttention(
num_heads=self._num_heads,
key_dim=self._attention_head_size,
dropout=self._attention_dropout,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
attention_axes=self._attention_axes,
name="self_attention",
**common_kwargs)
self._attention_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._intermediate_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
name="intermediate",
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy)
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout)
self._output_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
**common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32)
super(TransformerEncoderBlock, self).build(input_shape)
def get_config(self):
config = {
"num_attention_heads": self._num_heads,
"inner_dim": self._inner_dim,
"inner_activation": self._inner_activation,
"output_dropout": self._output_dropout_rate,
"attention_dropout": self._attention_dropout_rate,
"output_range": self._output_range,
"kernel_initializer": tf_utils.serialize_initializer(
self._kernel_initializer, use_legacy_format=True
),
"bias_initializer": tf_utils.serialize_initializer(
self._bias_initializer, use_legacy_format=True
),
"kernel_regularizer": tf_utils.serialize_regularizer(
self._kernel_regularizer, use_legacy_format=True
),
"bias_regularizer": tf_utils.serialize_regularizer(
self._bias_regularizer, use_legacy_format=True
),
"activity_regularizer": tf_utils.serialize_regularizer(
self._activity_regularizer, use_legacy_format=True
),
"kernel_constraint": tf_utils.serialize_constraint(
self._kernel_constraint, use_legacy_format=True
),
"bias_constraint": tf_utils.serialize_constraint(
self._bias_constraint, use_legacy_format=True
),
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"inner_dropout": self._inner_dropout,
"attention_initializer": tf_utils.serialize_initializer(
self._attention_initializer, use_legacy_format=True
),
"attention_axes": self._attention_axes,
}
base_config = super(TransformerEncoderBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Transformer self-attention encoder block call.
Args:
      inputs: a list of three tensors [`input tensor`, `attention mask`,
        `position embedding`]. `attention mask` may be None to attend over all
        positions; `position embedding` is added to the query and key of the
        self-attention layer.
Returns:
An output tensor with the same dimensions as input/query tensor.
"""
input_tensor, attention_mask, pos_embed = inputs
key_value = None
if self._output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:self._output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor[:, 0:self._output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:self._output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor + pos_embed,
key=key_value + pos_embed,
value=key_value,
attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
attention_output = source_tensor + attention_output
else:
attention_output = self._attention_layer_norm(target_tensor +
attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
return source_attention_output + layer_output
# During mixed precision training, layer norm output is always fp32 for now.
# Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
return self._output_layer_norm(layer_output + attention_output)
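# A minimal, illustrative sketch of calling one `TransformerEncoderBlock`
# directly. The block takes a 3-element list `[inputs, attention_mask,
# pos_embed]`; the sizes below are assumptions chosen only to keep the example
# small.
def _transformer_encoder_block_usage_sketch():
  batch_size, length, hidden_size = 2, 10, 64
  block = TransformerEncoderBlock(
      num_attention_heads=4, inner_dim=128, inner_activation="relu")
  inputs = tf.random.uniform((batch_size, length, hidden_size))
  pos_embed = tf.random.uniform((batch_size, length, hidden_size))
  attention_mask = tf.ones((batch_size, length, length))
  # The output keeps the `(batch_size, length, hidden_size)` shape of `inputs`.
  return block([inputs, attention_mask, pos_embed])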
class TransformerDecoder(tf.keras.layers.Layer):
"""Transformer decoder.
Like the encoder, the decoder is made up of N identical layers.
Each layer is composed of the sublayers:
1. Self-attention layer
2. Multi-headed attention layer combining encoder outputs with results from
the previous self-attention layer.
3. Feedforward network (2 fully-connected layers)
"""
def __init__(self,
num_layers=6,
num_attention_heads=8,
intermediate_size=2048,
activation="relu",
dropout_rate=0.0,
attention_dropout_rate=0.0,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
intermediate_dropout=0.0,
**kwargs):
"""Initialize a Transformer decoder.
Args:
num_layers: Number of layers.
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate (Feedforward) layer.
activation: Activation for the intermediate layer.
dropout_rate: Dropout probability.
attention_dropout_rate: Dropout probability for attention layers.
use_bias: Whether to enable use_bias in attention layer. If set `False`,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set `False`, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
intermediate_dropout: Dropout probability for intermediate_dropout_layer.
      **kwargs: keyword arguments passed to tf.keras.layers.Layer.
"""
super(TransformerDecoder, self).__init__(**kwargs)
self.num_layers = num_layers
self.num_attention_heads = num_attention_heads
self._intermediate_size = intermediate_size
self._activation = activation
self._dropout_rate = dropout_rate
self._attention_dropout_rate = attention_dropout_rate
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._intermediate_dropout = intermediate_dropout
def build(self, input_shape):
"""Implements build() for the layer."""
self.decoder_layers = []
for i in range(self.num_layers):
self.decoder_layers.append(
TransformerDecoderBlock(
num_attention_heads=self.num_attention_heads,
intermediate_size=self._intermediate_size,
intermediate_activation=self._activation,
dropout_rate=self._dropout_rate,
attention_dropout_rate=self._attention_dropout_rate,
use_bias=self._use_bias,
norm_first=self._norm_first,
norm_epsilon=self._norm_epsilon,
intermediate_dropout=self._intermediate_dropout,
attention_initializer=tf_utils.clone_initializer(
models.seq2seq_transformer.attention_initializer(
input_shape[2])),
name=("layer_%d" % i)))
self.output_normalization = tf.keras.layers.LayerNormalization(
epsilon=self._norm_epsilon, dtype="float32")
super(TransformerDecoder, self).build(input_shape)
def get_config(self):
config = {
"num_layers": self.num_layers,
"num_attention_heads": self.num_attention_heads,
"intermediate_size": self._intermediate_size,
"activation": self._activation,
"dropout_rate": self._dropout_rate,
"attention_dropout_rate": self._attention_dropout_rate,
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"intermediate_dropout": self._intermediate_dropout
}
base_config = super(TransformerDecoder, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
target,
memory,
self_attention_mask=None,
cross_attention_mask=None,
cache=None,
decode_loop_step=None,
return_all_decoder_outputs=False,
input_pos_embed=None,
memory_pos_embed=None):
"""Return the output of the decoder layer stacks.
Args:
target: A tensor with shape `(batch_size, target_length, hidden_size)`.
memory: A tensor with shape `(batch_size, input_length, hidden_size)`.
      self_attention_mask: A tensor with shape `(batch_size, target_length,
        target_length)`, the mask for the decoder self-attention layer.
cross_attention_mask: A tensor with shape `(batch_size, target_length,
input_length)` which is the mask for encoder-decoder attention layer.
cache: (Used for fast decoding) A nested dictionary storing previous
decoder self-attention values. The items are:
{layer_n: {"k": A tensor with shape `(batch_size, i, key_channels)`,
"v": A tensor with shape `(batch_size, i, value_channels)`},
...}
decode_loop_step: An integer, the step number of the decoding loop. Used
only for autoregressive inference on TPU.
return_all_decoder_outputs: Return all decoder layer outputs. Note that
the outputs are layer normed. This is useful when introducing per layer
auxiliary loss.
input_pos_embed: A tensor that is added to the query and key of the
self-attention layer.
memory_pos_embed: A tensor that is added to the query and key of the
cross-attention layer.
Returns:
Output of decoder.
      float32 tensor with shape `(batch_size, target_length, hidden_size)`.
"""
output_tensor = target
decoder_outputs = []
for layer_idx in range(self.num_layers):
transformer_inputs = [
output_tensor, memory, cross_attention_mask, self_attention_mask,
input_pos_embed, memory_pos_embed
]
# Gets the cache for decoding.
if cache is None:
output_tensor, _ = self.decoder_layers[layer_idx](transformer_inputs)
else:
cache_layer_idx = str(layer_idx)
output_tensor, cache[cache_layer_idx] = self.decoder_layers[layer_idx](
transformer_inputs,
cache=cache[cache_layer_idx],
decode_loop_step=decode_loop_step)
if return_all_decoder_outputs:
decoder_outputs.append(self.output_normalization(output_tensor))
if return_all_decoder_outputs:
return decoder_outputs
else:
return self.output_normalization(output_tensor)
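# Hedged sketch of a `TransformerDecoder` call in the DETR setting: the target
# starts as zeros, `input_pos_embed` carries the learned object-query
# embeddings and `memory_pos_embed` the encoder position embedding. Sizes are
# assumptions for illustration only.
def _transformer_decoder_usage_sketch():
  batch_size, num_queries, memory_length, hidden_size = 2, 10, 49, 64
  decoder = TransformerDecoder(
      num_layers=2, num_attention_heads=4, intermediate_size=128)
  target = tf.zeros((batch_size, num_queries, hidden_size))
  memory = tf.random.uniform((batch_size, memory_length, hidden_size))
  query_embed = tf.random.uniform((batch_size, num_queries, hidden_size))
  memory_pos_embed = tf.random.uniform(
      (batch_size, memory_length, hidden_size))
  outputs = decoder(
      target,
      memory,
      self_attention_mask=tf.ones((batch_size, num_queries, num_queries)),
      cross_attention_mask=tf.ones((batch_size, num_queries, memory_length)),
      return_all_decoder_outputs=True,
      input_pos_embed=query_embed,
      memory_pos_embed=memory_pos_embed)
  # With `return_all_decoder_outputs=True` this is a list with one
  # `(batch_size, num_queries, hidden_size)` tensor per decoder layer.
  return outputs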
class TransformerDecoderBlock(tf.keras.layers.Layer):
"""Single transformer layer for decoder.
It has three sub-layers:
(1) a multi-head self-attention mechanism.
  (2) an encoder-decoder attention.
(3) a positionwise fully connected feed-forward network.
"""
def __init__(self,
num_attention_heads,
intermediate_size,
intermediate_activation,
dropout_rate=0.0,
attention_dropout_rate=0.0,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
intermediate_dropout=0.0,
attention_initializer=None,
**kwargs):
"""Initialize a Transformer decoder block.
Args:
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate layer.
intermediate_activation: Activation for the intermediate layer.
dropout_rate: Dropout probability for the post-attention and output
dropout.
attention_dropout_rate: Dropout probability for within the attention
layer.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
intermediate_dropout: Dropout probability for intermediate_dropout_layer.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
      **kwargs: keyword arguments passed to tf.keras.layers.Layer.
"""
super().__init__(**kwargs)
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.intermediate_activation = tf.keras.activations.get(
intermediate_activation)
self.dropout_rate = dropout_rate
self.attention_dropout_rate = attention_dropout_rate
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._intermediate_dropout = intermediate_dropout
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer)
self._cross_attention_cls = layers.attention.MultiHeadAttention
def build(self, input_shape):
target_tensor_shape = tf.TensorShape(input_shape[0])
if len(target_tensor_shape.as_list()) != 3:
raise ValueError("TransformerLayer expects a three-dimensional input of "
"shape [batch, sequence, width].")
hidden_size = target_tensor_shape[2]
if hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self.num_attention_heads))
self.attention_head_size = int(hidden_size) // self.num_attention_heads
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
# Self attention.
self.self_attention = layers.attention.CachedAttention(
num_heads=self.num_attention_heads,
key_dim=self.attention_head_size,
dropout=self.attention_dropout_rate,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
name="self_attention",
**common_kwargs)
self.self_attention_output_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
name="output",
**common_kwargs)
self.self_attention_dropout = tf.keras.layers.Dropout(
rate=self.dropout_rate)
self.self_attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype="float32"))
# Encoder-decoder attention.
self.encdec_attention = self._cross_attention_cls(
num_heads=self.num_attention_heads,
key_dim=self.attention_head_size,
dropout=self.attention_dropout_rate,
output_shape=hidden_size,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
name="attention/encdec",
**common_kwargs)
self.encdec_attention_dropout = tf.keras.layers.Dropout(
rate=self.dropout_rate)
self.encdec_attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="attention/encdec_output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype="float32"))
# Feed-forward projection.
self.intermediate_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, self.intermediate_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
name="intermediate",
**common_kwargs)
self.intermediate_activation_layer = tf.keras.layers.Activation(
self.intermediate_activation)
self._intermediate_dropout_layer = tf.keras.layers.Dropout(
rate=self._intermediate_dropout)
self.output_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
name="output",
**common_kwargs)
self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
self.output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype="float32")
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads": self.num_attention_heads,
"intermediate_size": self.intermediate_size,
"intermediate_activation": tf_utils.serialize_activation(
self.intermediate_activation, use_legacy_format=True
),
"dropout_rate": self.dropout_rate,
"attention_dropout_rate": self.attention_dropout_rate,
"kernel_initializer": tf_utils.serialize_initializer(
self._kernel_initializer, use_legacy_format=True
),
"bias_initializer": tf_utils.serialize_initializer(
self._bias_initializer, use_legacy_format=True
),
"kernel_regularizer": tf_utils.serialize_regularizer(
self._kernel_regularizer, use_legacy_format=True
),
"bias_regularizer": tf_utils.serialize_regularizer(
self._bias_regularizer, use_legacy_format=True
),
"activity_regularizer": tf_utils.serialize_regularizer(
self._activity_regularizer, use_legacy_format=True
),
"kernel_constraint": tf_utils.serialize_constraint(
self._kernel_constraint, use_legacy_format=True
),
"bias_constraint": tf_utils.serialize_constraint(
self._bias_constraint, use_legacy_format=True
),
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"intermediate_dropout": self._intermediate_dropout,
"attention_initializer": tf_utils.serialize_initializer(
self._attention_initializer, use_legacy_format=True
),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def common_layers_with_encoder(self):
"""Gets layer objects that can make a Transformer encoder block."""
return [
self.self_attention, self.self_attention_layer_norm,
self.intermediate_dense, self.output_dense, self.output_layer_norm
]
def call(self, inputs, cache=None, decode_loop_step=None):
input_tensor, memory, attention_mask, self_attention_mask, input_pos_embed, memory_pos_embed = inputs
source_tensor = input_tensor
if self._norm_first:
input_tensor = self.self_attention_layer_norm(input_tensor)
self_attention_output, cache = self.self_attention(
query=input_tensor + input_pos_embed,
key=input_tensor + input_pos_embed,
value=input_tensor,
attention_mask=self_attention_mask,
cache=cache,
decode_loop_step=decode_loop_step)
self_attention_output = self.self_attention_dropout(self_attention_output)
if self._norm_first:
self_attention_output = source_tensor + self_attention_output
else:
self_attention_output = self.self_attention_layer_norm(
input_tensor + self_attention_output)
if self._norm_first:
source_self_attention_output = self_attention_output
self_attention_output = self.encdec_attention_layer_norm(
self_attention_output)
cross_attn_inputs = dict(
query=self_attention_output + input_pos_embed,
key=memory + memory_pos_embed,
value=memory,
attention_mask=attention_mask)
attention_output = self.encdec_attention(**cross_attn_inputs)
attention_output = self.encdec_attention_dropout(attention_output)
if self._norm_first:
attention_output = source_self_attention_output + attention_output
else:
attention_output = self.encdec_attention_layer_norm(
self_attention_output + attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self.output_layer_norm(attention_output)
intermediate_output = self.intermediate_dense(attention_output)
intermediate_output = self.intermediate_activation_layer(
intermediate_output)
intermediate_output = self._intermediate_dropout_layer(intermediate_output)
layer_output = self.output_dense(intermediate_output)
layer_output = self.output_dropout(layer_output)
if self._norm_first:
layer_output = source_attention_output + layer_output
else:
layer_output = self.output_layer_norm(layer_output + attention_output)
return layer_output, cache
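# Illustrative sketch of one `TransformerDecoderBlock` call. The block takes a
# 6-element list and returns `(layer_output, cache)`; `cache` stays None when
# no decoding cache is passed in. All sizes below are assumptions.
def _transformer_decoder_block_usage_sketch():
  batch_size, num_queries, memory_length, hidden_size = 2, 5, 12, 32
  block = TransformerDecoderBlock(
      num_attention_heads=4,
      intermediate_size=64,
      intermediate_activation="relu")
  target = tf.zeros((batch_size, num_queries, hidden_size))
  memory = tf.random.uniform((batch_size, memory_length, hidden_size))
  input_pos_embed = tf.random.uniform((batch_size, num_queries, hidden_size))
  memory_pos_embed = tf.random.uniform(
      (batch_size, memory_length, hidden_size))
  cross_attention_mask = tf.ones((batch_size, num_queries, memory_length))
  self_attention_mask = tf.ones((batch_size, num_queries, num_queries))
  layer_output, cache = block([
      target, memory, cross_attention_mask, self_attention_mask,
      input_pos_embed, memory_pos_embed
  ])
  return layer_output, cache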
| 36,237 | 41.632941 | 105 | py |
models | models-master/official/projects/detr/tasks/detection.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DETR detection task definition."""
from typing import Optional
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.projects.detr.configs import detr as detr_cfg
from official.projects.detr.dataloaders import coco
from official.projects.detr.dataloaders import detr_input
from official.projects.detr.modeling import detr
from official.projects.detr.ops import matchers
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import tf_example_decoder
from official.vision.dataloaders import tfds_factory
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.modeling import backbones
from official.vision.ops import box_ops
@task_factory.register_task_cls(detr_cfg.DetrTask)
class DetectionTask(base_task.Task):
"""A single-replica view of training procedure.
  DETR task provides artifacts for training/evaluation procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss,
post-processing, and customized metrics with reduction.
"""
def build_model(self):
"""Build DETR model."""
input_specs = tf.keras.layers.InputSpec(shape=[None] +
self._task_config.model.input_size)
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=self._task_config.model.backbone,
norm_activation_config=self._task_config.model.norm_activation)
model = detr.DETR(backbone,
self._task_config.model.backbone_endpoint_name,
self._task_config.model.num_queries,
self._task_config.model.hidden_size,
self._task_config.model.num_classes,
self._task_config.model.num_encoder_layers,
self._task_config.model.num_decoder_layers)
return model
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self._task_config.init_checkpoint:
return
ckpt_dir_or_file = self._task_config.init_checkpoint
# Restoring checkpoint.
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if self._task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.restore(ckpt_dir_or_file)
status.assert_consumed()
elif self._task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.restore(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self,
params,
input_context: Optional[tf.distribute.InputContext] = None):
"""Build input dataset."""
if isinstance(params, coco.COCODataConfig):
dataset = coco.COCODataLoader(params).load(input_context)
else:
if params.tfds_name:
decoder = tfds_factory.get_detection_decoder(params.tfds_name)
else:
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = tf_example_decoder.TfExampleDecoder(
regenerate_source_id=decoder_cfg.regenerate_source_id)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
regenerate_source_id=decoder_cfg.regenerate_source_id)
else:
raise ValueError('Unknown decoder type: {}!'.format(
params.decoder.type))
parser = detr_input.Parser(
class_offset=self._task_config.losses.class_offset,
output_size=self._task_config.model.input_size[:2],
)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def _compute_cost(self, cls_outputs, box_outputs, cls_targets, box_targets):
# Approximate classification cost with 1 - prob[target class].
    # The 1 is a constant that doesn't change the matching, so it can be
    # omitted.
# background: 0
cls_cost = self._task_config.losses.lambda_cls * tf.gather(
-tf.nn.softmax(cls_outputs), cls_targets, batch_dims=1, axis=-1)
# Compute the L1 cost between boxes,
paired_differences = self._task_config.losses.lambda_box * tf.abs(
tf.expand_dims(box_outputs, 2) - tf.expand_dims(box_targets, 1))
box_cost = tf.reduce_sum(paired_differences, axis=-1)
    # Compute the giou cost between boxes
giou_cost = self._task_config.losses.lambda_giou * -box_ops.bbox_generalized_overlap(
box_ops.cycxhw_to_yxyx(box_outputs),
box_ops.cycxhw_to_yxyx(box_targets))
total_cost = cls_cost + box_cost + giou_cost
max_cost = (
self._task_config.losses.lambda_cls * 0.0 +
self._task_config.losses.lambda_box * 4. +
self._task_config.losses.lambda_giou * 0.0)
# Set pads to large constant
valid = tf.expand_dims(
tf.cast(tf.not_equal(cls_targets, 0), dtype=total_cost.dtype), axis=1)
total_cost = (1 - valid) * max_cost + valid * total_cost
    # Set inf or nan to large constant
total_cost = tf.where(
tf.logical_or(tf.math.is_nan(total_cost), tf.math.is_inf(total_cost)),
max_cost * tf.ones_like(total_cost, dtype=total_cost.dtype),
total_cost)
return total_cost
def build_losses(self, outputs, labels, aux_losses=None):
"""Builds DETR losses."""
cls_outputs = outputs['cls_outputs']
box_outputs = outputs['box_outputs']
cls_targets = labels['classes']
box_targets = labels['boxes']
cost = self._compute_cost(
cls_outputs, box_outputs, cls_targets, box_targets)
_, indices = matchers.hungarian_matching(cost)
indices = tf.stop_gradient(indices)
target_index = tf.math.argmax(indices, axis=1)
cls_assigned = tf.gather(cls_outputs, target_index, batch_dims=1, axis=1)
box_assigned = tf.gather(box_outputs, target_index, batch_dims=1, axis=1)
background = tf.equal(cls_targets, 0)
num_boxes = tf.reduce_sum(
tf.cast(tf.logical_not(background), tf.float32), axis=-1)
# Down-weight background to account for class imbalance.
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=cls_targets, logits=cls_assigned)
cls_loss = self._task_config.losses.lambda_cls * tf.where(
background, self._task_config.losses.background_cls_weight * xentropy,
xentropy)
cls_weights = tf.where(
background,
self._task_config.losses.background_cls_weight * tf.ones_like(cls_loss),
tf.ones_like(cls_loss))
# Box loss is only calculated on non-background class.
l_1 = tf.reduce_sum(tf.abs(box_assigned - box_targets), axis=-1)
box_loss = self._task_config.losses.lambda_box * tf.where(
background, tf.zeros_like(l_1), l_1)
# Giou loss is only calculated on non-background class.
giou = tf.linalg.diag_part(1.0 - box_ops.bbox_generalized_overlap(
box_ops.cycxhw_to_yxyx(box_assigned),
box_ops.cycxhw_to_yxyx(box_targets)
))
giou_loss = self._task_config.losses.lambda_giou * tf.where(
background, tf.zeros_like(giou), giou)
# Consider doing all reduce once in train_step to speed up.
num_boxes_per_replica = tf.reduce_sum(num_boxes)
cls_weights_per_replica = tf.reduce_sum(cls_weights)
replica_context = tf.distribute.get_replica_context()
num_boxes_sum, cls_weights_sum = replica_context.all_reduce(
tf.distribute.ReduceOp.SUM,
[num_boxes_per_replica, cls_weights_per_replica])
cls_loss = tf.math.divide_no_nan(
tf.reduce_sum(cls_loss), cls_weights_sum)
box_loss = tf.math.divide_no_nan(
tf.reduce_sum(box_loss), num_boxes_sum)
giou_loss = tf.math.divide_no_nan(
tf.reduce_sum(giou_loss), num_boxes_sum)
aux_losses = tf.add_n(aux_losses) if aux_losses else 0.0
total_loss = cls_loss + box_loss + giou_loss + aux_losses
return total_loss, cls_loss, box_loss, giou_loss
def build_metrics(self, training=True):
"""Builds detection metrics."""
metrics = []
metric_names = ['cls_loss', 'box_loss', 'giou_loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if not training:
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=self._task_config.annotation_file,
include_mask=False,
need_rescale_bboxes=True,
per_category_metrics=self._task_config.per_category_metrics)
return metrics
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
with tf.GradientTape() as tape:
outputs = model(features, training=True)
loss = 0.0
cls_loss = 0.0
box_loss = 0.0
giou_loss = 0.0
for output in outputs:
# Computes per-replica loss.
layer_loss, layer_cls_loss, layer_box_loss, layer_giou_loss = self.build_losses(
outputs=output, labels=labels, aux_losses=model.losses)
loss += layer_loss
cls_loss += layer_cls_loss
box_loss += layer_box_loss
giou_loss += layer_giou_loss
# Consider moving scaling logic from build_losses to here.
scaled_loss = loss
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
# Multiply for logging.
# Since we expect the gradient replica sum to happen in the optimizer,
# the loss is scaled with global num_boxes and weights.
# To have it more interpretable/comparable we scale it back when logging.
num_replicas_in_sync = tf.distribute.get_strategy().num_replicas_in_sync
loss *= num_replicas_in_sync
cls_loss *= num_replicas_in_sync
box_loss *= num_replicas_in_sync
giou_loss *= num_replicas_in_sync
# Trainer class handles loss metric for you.
logs = {self.loss: loss}
all_losses = {
'cls_loss': cls_loss,
'box_loss': box_loss,
'giou_loss': giou_loss,
}
# Metric results will be added to logs for you.
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = model(features, training=False)[-1]
loss, cls_loss, box_loss, giou_loss = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
# Multiply for logging.
# Since we expect the gradient replica sum to happen in the optimizer,
# the loss is scaled with global num_boxes and weights.
# To have it more interpretable/comparable we scale it back when logging.
num_replicas_in_sync = tf.distribute.get_strategy().num_replicas_in_sync
loss *= num_replicas_in_sync
cls_loss *= num_replicas_in_sync
box_loss *= num_replicas_in_sync
giou_loss *= num_replicas_in_sync
# Evaluator class handles loss metric for you.
logs = {self.loss: loss}
# This is for backward compatibility.
if 'detection_boxes' not in outputs:
detection_boxes = box_ops.cycxhw_to_yxyx(
outputs['box_outputs']) * tf.expand_dims(
tf.concat([
labels['image_info'][:, 1:2, 0], labels['image_info'][:, 1:2,
1],
labels['image_info'][:, 1:2, 0], labels['image_info'][:, 1:2,
1]
],
axis=1),
axis=1)
else:
detection_boxes = outputs['detection_boxes']
detection_scores = tf.math.reduce_max(
tf.nn.softmax(outputs['cls_outputs'])[:, :, 1:], axis=-1
) if 'detection_scores' not in outputs else outputs['detection_scores']
if 'detection_classes' not in outputs:
detection_classes = tf.math.argmax(
outputs['cls_outputs'][:, :, 1:], axis=-1) + 1
else:
detection_classes = outputs['detection_classes']
if 'num_detections' not in outputs:
num_detections = tf.reduce_sum(
tf.cast(
tf.math.greater(
tf.math.reduce_max(outputs['cls_outputs'], axis=-1), 0),
tf.int32),
axis=-1)
else:
num_detections = outputs['num_detections']
predictions = {
'detection_boxes': detection_boxes,
'detection_scores': detection_scores,
'detection_classes': detection_classes,
'num_detections': num_detections,
'source_id': labels['id'],
'image_info': labels['image_info']
}
ground_truths = {
'source_id': labels['id'],
'height': labels['image_info'][:, 0:1, 0],
'width': labels['image_info'][:, 0:1, 1],
'num_detections': tf.reduce_sum(
tf.cast(tf.math.greater(labels['classes'], 0), tf.int32), axis=-1),
'boxes': labels['gt_boxes'],
'classes': labels['classes'],
'is_crowds': labels['is_crowd']
}
logs.update({'predictions': predictions,
'ground_truths': ground_truths})
all_losses = {
'cls_loss': cls_loss,
'box_loss': box_loss,
'giou_loss': giou_loss,
}
# Metric results will be added to logs for you.
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if state is None:
self.coco_metric.reset_states()
state = self.coco_metric
state.update_state(
step_outputs['ground_truths'],
step_outputs['predictions'])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
return aggregated_logs.result()
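# The sketch below is illustrative only: it re-creates the matching cost from
# `_compute_cost` with toy numbers (1 image, 2 queries, 2 targets, 3 classes
# including background) and feeds it to the same Hungarian matcher used in
# `build_losses`. The weights and all values are assumptions; the giou term is
# omitted for brevity.
def _matching_cost_sketch():
  lambda_cls, lambda_box = 1.0, 5.0
  cls_outputs = tf.constant([[[2.0, 0.1, 0.3], [0.2, 1.5, 0.3]]])
  box_outputs = tf.constant([[[0.5, 0.5, 0.2, 0.2], [0.3, 0.3, 0.1, 0.1]]])
  cls_targets = tf.constant([[1, 2]])
  box_targets = tf.constant([[[0.5, 0.5, 0.2, 0.2], [0.3, 0.3, 0.1, 0.1]]])
  cls_cost = lambda_cls * tf.gather(
      -tf.nn.softmax(cls_outputs), cls_targets, batch_dims=1, axis=-1)
  box_cost = lambda_box * tf.reduce_sum(
      tf.abs(tf.expand_dims(box_outputs, 2) - tf.expand_dims(box_targets, 1)),
      axis=-1)
  total_cost = cls_cost + box_cost
  _, indices = matchers.hungarian_matching(total_cost)
  # `indices` is the assignment matrix; argmax recovers the prediction index
  # matched to each target, mirroring the gather step in `build_losses`.
  return tf.math.argmax(indices, axis=1)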
| 16,007 | 36.933649 | 89 | py |
models | models-master/official/projects/yolo/serving/export_module_factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory for YOLO export modules."""
from typing import Any, Callable, Dict, List, Optional, Text, Union
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import export_base
from official.projects.yolo.configs.yolo import YoloTask
from official.projects.yolo.configs.yolov7 import YoloV7Task
from official.projects.yolo.modeling import factory as yolo_factory
from official.projects.yolo.modeling.backbones import darknet # pylint: disable=unused-import
from official.projects.yolo.modeling.decoders import yolo_decoder # pylint: disable=unused-import
from official.projects.yolo.serving import model_fn as yolo_model_fn
from official.vision import configs
from official.vision.dataloaders import classification_input
from official.vision.modeling import factory
from official.vision.serving import export_utils
class ExportModule(export_base.ExportModule):
"""Base Export Module."""
def __init__(self,
params: cfg.ExperimentConfig,
model: tf.keras.Model,
input_signature: Union[tf.TensorSpec, Dict[str, tf.TensorSpec]],
preprocessor: Optional[Callable[..., Any]] = None,
inference_step: Optional[Callable[..., Any]] = None,
postprocessor: Optional[Callable[..., Any]] = None,
eval_postprocessor: Optional[Callable[..., Any]] = None):
"""Initializes a module for export.
Args:
params: A dataclass for parameters to the module.
model: A tf.keras.Model instance to be exported.
input_signature: tf.TensorSpec, e.g. tf.TensorSpec(shape=[None, 224, 224,
3], dtype=tf.uint8)
preprocessor: An optional callable to preprocess the inputs.
inference_step: An optional callable to forward-pass the model.
postprocessor: An optional callable to postprocess the model outputs.
eval_postprocessor: An optional callable to postprocess model outputs used
for model evaluation.
"""
super().__init__(
params,
model=model,
preprocessor=preprocessor,
inference_step=inference_step,
postprocessor=postprocessor)
self.eval_postprocessor = eval_postprocessor
self.input_signature = input_signature
@tf.function
def serve(self, inputs: Any) -> Any:
x = self.preprocessor(inputs=inputs) if self.preprocessor else inputs
x = self.inference_step(x)
x = self.postprocessor(x) if self.postprocessor else x
return x
@tf.function
def serve_eval(self, inputs: Any) -> Any:
x = self.preprocessor(inputs=inputs) if self.preprocessor else inputs
x = self.inference_step(x)
x = self.eval_postprocessor(x) if self.eval_postprocessor else x
return x
def get_inference_signatures(
self, function_keys: Dict[Text, Text]):
"""Gets defined function signatures.
Args:
      function_keys: A dictionary with keys as the functions to create
        signatures for and values as the signature keys used in the returned
        dictionary.
Returns:
A dictionary with key as signature key and value as concrete functions
that can be used for tf.saved_model.save.
"""
signatures = {}
for _, def_name in function_keys.items():
if 'eval' in def_name and self.eval_postprocessor:
signatures[def_name] = self.serve_eval.get_concrete_function(
self.input_signature)
else:
signatures[def_name] = self.serve.get_concrete_function(
self.input_signature)
return signatures
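# Hedged sketch of exporting an already-constructed `ExportModule`. The
# signature key 'serving_default' and the export directory are assumptions;
# `get_inference_signatures` maps each requested key to a concrete function of
# `serve` (or `serve_eval` when the key contains 'eval' and an
# `eval_postprocessor` was provided).
def _export_module_save_sketch(module: ExportModule, export_dir: str):
  signatures = module.get_inference_signatures(
      {'image_tensor': 'serving_default'})
  tf.saved_model.save(module, export_dir, signatures=signatures)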
def create_classification_export_module(
params: cfg.ExperimentConfig,
input_type: str,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3,
input_name: Optional[str] = None) -> ExportModule:
"""Creates classification export module."""
input_signature = export_utils.get_image_input_signatures(
input_type, batch_size, input_image_size, num_channels, input_name)
input_specs = tf.keras.layers.InputSpec(shape=[batch_size] +
input_image_size + [num_channels])
model = factory.build_classification_model(
input_specs=input_specs,
model_config=params.task.model,
l2_regularizer=None)
def preprocess_fn(inputs):
image_tensor = export_utils.parse_image(inputs, input_type,
input_image_size, num_channels)
# If input_type is `tflite`, do not apply image preprocessing.
if input_type == 'tflite':
return image_tensor
def preprocess_image_fn(inputs):
return classification_input.Parser.inference_fn(inputs, input_image_size,
num_channels)
images = tf.map_fn(
preprocess_image_fn,
elems=image_tensor,
fn_output_signature=tf.TensorSpec(
shape=input_image_size + [num_channels], dtype=tf.float32))
return images
def postprocess_fn(logits):
probs = tf.nn.softmax(logits)
return {'logits': logits, 'probs': probs}
export_module = ExportModule(
params,
model=model,
input_signature=input_signature,
preprocessor=preprocess_fn,
postprocessor=postprocess_fn)
return export_module
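# Illustrative sketch (sizes and the experiment config are assumptions) of
# building a classification export module and running its serving function on
# a batch of uint8 images.
def _classification_export_sketch(params: cfg.ExperimentConfig):
  module = create_classification_export_module(
      params, input_type='image_tensor', batch_size=1,
      input_image_size=[224, 224])
  images = tf.zeros([1, 224, 224, 3], dtype=tf.uint8)
  outputs = module.serve(images)
  # `outputs` holds both the raw 'logits' and the softmax 'probs'.
  return outputs['probs']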
def create_yolo_export_module(
params: cfg.ExperimentConfig,
input_type: str,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3,
input_name: Optional[str] = None) -> ExportModule:
"""Creates YOLO export module."""
input_signature = export_utils.get_image_input_signatures(
input_type, batch_size, input_image_size, num_channels, input_name)
input_specs = tf.keras.layers.InputSpec(shape=[batch_size] +
input_image_size + [num_channels])
if isinstance(params.task, YoloTask):
model, _ = yolo_factory.build_yolo(
input_specs=input_specs,
model_config=params.task.model,
l2_regularization=None)
elif isinstance(params.task, YoloV7Task):
model = yolo_factory.build_yolov7(
input_specs=input_specs,
model_config=params.task.model,
l2_regularization=None)
def preprocess_fn(inputs):
image_tensor = export_utils.parse_image(inputs, input_type,
input_image_size, num_channels)
def normalize_image_fn(inputs):
image = tf.cast(inputs, dtype=tf.float32)
return image / 255.0
# If input_type is `tflite`, do not apply image preprocessing. Only apply
# normalization.
if input_type == 'tflite':
return normalize_image_fn(image_tensor), None
def preprocess_image_fn(inputs):
image = normalize_image_fn(inputs)
(image, image_info) = yolo_model_fn.letterbox(
image,
input_image_size,
letter_box=params.task.validation_data.parser.letter_box)
return image, image_info
images_spec = tf.TensorSpec(shape=input_image_size + [3], dtype=tf.float32)
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
preprocess_image_fn,
elems=image_tensor,
fn_output_signature=(images_spec, image_info_spec),
parallel_iterations=32))
return images, image_info
def inference_steps(inputs, model):
images, image_info = inputs
detection = model.call(images, training=False)
if input_type != 'tflite':
detection['bbox'] = yolo_model_fn.undo_info(
detection['bbox'],
detection['num_detections'],
image_info,
expand=False,
)
final_outputs = {
'detection_boxes': detection['bbox'],
'detection_scores': detection['confidence'],
'detection_classes': detection['classes'],
'num_detections': detection['num_detections']
}
return final_outputs
export_module = ExportModule(
params,
model=model,
input_signature=input_signature,
preprocessor=preprocess_fn,
inference_step=inference_steps)
return export_module
def get_export_module(params: cfg.ExperimentConfig,
input_type: str,
batch_size: Optional[int],
input_image_size: List[int],
num_channels: int = 3,
input_name: Optional[str] = None) -> ExportModule:
"""Factory for export modules."""
if isinstance(params.task,
configs.image_classification.ImageClassificationTask):
export_module = create_classification_export_module(params, input_type,
batch_size,
input_image_size,
num_channels,
input_name)
elif isinstance(params.task, (YoloTask, YoloV7Task)):
export_module = create_yolo_export_module(params, input_type, batch_size,
input_image_size, num_channels,
input_name)
else:
raise ValueError('Export module not implemented for {} task.'.format(
type(params.task)))
return export_module
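# Hedged end-to-end sketch: pick an export module from an experiment config
# and write a SavedModel. The experiment name, image size and output path are
# assumptions made only for illustration.
def _export_saved_model_sketch():
  from official.core import exp_factory  # Local import for the sketch only.
  params = exp_factory.get_exp_config('yolo_darknet')
  module = get_export_module(
      params, input_type='image_tensor', batch_size=1,
      input_image_size=[512, 512])
  signatures = module.get_inference_signatures(
      {'image_tensor': 'serving_default'})
  tf.saved_model.save(module, '/tmp/yolo_export', signatures=signatures)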
| 9,910 | 36.4 | 98 | py |
models | models-master/official/projects/yolo/optimization/optimizer_factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimizer factory class."""
import gin
from official.modeling.optimization import ema_optimizer
from official.modeling.optimization import optimizer_factory
from official.projects.yolo.optimization import sgd_torch
optimizer_factory.LEGACY_OPTIMIZERS_CLS.update({
'sgd_torch': sgd_torch.SGDTorch,
})
OPTIMIZERS_CLS = optimizer_factory.LEGACY_OPTIMIZERS_CLS
LR_CLS = optimizer_factory.LR_CLS
WARMUP_CLS = optimizer_factory.WARMUP_CLS
class OptimizerFactory(optimizer_factory.OptimizerFactory):
"""Optimizer factory class.
This class builds learning rate and optimizer based on an optimization config.
To use this class, you need to do the following:
  (1) Define the optimization config; this includes the optimizer and learning
  rate schedule.
(2) Initialize the class using the optimization config.
(3) Build learning rate.
(4) Build optimizer.
This is a typical example for using this class:
params = {
'optimizer': {
'type': 'sgd',
'sgd': {'momentum': 0.9}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {'boundaries': [10000, 20000],
'values': [0.1, 0.01, 0.001]}
},
'warmup': {
'type': 'linear',
'linear': {'warmup_steps': 500, 'warmup_learning_rate': 0.01}
}
}
opt_config = OptimizationConfig(params)
opt_factory = OptimizerFactory(opt_config)
lr = opt_factory.build_learning_rate()
optimizer = opt_factory.build_optimizer(lr)
"""
def get_bias_lr_schedule(self, bias_lr):
"""Build learning rate.
Builds learning rate from config. Learning rate schedule is built according
to the learning rate config. If learning rate type is consant,
lr_config.learning_rate is returned.
Args:
      bias_lr: the warm-up starting learning rate to use for bias variables.
Returns:
tf.keras.optimizers.schedules.LearningRateSchedule instance. If
      learning rate type is constant, lr_config.learning_rate is returned.
"""
if self._lr_type == 'constant':
lr = self._lr_config.learning_rate
else:
lr = LR_CLS[self._lr_type](**self._lr_config.as_dict())
if self._warmup_config:
if self._warmup_type != 'linear':
        raise ValueError('Smart Bias is only supported currently with a '
                         'linear warm up.')
warm_up_cfg = self._warmup_config.as_dict()
warm_up_cfg['warmup_learning_rate'] = bias_lr
lr = WARMUP_CLS['linear'](lr, **warm_up_cfg)
return lr
@gin.configurable
def add_ema(self, optimizer):
"""Add EMA to the optimizer independently of the build optimizer method."""
if self._use_ema:
optimizer = ema_optimizer.ExponentialMovingAverage(
optimizer, **self._ema_config.as_dict())
return optimizer
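# Hedged usage sketch for the factory above: build the default learning rate,
# derive a separate warm-up schedule for biases, and hand it to the simulated
# PyTorch SGD optimizer. `opt_config` is assumed to be an `OptimizationConfig`
# shaped like the example in the class docstring, with `optimizer.type` set to
# 'sgd_torch'.
def _optimizer_factory_usage_sketch(opt_config):
  opt_factory = OptimizerFactory(opt_config)
  lr = opt_factory.build_learning_rate()
  bias_lr = opt_factory.get_bias_lr_schedule(bias_lr=0.1)
  optimizer = opt_factory.build_optimizer(lr)
  if isinstance(optimizer, sgd_torch.SGDTorch):
    optimizer.set_bias_lr(bias_lr)
  return optimizer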
| 3,403 | 33.04 | 80 | py |
models | models-master/official/projects/yolo/optimization/sgd_torch.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SGD PyTorch optimizer."""
import re
from absl import logging
import tensorflow as tf
LearningRateSchedule = tf.keras.optimizers.schedules.LearningRateSchedule
def _var_key(var):
"""Key for representing a primary variable, for looking up slots.
In graph mode the name is derived from the var shared name.
In eager mode the name is derived from the var unique id.
If distribution strategy exists, get the primary variable first.
Args:
var: the variable.
Returns:
the unique name of the variable.
"""
# pylint: disable=protected-access
# Get the distributed variable if it exists.
if hasattr(var, "_distributed_container"):
var = var._distributed_container()
if var._in_graph_mode:
return var._shared_name
return var._unique_id
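# Tiny illustrative sketch of `_var_key`: the returned key is what the
# optimizer uses to look up which group (weights / biases / others) a variable
# was assigned to by `search_and_set_variable_groups`. The variable name is an
# arbitrary assumption.
def _var_key_sketch():
  var = tf.Variable(tf.zeros([3, 3]), name="conv2d/kernel")
  # In eager mode this is the variable's unique id; in graph mode it would be
  # the shared name ("conv2d/kernel").
  return _var_key(var)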
class SGDTorch(tf.keras.optimizers.legacy.Optimizer):
"""Optimizer that simulates the SGD module used in pytorch.
  For details on the differences between the original SGD implementation and
  the
one in pytorch:
https://pytorch.org/docs/stable/generated/torch.optim.SGD.html.
  This optimizer also allows for the use of a momentum warmup alongside a
  learning rate warmup, though using this is not required.
Example of usage for training:
```python
opt = SGDTorch(learning_rate, weight_decay = 0.0001)
l2_regularization = None
# iterate all model.trainable_variables and split the variables by key
# into the weights, biases, and others.
optimizer.search_and_set_variable_groups(model.trainable_variables)
  # if the learning rate schedule on the biases is different. if lr is not set
  # the default schedule used for weights will be used on the biases.
opt.set_bias_lr(<lr schedule>)
  # if the learning rate schedule on the others is different. if lr is not set
  # the default schedule used for weights will be used on the others.
opt.set_other_lr(<lr schedule>)
```
"""
_HAS_AGGREGATE_GRAD = True
def __init__(self,
weight_decay=0.0,
learning_rate=0.01,
momentum=0.0,
momentum_start=0.0,
warmup_steps=1000,
nesterov=False,
name="SGD",
weight_keys=("kernel", "weight"),
bias_keys=("bias", "beta"),
**kwargs):
super(SGDTorch, self).__init__(name, **kwargs)
# Create Hyper Params for each group of the LR
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("bias_learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("other_learning_rate", kwargs.get("lr", learning_rate))
# SGD decay param
self._set_hyper("decay", self._initial_decay)
# Weight decay param
self._weight_decay = weight_decay != 0.0
self._set_hyper("weight_decay", weight_decay)
# Enable Momentum
self._momentum = False
if isinstance(momentum, tf.Tensor) or callable(momentum) or momentum > 0:
self._momentum = True
if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1):
raise ValueError("`momentum` must be between [0, 1].")
self._set_hyper("momentum", momentum)
self._set_hyper("momentum_start", momentum_start)
self._set_hyper("warmup_steps", tf.cast(warmup_steps, tf.int32))
# Enable Nesterov Momentum
self.nesterov = nesterov
# weights, biases, other
self._weight_keys = weight_keys
self._bias_keys = bias_keys
self._variables_set = False
self._wset = set()
self._bset = set()
self._oset = set()
logging.info("Pytorch SGD simulation: ")
logging.info("Weight Decay: %f", weight_decay)
def set_bias_lr(self, lr):
self._set_hyper("bias_learning_rate", lr)
def set_other_lr(self, lr):
self._set_hyper("other_learning_rate", lr)
def _search(self, var, keys):
"""Search all all keys for matches. Return True on match."""
if keys is not None:
# variable group is not ignored so search for the keys.
for r in keys:
if re.search(r, var.name) is not None:
return True
return False
def search_and_set_variable_groups(self, variables):
"""Search all variable for matches at each group."""
weights = []
biases = []
others = []
for var in variables:
if self._search(var, self._weight_keys):
# search for weights
weights.append(var)
elif self._search(var, self._bias_keys):
# search for biases
biases.append(var)
else:
# if all searches fail, add to other group
others.append(var)
self._set_variable_groups(weights, biases, others)
return weights, biases, others
def _set_variable_groups(self, weights, biases, others):
"""Sets the variables to be used in each group."""
if self._variables_set:
logging.warning("_set_variable_groups has been called again indicating"
"that the variable groups have already been set, they"
"will be updated.")
self._wset.update(set([_var_key(w) for w in weights]))
self._bset.update(set([_var_key(b) for b in biases]))
self._oset.update(set([_var_key(o) for o in others]))
self._variables_set = True
return
def _get_variable_group(self, var, coefficients):
if self._variables_set:
      # Check which group holds which variables, as preset.
if _var_key(var) in self._wset:
return True, False, False
elif _var_key(var) in self._bset:
return False, True, False
else:
# search the variables at run time.
if self._search(var, self._weight_keys):
return True, False, False
elif self._search(var, self._bias_keys):
return False, True, False
return False, False, True
def _create_slots(self, var_list):
"""Create a momentum variable for each variable."""
if self._momentum:
for var in var_list:
# check if trainable to support GPU EMA.
if var.trainable:
self.add_slot(var, "momentum")
def _get_momentum(self, iteration):
"""Get the momentum value."""
momentum = self._get_hyper("momentum")
momentum_start = self._get_hyper("momentum_start")
momentum_warm_up_steps = tf.cast(
self._get_hyper("warmup_steps"), iteration.dtype)
value = tf.cond(
(iteration - momentum_warm_up_steps) <= 0,
true_fn=lambda: (momentum_start + # pylint: disable=g-long-lambda
(tf.cast(iteration, momentum.dtype) *
(momentum - momentum_start) / tf.cast(
momentum_warm_up_steps, momentum.dtype))),
false_fn=lambda: momentum)
return value
def _prepare_local(self, var_device, var_dtype, apply_state):
super(SGDTorch, self)._prepare_local(var_device, var_dtype, apply_state) # pytype: disable=attribute-error
weight_decay = self._get_hyper("weight_decay")
apply_state[(var_device,
var_dtype)]["weight_decay"] = tf.cast(weight_decay, var_dtype)
if self._momentum:
momentum = self._get_momentum(self.iterations)
momentum = tf.cast(momentum, var_dtype)
apply_state[(var_device,
var_dtype)]["momentum"] = tf.identity(momentum)
bias_lr = self._get_hyper("bias_learning_rate")
if isinstance(bias_lr, LearningRateSchedule):
bias_lr = bias_lr(self.iterations)
bias_lr = tf.cast(bias_lr, var_dtype)
apply_state[(var_device,
var_dtype)]["bias_lr_t"] = tf.identity(bias_lr)
other_lr = self._get_hyper("other_learning_rate")
if isinstance(other_lr, LearningRateSchedule):
other_lr = other_lr(self.iterations)
other_lr = tf.cast(other_lr, var_dtype)
apply_state[(var_device,
var_dtype)]["other_lr_t"] = tf.identity(other_lr)
return apply_state[(var_device, var_dtype)]
def _apply(self, grad, var, weight_decay, momentum, lr):
"""Uses Pytorch Optimizer with Weight decay SGDW."""
dparams = grad
groups = []
# do not update non-trainable weights
if not var.trainable:
return tf.group(*groups)
if self._weight_decay:
dparams += (weight_decay * var)
if self._momentum:
momentum_var = self.get_slot(var, "momentum")
momentum_update = momentum_var.assign(
momentum * momentum_var + dparams, use_locking=self._use_locking)
groups.append(momentum_update)
if self.nesterov:
dparams += (momentum * momentum_update)
else:
dparams = momentum_update
weight_update = var.assign_add(-lr * dparams, use_locking=self._use_locking)
groups.append(weight_update)
return tf.group(*groups)
def _run_sgd(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
self._fallback_apply_state(var_device, var_dtype))
weights, bias, others = self._get_variable_group(var, coefficients)
weight_decay = tf.zeros_like(coefficients["weight_decay"])
lr = coefficients["lr_t"]
if weights:
weight_decay = coefficients["weight_decay"]
lr = coefficients["lr_t"]
elif bias:
weight_decay = tf.zeros_like(coefficients["weight_decay"])
lr = coefficients["bias_lr_t"]
elif others:
weight_decay = tf.zeros_like(coefficients["weight_decay"])
lr = coefficients["other_lr_t"]
momentum = coefficients["momentum"]
return self._apply(grad, var, weight_decay, momentum, lr)
def _resource_apply_dense(self, grad, var, apply_state=None):
return self._run_sgd(grad, var, apply_state=apply_state)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
# This method is only needed for momentum optimization.
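    # Scatter-add the sparse gradient into a dense zero tensor so that the
    # dense update path in _run_sgd can be reused unchanged.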
holder = tf.tensor_scatter_nd_add(
tf.zeros_like(var), tf.expand_dims(indices, axis=-1), grad)
return self._run_sgd(holder, var, apply_state=apply_state)
def get_config(self):
config = super(SGDTorch, self).get_config()
config.update({
"learning_rate": self._serialize_hyperparameter("learning_rate"),
"decay": self._initial_decay,
"momentum": self._serialize_hyperparameter("momentum"),
"momentum_start": self._serialize_hyperparameter("momentum_start"),
"weight_decay": self._serialize_hyperparameter("weight_decay"),
"warmup_steps": self._serialize_hyperparameter("warmup_steps"),
"nesterov": self.nesterov,
})
return config
@property
def learning_rate(self):
    return self._get_hyper("learning_rate")
| 11,225 | 34.751592 | 111 | py |
models | models-master/official/projects/yolo/optimization/configs/optimizer_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataclasses for optimizer configs."""
import dataclasses
from typing import List, Optional
from official.modeling.hyperparams import base_config
from official.modeling.optimization.configs import optimizer_config
@dataclasses.dataclass
class BaseOptimizerConfig(base_config.Config):
"""Base optimizer config.
Attributes:
    clipnorm: float >= 0 or None. If not None, gradients will be clipped when
      their L2 norm exceeds this value.
    clipvalue: float >= 0 or None. If not None, gradients will be clipped when
      their absolute value exceeds this value.
    global_clipnorm: float >= 0 or None. If not None, the gradient of all
      weights is clipped so that their global norm is no higher than this
      value.
"""
clipnorm: Optional[float] = None
clipvalue: Optional[float] = None
global_clipnorm: Optional[float] = None
@dataclasses.dataclass
class SGDTorchConfig(optimizer_config.BaseOptimizerConfig):
"""Configuration for SGD optimizer.
The attributes for this class matches the arguments of tf.keras.optimizer.SGD.
Attributes:
name: name of the optimizer.
decay: decay rate for SGD optimizer.
nesterov: nesterov for SGD optimizer.
momentum_start: momentum starting point for SGD optimizer.
momentum: momentum for SGD optimizer.
"""
name: str = "SGD"
decay: float = 0.0
nesterov: bool = False
momentum_start: float = 0.0
momentum: float = 0.9
warmup_steps: int = 0
weight_decay: float = 0.0
weight_keys: Optional[List[str]] = dataclasses.field(
default_factory=lambda: ["kernel", "weight"])
bias_keys: Optional[List[str]] = dataclasses.field(
default_factory=lambda: ["bias", "beta"])
| 2,283 | 34.6875 | 80 | py |
models | models-master/official/projects/yolo/optimization/configs/optimization_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataclasses for optimization configs.
This file defines the dataclass for optimization configs (OptimizationConfig).
It also has two helper functions, get_optimizer_config and get_lr_config, from
an OptimizationConfig class.
"""
import dataclasses
from typing import Optional
from official.modeling.optimization.configs import optimization_config as optimization_cfg
from official.projects.yolo.optimization.configs import optimizer_config as opt_cfg
@dataclasses.dataclass
class OptimizerConfig(optimization_cfg.OptimizerConfig):
"""Configuration for optimizer.
Attributes:
    type: 'str', type of optimizer to be used, one of the fields below.
    sgd: sgd optimizer config.
    adam: adam optimizer config.
    adamw: adam with weight decay.
    lamb: lamb optimizer.
    rmsprop: rmsprop optimizer.
    sgd_torch: PyTorch-style sgd optimizer config.
  """
type: Optional[str] = None
sgd_torch: opt_cfg.SGDTorchConfig = dataclasses.field(
default_factory=opt_cfg.SGDTorchConfig
)
@dataclasses.dataclass
class OptimizationConfig(optimization_cfg.OptimizationConfig):
"""Configuration for optimizer and learning rate schedule.
Attributes:
optimizer: optimizer oneof config.
ema: optional exponential moving average optimizer config, if specified, ema
optimizer will be used.
learning_rate: learning rate oneof config.
warmup: warmup oneof config.
"""
type: Optional[str] = None
optimizer: OptimizerConfig = dataclasses.field(
default_factory=OptimizerConfig
)
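# Example (illustrative only): the torch-style optimizer is selected through
# the oneof `type` key, mirroring how the YOLO experiment configs build it:
#   OptimizationConfig({
#       'optimizer': {'type': 'sgd_torch', 'sgd_torch': {'momentum': 0.937}},
#       'learning_rate': {'type': 'cosine', 'cosine': {}},
#   })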
| 2,084 | 33.180328 | 90 | py |
models | models-master/official/projects/yolo/configs/yolov7.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv7 configuration definition."""
import dataclasses
import os
from typing import List, Optional, Union
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.yolo import optimization
from official.projects.yolo.configs import backbones
from official.projects.yolo.configs import decoders
from official.projects.yolo.configs.yolo import AnchorBoxes
from official.projects.yolo.configs.yolo import DataConfig
from official.projects.yolo.configs.yolo import Mosaic
from official.projects.yolo.configs.yolo import Parser
from official.projects.yolo.configs.yolo import YoloDetectionGenerator
from official.vision.configs import common
# pytype: disable=annotation-type-mismatch
MIN_LEVEL = 3
MAX_LEVEL = 5
GLOBAL_SEED = 1000
def _build_dict(min_level, max_level, value):
vals = {str(key): value for key in range(min_level, max_level + 1)}
vals['all'] = None
return lambda: vals
def _build_path_scales(min_level, max_level):
return lambda: {str(key): 2**key for key in range(min_level, max_level + 1)}
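# For example, _build_dict(3, 5, 2.0)() returns
# {'3': 2.0, '4': 2.0, '5': 2.0, 'all': None}, and _build_path_scales(3, 5)()
# returns {'3': 8, '4': 16, '5': 32}.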
# pylint: disable=missing-class-docstring
@dataclasses.dataclass
class TfExampleDecoder(hyperparams.Config):
regenerate_source_id: bool = False
coco91_to_80: bool = True
@dataclasses.dataclass
class TfExampleDecoderLabelMap(hyperparams.Config):
regenerate_source_id: bool = False
label_map: str = ''
@dataclasses.dataclass
class DataDecoder(hyperparams.OneOfConfig):
type: Optional[str] = 'simple_decoder'
simple_decoder: TfExampleDecoder = dataclasses.field(
default_factory=TfExampleDecoder
)
label_map_decoder: TfExampleDecoderLabelMap = dataclasses.field(
default_factory=TfExampleDecoderLabelMap
)
@dataclasses.dataclass
class YoloV7Head(hyperparams.Config):
"""Parameterization for the YOLO Head."""
num_anchors: int = 3
use_separable_conv: bool = False
@dataclasses.dataclass
class YoloV7Loss(hyperparams.Config):
"""Config or YOLOv7 loss."""
alpha: float = 0.0
gamma: float = 0.0
box_weight: float = 0.05
obj_weight: float = 0.7
cls_weight: float = 0.3
label_smoothing: float = 0.0
anchor_threshold: float = 4.0
iou_mix_ratio: float = 1.0
auto_balance: bool = False
use_ota: bool = True
@dataclasses.dataclass
class Box(hyperparams.Config):
box: List[int] = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class YoloV7(hyperparams.Config):
input_size: Optional[List[int]] = dataclasses.field(
default_factory=lambda: [640, 640, 3]
)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='yolov7', yolov7=backbones.YoloV7(model_id='yolov7')
)
)
decoder: decoders.Decoder = dataclasses.field(
default_factory=lambda: decoders.Decoder( # pylint: disable=g-long-lambda
type='yolov7', yolo_decoder=decoders.YoloV7(model_id='yolov7')
)
)
head: YoloV7Head = dataclasses.field(default_factory=YoloV7Head)
detection_generator: YoloDetectionGenerator = dataclasses.field(
default_factory=lambda: YoloDetectionGenerator( # pylint: disable=g-long-lambda
box_type=_build_dict(MIN_LEVEL, MAX_LEVEL, 'scaled')(),
scale_xy=_build_dict(MIN_LEVEL, MAX_LEVEL, 2.0)(),
path_scales=_build_path_scales(MIN_LEVEL, MAX_LEVEL)(),
nms_version='iou',
iou_thresh=0.001,
nms_thresh=0.7,
max_boxes=300,
pre_nms_points=5000,
)
)
loss: YoloV7Loss = dataclasses.field(default_factory=YoloV7Loss)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
activation='swish',
use_sync_bn=True,
norm_momentum=0.99,
norm_epsilon=0.001,
)
)
num_classes: int = 80
min_level: int = 3
max_level: int = 5
anchor_boxes: AnchorBoxes = dataclasses.field(default_factory=AnchorBoxes)
@dataclasses.dataclass
class YoloV7Task(cfg.TaskConfig):
per_category_metrics: bool = False
smart_bias_lr: float = 0.0
model: YoloV7 = dataclasses.field(default_factory=YoloV7)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
weight_decay: float = 0.0
annotation_file: Optional[str] = None
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[str, List[str]] = (
'all' # all, backbone, and/or decoder
)
gradient_clip_norm: float = 0.0
seed = GLOBAL_SEED
# Sets maximum number of boxes to be evaluated by coco eval api.
max_num_eval_detections: int = 100
COCO_INPUT_PATH_BASE = (
'/readahead/200M/placer/prod/home/tensorflow-performance-data/datasets/coco'
)
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('yolov7')
def yolov7() -> cfg.ExperimentConfig:
"""YOLOv7 general config."""
return cfg.ExperimentConfig(
task=YoloV7Task(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
],
)
@exp_factory.register_config_factory('coco_yolov7')
def coco_yolov7() -> cfg.ExperimentConfig:
"""COCO object detection with YOLOv7."""
train_batch_size = 256
eval_batch_size = 256
train_epochs = 300
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
validation_interval = 5
warmup_steps = 3 * steps_per_epoch
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
task=YoloV7Task(
init_checkpoint='',
init_checkpoint_modules='backbone',
annotation_file=None,
weight_decay=0.0,
model=YoloV7(
norm_activation=common.NormActivation(
activation='swish',
norm_momentum=0.03,
norm_epsilon=0.001,
use_sync_bn=True),
head=YoloV7Head(),
loss=YoloV7Loss(),
anchor_boxes=AnchorBoxes(
anchors_per_scale=3,
boxes=[
Box(box=[12, 16]),
Box(box=[19, 36]),
Box(box=[40, 28]),
Box(box=[36, 75]),
Box(box=[76, 55]),
Box(box=[72, 146]),
Box(box=[142, 110]),
Box(box=[192, 243]),
Box(box=[459, 401]),
],
),
),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
dtype='float32',
parser=Parser(
max_num_instances=300,
letter_box=True,
random_flip=True,
random_pad=False,
jitter=0.0,
aug_scale_min=1.0,
aug_scale_max=1.0,
aug_rand_translate=0.2,
aug_rand_saturation=0.7,
aug_rand_brightness=0.4,
aug_rand_hue=0.015,
aug_rand_angle=0.0,
aug_rand_perspective=0.0,
use_tie_breaker=True,
best_match_only=True,
anchor_thresh=4.0,
area_thresh=0.0,
mosaic=Mosaic(
mosaic_frequency=1.0,
mosaic9_frequency=0.2,
mixup_frequency=0.15,
mosaic_crop_mode='scale',
mosaic_center=0.25,
mosaic9_center=0.33,
aug_scale_min=0.1,
aug_scale_max=1.9,
),
),
),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=True,
dtype='float32',
parser=Parser(
max_num_instances=300,
letter_box=True,
use_tie_breaker=True,
best_match_only=True,
anchor_thresh=4.0,
area_thresh=0.0,
),
),
smart_bias_lr=0.1,
),
trainer=cfg.TrainerConfig(
best_checkpoint_export_subdir='best_ckpt',
best_checkpoint_eval_metric='AP',
best_checkpoint_metric_comp='higher',
train_steps=train_epochs * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=validation_interval * steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'ema': {
'average_decay': 0.9999,
'trainable_weights_only': False,
'dynamic_decay': True,
},
'optimizer': {
'type': 'sgd_torch',
'sgd_torch': {
'momentum': 0.937,
'momentum_start': 0.8,
'nesterov': True,
'warmup_steps': warmup_steps,
# Scale up the weight decay by batch size.
'weight_decay': 0.0005 * train_batch_size / 64,
},
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 0.01,
'alpha': 0.1,
'decay_steps': train_epochs * steps_per_epoch,
},
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': warmup_steps,
'warmup_learning_rate': 0.0,
},
},
}),
),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
],
)
return config
@exp_factory.register_config_factory('coco_yolov7tiny')
def coco_yolov7_tiny() -> cfg.ExperimentConfig:
"""COCO object detection with YOLOv7-tiny."""
config = coco_yolov7()
config.task.model.input_size = [416, 416, 3]
config.task.model.backbone.yolov7.model_id = 'yolov7-tiny'
config.task.model.decoder.yolov7.model_id = 'yolov7-tiny'
config.task.model.norm_activation.activation = 'leaky'
config.task.model.anchor_boxes.boxes = [
Box(box=[10, 13]),
Box(box=[16, 30]),
Box(box=[33, 23]),
Box(box=[30, 61]),
Box(box=[62, 45]),
Box(box=[59, 119]),
Box(box=[116, 90]),
Box(box=[156, 198]),
Box(box=[373, 326]),
]
config.task.model.loss.cls_weight = 0.5
config.task.model.loss.obj_weight = 1.0
config.task.train_data.parser.aug_rand_translate = 0.1
config.task.train_data.parser.mosaic.mixup_frequency = 0.05
config.task.train_data.parser.mosaic.aug_scale_min = 0.5
config.task.train_data.parser.mosaic.aug_scale_max = 1.5
config.trainer.optimizer_config.learning_rate.cosine.alpha = 0.01
return config
@exp_factory.register_config_factory('coco91_yolov7tiny')
def coco91_yolov7_tiny() -> cfg.ExperimentConfig:
"""COCO object detection with YOLOv7-tiny using 91 classes."""
config = coco_yolov7_tiny()
config.task.model.num_classes = 91
config.task.model.decoder.yolov7.use_separable_conv = True
config.task.model.head.use_separable_conv = True
config.task.train_data.coco91_to_80 = False
config.task.validation_data.coco91_to_80 = False
return config
@exp_factory.register_config_factory('coco_yolov7x')
def coco_yolov7x() -> cfg.ExperimentConfig:
config = coco_yolov7()
config.task.model.backbone.yolov7.model_id = 'yolov7x'
config.task.model.decoder.yolov7.model_id = 'yolov7x'
return config
| 12,932 | 33.034211 | 86 | py |
models | models-master/official/projects/yolo/configs/yolo.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLO configuration definition."""
import dataclasses
import os
from typing import Any, List, Optional, Union
import numpy as np
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.yolo import optimization
from official.projects.yolo.configs import backbones
from official.projects.yolo.configs import decoders
from official.vision.configs import common
# pytype: disable=annotation-type-mismatch
MIN_LEVEL = 1
MAX_LEVEL = 7
GLOBAL_SEED = 1000
def _build_dict(min_level, max_level, value):
vals = {str(key): value for key in range(min_level, max_level + 1)}
vals['all'] = None
return lambda: vals
def _build_path_scales(min_level, max_level):
return lambda: {str(key): 2**key for key in range(min_level, max_level + 1)}
@dataclasses.dataclass
class FPNConfig(hyperparams.Config):
"""FPN config."""
all: Optional[Any] = None
def get(self):
"""Allow for a key for each level or a single key for all the levels."""
values = self.as_dict()
if 'all' in values and values['all'] is not None:
for key in values:
if key != 'all':
values[key] = values['all']
return values
# pylint: disable=missing-class-docstring
@dataclasses.dataclass
class TfExampleDecoder(hyperparams.Config):
regenerate_source_id: bool = False
coco91_to_80: bool = True
@dataclasses.dataclass
class TfExampleDecoderLabelMap(hyperparams.Config):
regenerate_source_id: bool = False
label_map: str = ''
@dataclasses.dataclass
class DataDecoder(hyperparams.OneOfConfig):
type: Optional[str] = 'simple_decoder'
simple_decoder: TfExampleDecoder = dataclasses.field(
default_factory=TfExampleDecoder
)
label_map_decoder: TfExampleDecoderLabelMap = dataclasses.field(
default_factory=TfExampleDecoderLabelMap
)
@dataclasses.dataclass
class Mosaic(hyperparams.Config):
mosaic_frequency: float = 0.0
mosaic9_frequency: float = 0.0
mixup_frequency: float = 0.0
mosaic_center: float = 0.2
mosaic9_center: float = 0.33
mosaic_crop_mode: Optional[str] = None
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
jitter: float = 0.0
@dataclasses.dataclass
class Parser(hyperparams.Config):
max_num_instances: int = 200
letter_box: Optional[bool] = True
random_flip: bool = True
  random_pad: bool = False
jitter: float = 0.0
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
aug_rand_saturation: float = 0.0
aug_rand_brightness: float = 0.0
aug_rand_hue: float = 0.0
aug_rand_angle: float = 0.0
aug_rand_translate: float = 0.0
aug_rand_perspective: float = 0.0
use_tie_breaker: bool = True
best_match_only: bool = False
anchor_thresh: float = -0.01
area_thresh: float = 0.1
mosaic: Mosaic = dataclasses.field(default_factory=Mosaic)
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
global_batch_size: int = 64
input_path: str = ''
tfds_name: str = ''
tfds_split: str = ''
is_training: bool = True
dtype: str = 'float16'
decoder: DataDecoder = dataclasses.field(default_factory=DataDecoder)
parser: Parser = dataclasses.field(default_factory=Parser)
shuffle_buffer_size: int = 10000
tfds_download: bool = True
cache: bool = False
drop_remainder: bool = True
file_type: str = 'tfrecord'
@dataclasses.dataclass
class YoloHead(hyperparams.Config):
"""Parameterization for the YOLO Head."""
smart_bias: bool = True
@dataclasses.dataclass
class YoloDetectionGenerator(hyperparams.Config):
apply_nms: bool = True
box_type: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 'original'))
scale_xy: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 1.0))
path_scales: FPNConfig = dataclasses.field(
default_factory=_build_path_scales(MIN_LEVEL, MAX_LEVEL))
# Choose from v1, v2, iou and greedy.
nms_version: str = 'greedy'
iou_thresh: float = 0.001
nms_thresh: float = 0.6
max_boxes: int = 200
pre_nms_points: int = 5000
# Only works when nms_version='v2'.
use_class_agnostic_nms: Optional[bool] = False
@dataclasses.dataclass
class YoloLoss(hyperparams.Config):
ignore_thresh: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 0.0))
truth_thresh: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 1.0))
box_loss_type: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 'ciou'))
iou_normalizer: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 1.0))
cls_normalizer: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 1.0))
object_normalizer: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 1.0))
max_delta: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, np.inf))
objectness_smooth: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 0.0))
label_smoothing: float = 0.0
use_scaled_loss: bool = True
update_on_repeat: bool = True
@dataclasses.dataclass
class Box(hyperparams.Config):
box: List[int] = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class AnchorBoxes(hyperparams.Config):
boxes: Optional[List[Box]] = None
level_limits: Optional[List[int]] = None
anchors_per_scale: int = 3
generate_anchors: bool = False
scaling_mode: str = 'sqrt'
box_generation_mode: str = 'per_level'
num_samples: int = 1024
def get(self, min_level, max_level):
"""Distribute them in order to each level.
Args:
min_level: `int` the lowest output level.
max_level: `int` the heighest output level.
Returns:
anchors_per_level: A `Dict[List[int]]` of the anchor boxes for each level.
self.level_limits: A `List[int]` of the box size limits to link to each
level under anchor free conditions.
"""
if self.level_limits is None:
boxes = [box.box for box in self.boxes]
else:
boxes = [[1.0, 1.0]] * ((max_level - min_level) + 1)
self.anchors_per_scale = 1
anchors_per_level = dict()
start = 0
for i in range(min_level, max_level + 1):
anchors_per_level[str(i)] = boxes[start:start + self.anchors_per_scale]
start += self.anchors_per_scale
return anchors_per_level, self.level_limits
def set_boxes(self, boxes):
self.boxes = [Box(box=box) for box in boxes]
@dataclasses.dataclass
class Yolo(hyperparams.Config):
input_size: Optional[List[int]] = dataclasses.field(
default_factory=lambda: [512, 512, 3])
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='darknet', darknet=backbones.Darknet(model_id='cspdarknet53')
)
)
decoder: decoders.Decoder = dataclasses.field(
default_factory=lambda: decoders.Decoder( # pylint: disable=g-long-lambda
type='yolo_decoder',
yolo_decoder=decoders.YoloDecoder(version='v4', type='regular'),
)
)
head: YoloHead = dataclasses.field(default_factory=YoloHead)
detection_generator: YoloDetectionGenerator = dataclasses.field(
default_factory=YoloDetectionGenerator
)
loss: YoloLoss = dataclasses.field(default_factory=YoloLoss)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
activation='mish',
use_sync_bn=True,
norm_momentum=0.99,
norm_epsilon=0.001,
)
)
num_classes: int = 80
anchor_boxes: AnchorBoxes = dataclasses.field(default_factory=AnchorBoxes)
darknet_based_model: bool = False
@dataclasses.dataclass
class YoloTask(cfg.TaskConfig):
per_category_metrics: bool = False
smart_bias_lr: float = 0.0
model: Yolo = dataclasses.field(default_factory=Yolo)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
weight_decay: float = 0.0
annotation_file: Optional[str] = None
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[
str, List[str]] = 'all' # all, backbone, and/or decoder
gradient_clip_norm: float = 0.0
seed = GLOBAL_SEED
# Sets maximum number of boxes to be evaluated by coco eval api.
max_num_eval_detections: int = 100
COCO_INPUT_PATH_BASE = 'coco'
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('yolo')
def yolo() -> cfg.ExperimentConfig:
"""Yolo general config."""
return cfg.ExperimentConfig(
task=YoloTask(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
@exp_factory.register_config_factory('yolo_darknet')
def yolo_darknet() -> cfg.ExperimentConfig:
"""COCO object detection with YOLOv3 and v4."""
train_batch_size = 256
eval_batch_size = 8
train_epochs = 300
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
validation_interval = 5
max_num_instances = 200
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=YoloTask(
smart_bias_lr=0.1,
init_checkpoint='',
init_checkpoint_modules='backbone',
annotation_file=None,
weight_decay=0.0,
model=Yolo(
darknet_based_model=True,
norm_activation=common.NormActivation(use_sync_bn=True),
head=YoloHead(smart_bias=True),
loss=YoloLoss(use_scaled_loss=False, update_on_repeat=True),
anchor_boxes=AnchorBoxes(
anchors_per_scale=3,
boxes=[
Box(box=[12, 16]),
Box(box=[19, 36]),
Box(box=[40, 28]),
Box(box=[36, 75]),
Box(box=[76, 55]),
Box(box=[72, 146]),
Box(box=[142, 110]),
Box(box=[192, 243]),
Box(box=[459, 401])
])),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
dtype='float32',
parser=Parser(
letter_box=False,
aug_rand_saturation=1.5,
aug_rand_brightness=1.5,
aug_rand_hue=0.1,
use_tie_breaker=True,
best_match_only=False,
anchor_thresh=0.4,
area_thresh=0.1,
max_num_instances=max_num_instances,
mosaic=Mosaic(
mosaic_frequency=0.75,
mixup_frequency=0.0,
mosaic_crop_mode='crop',
mosaic_center=0.2))),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=True,
dtype='float32',
parser=Parser(
letter_box=False,
use_tie_breaker=True,
best_match_only=False,
anchor_thresh=0.4,
area_thresh=0.1,
max_num_instances=max_num_instances,
))),
trainer=cfg.TrainerConfig(
train_steps=train_epochs * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=validation_interval * steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'ema': {
'average_decay': 0.9998,
'trainable_weights_only': False,
'dynamic_decay': True,
},
'optimizer': {
'type': 'sgd_torch',
'sgd_torch': {
'momentum': 0.949,
'momentum_start': 0.949,
'nesterov': True,
'warmup_steps': 1000,
'weight_decay': 0.0005,
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
240 * steps_per_epoch
],
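                    # Scale the base learning rate linearly with the global
                    # batch size (reference batch size 64).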
'values': [
0.00131 * train_batch_size / 64.0,
0.000131 * train_batch_size / 64.0,
]
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 1000,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('scaled_yolo')
def scaled_yolo() -> cfg.ExperimentConfig:
"""COCO object detection with YOLOv4-csp and v4."""
train_batch_size = 256
eval_batch_size = 256
train_epochs = 300
warmup_epochs = 3
validation_interval = 5
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
max_num_instances = 300
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=YoloTask(
smart_bias_lr=0.1,
init_checkpoint_modules='',
weight_decay=0.0,
annotation_file=None,
model=Yolo(
darknet_based_model=False,
norm_activation=common.NormActivation(
activation='mish',
use_sync_bn=True,
norm_epsilon=0.001,
norm_momentum=0.97),
head=YoloHead(smart_bias=True),
loss=YoloLoss(use_scaled_loss=True),
anchor_boxes=AnchorBoxes(
anchors_per_scale=3,
boxes=[
Box(box=[12, 16]),
Box(box=[19, 36]),
Box(box=[40, 28]),
Box(box=[36, 75]),
Box(box=[76, 55]),
Box(box=[72, 146]),
Box(box=[142, 110]),
Box(box=[192, 243]),
Box(box=[459, 401])
])),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
dtype='float32',
parser=Parser(
aug_rand_saturation=0.7,
aug_rand_brightness=0.4,
aug_rand_hue=0.015,
letter_box=True,
use_tie_breaker=True,
best_match_only=True,
anchor_thresh=4.0,
random_pad=False,
area_thresh=0.1,
max_num_instances=max_num_instances,
mosaic=Mosaic(
mosaic_crop_mode='scale',
mosaic_frequency=1.0,
mixup_frequency=0.0,
))),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False,
dtype='float32',
parser=Parser(
letter_box=True,
use_tie_breaker=True,
best_match_only=True,
anchor_thresh=4.0,
area_thresh=0.1,
max_num_instances=max_num_instances,
))),
trainer=cfg.TrainerConfig(
train_steps=train_epochs * steps_per_epoch,
validation_steps=20,
validation_interval=validation_interval * steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=5 * steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'ema': {
'average_decay': 0.9999,
'trainable_weights_only': False,
'dynamic_decay': True,
},
'optimizer': {
'type': 'sgd_torch',
'sgd_torch': {
'momentum': 0.937,
'momentum_start': 0.8,
'nesterov': True,
'warmup_steps': steps_per_epoch * warmup_epochs,
'weight_decay': 0.0005,
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 0.01,
'alpha': 0.2,
'decay_steps': train_epochs * steps_per_epoch,
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': steps_per_epoch * warmup_epochs,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
| 18,648 | 33.155678 | 85 | py |
models | models-master/official/projects/yolo/modeling/factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory.py."""
import numpy as np
import tensorflow as tf
# pylint: disable=unused-import
from official.projects.yolo.configs import backbones
from official.projects.yolo.configs import yolo
from official.projects.yolo.configs import yolov7
from official.projects.yolo.modeling import factory
from official.projects.yolo.modeling.backbones import darknet
from official.projects.yolo.modeling.backbones import yolov7 as yolov7_backbone
from official.projects.yolo.modeling.decoders import yolo_decoder
from official.projects.yolo.modeling.decoders import yolov7 as yolov7_decoder
from official.projects.yolo.modeling.heads import yolo_head as heads
from official.projects.yolo.modeling.heads import yolov7_head
from official.projects.yolo.modeling.layers import detection_generator
# pylint: enable=unused-import
class FactoryTest(tf.test.TestCase):
def test_yolo_builder(self):
num_classes = 3
input_size = 640
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size, input_size, 3])
model_config = yolo.Yolo(
num_classes=num_classes,
head=yolo.YoloHead(smart_bias=True),
anchor_boxes=yolo.AnchorBoxes(
anchors_per_scale=3,
boxes=[
yolo.Box(box=[12, 16]),
yolo.Box(box=[19, 36]),
yolo.Box(box=[40, 28]),
yolo.Box(box=[36, 75]),
yolo.Box(box=[76, 55]),
yolo.Box(box=[72, 146]),
yolo.Box(box=[142, 110]),
yolo.Box(box=[192, 243]),
yolo.Box(box=[459, 401])
]))
l2_regularizer = tf.keras.regularizers.l2(5e-5)
yolo_model, _ = factory.build_yolo(
input_specs=input_specs,
model_config=model_config,
l2_regularization=l2_regularizer)
# Do forward pass.
inputs = np.random.rand(2, input_size, input_size, 3)
_ = yolo_model(inputs)
def test_yolov7_builder(self):
num_classes = 3
input_size = 640
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size, input_size, 3]
)
model_config = yolov7.YoloV7(
num_classes=num_classes,
head=yolov7.YoloV7Head(),
anchor_boxes=yolo.AnchorBoxes(
anchors_per_scale=3,
boxes=[
yolo.Box(box=[12, 16]),
yolo.Box(box=[19, 36]),
yolo.Box(box=[40, 28]),
yolo.Box(box=[36, 75]),
yolo.Box(box=[76, 55]),
yolo.Box(box=[72, 146]),
yolo.Box(box=[142, 110]),
yolo.Box(box=[192, 243]),
yolo.Box(box=[459, 401]),
],
),
)
l2_regularizer = tf.keras.regularizers.l2(5e-5)
yolo_model = factory.build_yolov7(
input_specs=input_specs,
model_config=model_config,
l2_regularization=l2_regularizer,
)
# Do forward pass.
inputs = np.random.rand(2, input_size, input_size, 3)
_ = yolo_model(inputs)
if __name__ == '__main__':
tf.test.main()
| 3,665 | 32.944444 | 79 | py |
models | models-master/official/projects/yolo/modeling/yolov7_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv7 models."""
from typing import Mapping, Union, Any, Dict
from absl import logging
import tensorflow as tf
from official.projects.yolo.modeling.layers import nn_blocks
class YoloV7(tf.keras.Model):
"""The YOLOv7 model class."""
def __init__(self, backbone, decoder, head, detection_generator, **kwargs):
"""Detection initialization function.
Args:
backbone: `tf.keras.Model` a backbone network.
decoder: `tf.keras.Model` a decoder network.
      head: the YOLOv7 detection head.
detection_generator: the detection generator.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'decoder': decoder,
'head': head,
'detection_generator': detection_generator
}
# model components
self._backbone = backbone
self._decoder = decoder
self._head = head
self._detection_generator = detection_generator
self._fused = False
return
def call(self,
inputs: tf.Tensor,
training: bool = None,
mask: Any = None) -> Dict[str, tf.Tensor]:
backbone_outputs = self.backbone(inputs)
decoder_outputs = self.decoder(backbone_outputs)
raw_outputs = self.head(decoder_outputs)
if training:
return {'raw_output': raw_outputs}
else:
# Post-processing.
predictions = self.detection_generator(raw_outputs)
predictions.update({'raw_output': raw_outputs})
return predictions
@property
def backbone(self):
return self._backbone
@property
def decoder(self):
return self._decoder
@property
def head(self):
return self._head
@property
def detection_generator(self):
return self._detection_generator
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(backbone=self.backbone, head=self.head)
if self.decoder is not None:
items.update(decoder=self.decoder)
return items
def fuse(self):
"""Performs re-parameterization on ConvBN and RepConv layers."""
logging.info('Fusing ConvBN and RepConv layers.')
if not self._fused:
self._fused = True
for layer in self.submodules:
if isinstance(layer, (nn_blocks.ConvBN, nn_blocks.RepConv)):
layer.fuse()
self.summary()
return
| 3,205 | 28.145455 | 77 | py |
models | models-master/official/projects/yolo/modeling/yolo_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo models."""
from typing import Mapping, Union, Any, Dict
import tensorflow as tf
from official.projects.yolo.modeling.layers import nn_blocks
class Yolo(tf.keras.Model):
"""The YOLO model class."""
def __init__(self,
backbone,
decoder,
head,
detection_generator,
**kwargs):
"""Detection initialization function.
Args:
backbone: `tf.keras.Model` a backbone network.
decoder: `tf.keras.Model` a decoder network.
      head: the YOLO detection head.
detection_generator: the detection generator.
**kwargs: keyword arguments to be passed.
"""
super(Yolo, self).__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'decoder': decoder,
'head': head,
'detection_generator': detection_generator
}
# model components
self._backbone = backbone
self._decoder = decoder
self._head = head
self._detection_generator = detection_generator
self._fused = False
def call(self,
inputs: tf.Tensor,
training: bool = None,
mask: Any = None) -> Dict[str, tf.Tensor]:
maps = self.backbone(inputs)
decoded_maps = self.decoder(maps)
raw_predictions = self.head(decoded_maps)
if training:
return {'raw_output': raw_predictions}
else:
# Post-processing.
predictions = self.detection_generator(raw_predictions)
predictions.update({'raw_output': raw_predictions})
return predictions
@property
def backbone(self):
return self._backbone
@property
def decoder(self):
return self._decoder
@property
def head(self):
return self._head
@property
def detection_generator(self):
return self._detection_generator
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(backbone=self.backbone, head=self.head)
if self.decoder is not None:
items.update(decoder=self.decoder)
return items
def fuse(self):
"""Fuses all Convolution and Batchnorm layers to get better latency."""
print('Fusing Conv Batch Norm Layers.')
if not self._fused:
self._fused = True
for layer in self.submodules:
if isinstance(layer, nn_blocks.ConvBN):
layer.fuse()
self.summary()
return
| 3,210 | 27.415929 | 75 | py |
models | models-master/official/projects/yolo/modeling/decoders/yolov7.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains decoder architectures for YOLOv7 families.
The models are built with ELAN and E-ELAN.
ELAN was proposed in:
[1] Wang, Chien-Yao and Liao, Hong-Yuan Mark and Yeh, I-Hau
Designing Network Design Strategies Through Gradient Path Analysis
arXiv:2211.04800
E-ELAN is proposed in YOLOv7 paper:
[1] Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark
YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time
object detectors
arXiv:2207.02696
"""
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.yolo.modeling.layers import nn_blocks
from official.projects.yolo.ops import initializer_ops
from official.vision.modeling.decoders import factory
# Required block functions for YOLOv7 decoder families.
_BLOCK_FNS = {
'convbn': nn_blocks.ConvBN,
'upsample2d': tf.keras.layers.UpSampling2D,
'maxpool2d': tf.keras.layers.MaxPooling2D,
'concat': tf.keras.layers.Concatenate,
'sppcspc': nn_blocks.SPPCSPC,
'repconv': nn_blocks.RepConv,
}
# Names for key arguments needed by each block function.
# Note that for field `from`, it can be either an integer or a str. Use of int
# means that the previous layer comes from a decoder intermediate output, while
# str means that the previous layer comes from the backbone output at a specific
# level.
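# For example, the spec ['convbn', -1, 1, 1, 256, False] is read against the
# 'convbn' schema below as block_fn='convbn', from=-1 (the previous decoder
# layer), kernel_size=1, strides=1, filters=256, is_output=False.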
_BLOCK_SPEC_SCHEMAS = {
'convbn': [
'block_fn',
'from',
'kernel_size',
'strides',
'filters',
'is_output',
],
'upsample2d': [
'block_fn',
'from',
'size',
'interpolation',
'is_output',
],
'maxpool2d': [
'block_fn',
'from',
'pool_size',
'strides',
'padding',
'is_output',
],
'concat': [
'block_fn',
'from',
'axis',
'is_output',
],
'sppcspc': ['block_fn', 'from', 'filters', 'is_output'],
'repconv': [
'block_fn',
'from',
'kernel_size',
'strides',
'filters',
'is_output',
],
}
# Define specs for YOLOv7-tiny variant. It is recommended to use together with
# YOLOv7-tiny backbone.
_YoloV7Tiny = [
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['maxpool2d', -1, 5, 1, 'same', False],
['maxpool2d', -2, 9, 1, 'same', False],
['maxpool2d', -3, 13, 1, 'same', False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 256, False],
['concat', [-1, -7], -1, False],
['convbn', -1, 1, 1, 256, False], # 8
['convbn', -1, 1, 1, 128, False],
['upsample2d', -1, 2, 'nearest', False],
['convbn', '4', 1, 1, 128, False], # route from backbone P4
['concat', [-1, -2], -1, False],
['convbn', -1, 1, 1, 64, False],
['convbn', -2, 1, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 128, False], # 18
['convbn', -1, 1, 1, 64, False],
['upsample2d', -1, 2, 'nearest', False],
['convbn', '3', 1, 1, 64, False], # route from backbone P3
['concat', [-1, -2], -1, False],
['convbn', -1, 1, 1, 32, False],
['convbn', -2, 1, 1, 32, False],
['convbn', -1, 3, 1, 32, False],
['convbn', -1, 3, 1, 32, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 64, False], # 28
['convbn', -1, 3, 2, 128, False],
['concat', [-1, 18], -1, False],
['convbn', -1, 1, 1, 64, False],
['convbn', -2, 1, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 128, False], # 36
['convbn', -1, 3, 2, 256, False],
['concat', [-1, 8], -1, False],
['convbn', -1, 1, 1, 128, False],
['convbn', -2, 1, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 256, False], # 44
['convbn', 28, 1, 1, 128, True],
['convbn', 36, 1, 1, 256, True],
['convbn', 44, 1, 1, 512, True],
]
# Define specs YOLOv7 variant. The spec schema is defined above.
# It is recommended to use together with YOLOv7 backbone.
_YoloV7 = [
['sppcspc', -1, 512, False], # 0
['convbn', -1, 1, 1, 256, False],
['upsample2d', -1, 2, 'nearest', False],
['convbn', '4', 1, 1, 256, False], # route from backbone P4
['concat', [-1, -2], -1, False],
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -2, -3, -4, -5, -6], -1, False],
['convbn', -1, 1, 1, 256, False], # 12
['convbn', -1, 1, 1, 128, False],
['upsample2d', -1, 2, 'nearest', False],
['convbn', '3', 1, 1, 128, False], # route from backbone P3
['concat', [-1, -2], -1, False],
['convbn', -1, 1, 1, 128, False],
['convbn', -2, 1, 1, 128, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['concat', [-1, -2, -3, -4, -5, -6], -1, False],
['convbn', -1, 1, 1, 128, False], # 24
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 128, False],
['convbn', -3, 1, 1, 128, False],
['convbn', -1, 3, 2, 128, False],
['concat', [-1, -3, 12], -1, False],
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -2, -3, -4, -5, -6], -1, False],
['convbn', -1, 1, 1, 256, False], # 37
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 256, False],
['convbn', -3, 1, 1, 256, False],
['convbn', -1, 3, 2, 256, False],
['concat', [-1, -3, 0], -1, False],
['convbn', -1, 1, 1, 512, False],
['convbn', -2, 1, 1, 512, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -2, -3, -4, -5, -6], -1, False],
['convbn', -1, 1, 1, 512, False], # 50
['repconv', 24, 3, 1, 256, True],
['repconv', 37, 3, 1, 512, True],
['repconv', 50, 3, 1, 1024, True],
]
_YoloV7X = [
['sppcspc', -1, 640, False], # 0
['convbn', -1, 1, 1, 320, False],
['upsample2d', -1, 2, 'nearest', False],
['convbn', '4', 1, 1, 320, False], # route from backbone P4
['concat', [-1, -2], -1, False],
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 320, False], # 14
['convbn', -1, 1, 1, 160, False],
['upsample2d', -1, 2, 'nearest', False],
['convbn', '3', 1, 1, 160, False], # route from backbone P3
['concat', [-1, -2], -1, False],
['convbn', -1, 1, 1, 128, False],
['convbn', -2, 1, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 160, False], # 28
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 160, False],
['convbn', -3, 1, 1, 160, False],
['convbn', -1, 3, 2, 160, False],
['concat', [-1, -3, 14], -1, False],
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 320, False], # 43
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 320, False],
['convbn', -3, 1, 1, 320, False],
['convbn', -1, 3, 2, 320, False],
['concat', [-1, -3, 0], -1, False],
['convbn', -1, 1, 1, 512, False],
['convbn', -2, 1, 1, 512, False],
['convbn', -1, 3, 1, 512, False],
['convbn', -1, 3, 1, 512, False],
['convbn', -1, 3, 1, 512, False],
['convbn', -1, 3, 1, 512, False],
['convbn', -1, 3, 1, 512, False],
['convbn', -1, 3, 1, 512, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 640, False], # 58
['repconv', 28, 3, 1, 320, True],
['repconv', 43, 3, 1, 640, True],
['repconv', 58, 3, 1, 1280, True],
]
# Aggregates all variants for YOLOv7 decoders.
DECODERS = {
'yolov7-tiny': _YoloV7Tiny,
'yolov7': _YoloV7,
'yolov7x': _YoloV7X,
}
class YoloV7(tf.keras.Model):
"""YOLOv7 decoder architecture."""
def __init__(
self,
input_specs,
model_id='yolov7',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='swish',
use_separable_conv=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
**kwargs,
):
"""Initializes the YOLOv7 decoder.
Args:
input_specs: a dictionary of `tf.TensorShape` from backbone outputs.
      model_id: a `str` representing the model variant.
use_sync_bn: if set to `True`, use synchronized batch normalization.
norm_momentum: a `float` of normalization momentum for the moving average.
norm_epsilon: a small `float` added to variance to avoid dividing by zero.
activation: a `str` name of the activation function.
      use_separable_conv: `bool` whether to use separable convs.
      kernel_initializer: a `str` for kernel initializer of convolutional
        layers.
      kernel_regularizer: a `tf.keras.regularizers.Regularizer` object for
        Conv2D. Defaults to None.
      bias_initializer: a `str` for bias initializer of convolutional layers.
      bias_regularizer: a `tf.keras.regularizers.Regularizer` object for Conv2D.
        Defaults to None.
**kwargs: Additional keyword arguments to be passed.
"""
self._input_specs = input_specs
self._model_id = model_id
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._activation = activation
self._use_separable_conv = use_separable_conv
self._kernel_initializer = initializer_ops.pytorch_kernel_initializer(
kernel_initializer
)
self._kernel_regularizer = kernel_regularizer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
inputs = self._generate_inputs(input_specs)
outputs = []
endpoints = {}
level = int(min(inputs.keys()))
block_specs = DECODERS[model_id.lower()]
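    # Build the decoder graph block by block: each spec names a block function,
    # where its inputs come from (a backbone level or an earlier decoder
    # output), and whether its result is exported as an output endpoint.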
for spec in block_specs:
block_kwargs = dict(zip(_BLOCK_SPEC_SCHEMAS[spec[0]], spec))
block_fn_str = block_kwargs.pop('block_fn')
from_index = block_kwargs.pop('from')
is_output = block_kwargs.pop('is_output')
x = self._group_layer_inputs(from_index, inputs, outputs)
if block_fn_str in ['convbn', 'sppcspc', 'repconv']:
block_kwargs.update({
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'use_separable_conv': self._use_separable_conv,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
})
block_fn = _BLOCK_FNS[block_fn_str](**block_kwargs)
x = block_fn(x)
outputs.append(x)
if is_output:
endpoints[str(level)] = x
level += 1
self._output_specs = {k: v.get_shape() for k, v in endpoints.items()}
super().__init__(inputs=inputs, outputs=endpoints, **kwargs)
def _generate_inputs(self, input_specs):
inputs = {}
for level, input_shape in input_specs.items():
inputs[level] = tf.keras.layers.Input(shape=input_shape[1:])
return inputs
def _group_layer_inputs(self, from_index, inputs, outputs):
if isinstance(from_index, list):
return [self._group_layer_inputs(i, inputs, outputs) for i in from_index]
if isinstance(from_index, int):
# Need last layer output from backbone.
if len(outputs) + from_index == -1:
return inputs[max(inputs.keys())]
return outputs[from_index]
return inputs[from_index] # from_index is a string.
def get_config(self):
config_dict = {
'input_specs': self._input_specs,
'model_id': self._model_id,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
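# Hedged usage sketch: building this decoder on top of a YOLOv7 backbone. The
# import path, model id, and input size below are assumptions drawn from the
# accompanying unit tests rather than requirements of this module.
#
#   from official.projects.yolo.modeling.backbones import yolov7 as backbones
#   backbone = backbones.YoloV7('yolov7')
#   decoder = YoloV7(backbone.output_specs, model_id='yolov7')
#   images = tf.keras.Input(shape=(224, 224, 3), batch_size=1)
#   features = decoder(backbone(images))  # dict of per-level feature maps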
@factory.register_decoder_builder('yolov7')
def build_yolov7(
input_specs: tf.keras.layers.InputSpec,
model_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None,
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds YOLOv7 decoder."""
decoder_config = model_config.decoder
norm_activation_config = model_config.norm_activation
assert (
decoder_config.type == 'yolov7'
), f'Inconsistent decoder type {decoder_config.type}.'
decoder_config = decoder_config.get()
assert (
decoder_config.model_id in DECODERS
), f'Unsupported decoder {decoder_config.model_id}.'
model = YoloV7(
model_id=decoder_config.model_id,
input_specs=input_specs,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
activation=norm_activation_config.activation,
kernel_regularizer=l2_regularizer,
use_separable_conv=decoder_config.use_separable_conv,
)
return model
| 15,634 | 32.623656 | 80 | py |
models | models-master/official/projects/yolo/modeling/decoders/yolo_decoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature Pyramid Network and Path Aggregation variants used in YOLO."""
from typing import Mapping, Optional, Union
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.yolo.modeling.layers import nn_blocks
from official.vision.modeling.decoders import factory
# Model configurations.
# The structure is as follows: model version {v3, v4, ... etc}, then
# the model config type {regular, tiny, csp, csp_large, ... etc}.
YOLO_MODELS = {
'v4':
dict(
regular=dict(
embed_spp=False,
use_fpn=True,
max_level_process_len=None,
path_process_len=6),
tiny=dict(
embed_spp=False,
use_fpn=False,
max_level_process_len=2,
path_process_len=1),
csp=dict(
embed_spp=False,
use_fpn=True,
max_level_process_len=None,
csp_stack=5,
fpn_depth=5,
path_process_len=6),
csp_large=dict(
embed_spp=False,
use_fpn=True,
max_level_process_len=None,
csp_stack=7,
fpn_depth=7,
max_fpn_depth=5,
max_csp_stack=5,
path_process_len=8,
fpn_filter_scale=1),
csp_xlarge=dict(
embed_spp=False,
use_fpn=True,
max_level_process_len=None,
csp_stack=7,
fpn_depth=7,
path_process_len=8,
fpn_filter_scale=1),
),
'v3':
dict(
regular=dict(
embed_spp=False,
use_fpn=False,
max_level_process_len=None,
path_process_len=6),
tiny=dict(
embed_spp=False,
use_fpn=False,
max_level_process_len=2,
path_process_len=1),
spp=dict(
embed_spp=True,
use_fpn=False,
max_level_process_len=2,
path_process_len=1),
),
}
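# Hedged example of how the table above is consumed: a (version, type) pair
# selects a base set of decoder keyword arguments, which `build_yolo_decoder`
# below copies and then overrides with any non-None values from the user
# config.
#
#   base_kwargs = YOLO_MODELS['v4']['csp'].copy()
#   # base_kwargs == {'embed_spp': False, 'use_fpn': True,
#   #                 'max_level_process_len': None, 'csp_stack': 5,
#   #                 'fpn_depth': 5, 'path_process_len': 6}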
class _IdentityRoute(tf.keras.layers.Layer):
def call(self, inputs): # pylint: disable=arguments-differ
return None, inputs
class YoloFPN(tf.keras.layers.Layer):
"""YOLO Feature pyramid network."""
def __init__(self,
fpn_depth=4,
max_fpn_depth=None,
max_csp_stack=None,
use_spatial_attention=False,
csp_stack=False,
activation='leaky',
fpn_filter_scale=1,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""Yolo FPN initialization function (Yolo V4).
Args:
fpn_depth: `int`, number of layers to use in each FPN path
if you choose to use an FPN.
max_fpn_depth: `int`, number of layers to use in each FPN path
if you choose to use an FPN along the largest FPN level.
max_csp_stack: `int`, number of layers to use for CSP on the largest_path
only.
use_spatial_attention: `bool`, use the spatial attention module.
csp_stack: `bool`, CSPize the FPN.
activation: `str`, the activation function to use typically leaky or mish.
fpn_filter_scale: `int`, scaling factor for the FPN filters.
use_sync_bn: if True, use synchronized batch normalization.
use_separable_conv: `bool` whether to use separable convs.
norm_momentum: `float`, normalization momentum for the moving average.
norm_epsilon: `float`, small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._fpn_depth = fpn_depth
self._max_fpn_depth = max_fpn_depth or self._fpn_depth
self._activation = activation
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_spatial_attention = use_spatial_attention
self._filter_scale = fpn_filter_scale
self._csp_stack = csp_stack
self._max_csp_stack = max_csp_stack or min(self._max_fpn_depth, csp_stack)
self._base_config = dict(
activation=self._activation,
use_sync_bn=self._use_sync_bn,
use_separable_conv=self._use_separable_conv,
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
bias_regularizer=self._bias_regularizer,
norm_epsilon=self._norm_epsilon,
norm_momentum=self._norm_momentum)
def get_raw_depths(self, minimum_depth, inputs):
"""Calculates the unscaled depths of the FPN branches.
Args:
minimum_depth (int): depth of the smallest branch of the FPN.
inputs (dict): dictionary of the shape of input args as a dictionary of
lists.
Returns:
The unscaled depths of the FPN branches.
"""
depths = []
for i in range(self._min_level, self._max_level + 1):
depths.append(inputs[str(i)][-1] / self._filter_scale)
return list(reversed(depths))
def build(self, inputs):
"""Use config dictionary to generate all important attributes for head.
Args:
inputs: dictionary of the shape of input args as a dictionary of lists.
"""
keys = [int(key) for key in inputs.keys()]
self._min_level = min(keys)
self._max_level = max(keys)
self._min_depth = inputs[str(self._min_level)][-1]
self._depths = self.get_raw_depths(self._min_depth, inputs)
# directly connect to an input path and process it
self.preprocessors = dict()
# resample an input and merge it with the output of another path
    # in order to aggregate backbone outputs
self.resamples = dict()
    # set of convolution layers and upsample layers that are used to
# prepare the FPN processors for output
for level, depth in zip(
reversed(range(self._min_level, self._max_level + 1)), self._depths):
if level == self._min_level:
self.resamples[str(level)] = nn_blocks.PathAggregationBlock(
filters=depth // 2,
inverted=True,
upsample=True,
drop_final=self._csp_stack == 0,
upsample_size=2,
**self._base_config)
self.preprocessors[str(level)] = _IdentityRoute()
elif level != self._max_level:
self.resamples[str(level)] = nn_blocks.PathAggregationBlock(
filters=depth // 2,
inverted=True,
upsample=True,
drop_final=False,
upsample_size=2,
**self._base_config)
self.preprocessors[str(level)] = nn_blocks.DarkRouteProcess(
filters=depth,
repetitions=self._fpn_depth - int(level == self._min_level),
block_invert=True,
insert_spp=False,
csp_stack=self._csp_stack,
**self._base_config)
else:
self.preprocessors[str(level)] = nn_blocks.DarkRouteProcess(
filters=depth,
repetitions=self._max_fpn_depth + 1 * int(self._csp_stack == 0),
insert_spp=True,
block_invert=False,
csp_stack=min(self._csp_stack, self._max_fpn_depth),
**self._base_config)
def call(self, inputs):
outputs = dict()
layer_in = inputs[str(self._max_level)]
for level in reversed(range(self._min_level, self._max_level + 1)):
_, x = self.preprocessors[str(level)](layer_in)
outputs[str(level)] = x
if level > self._min_level:
x_next = inputs[str(level - 1)]
_, layer_in = self.resamples[str(level - 1)]([x_next, x])
return outputs
class YoloPAN(tf.keras.layers.Layer):
"""YOLO Path Aggregation Network."""
def __init__(self,
path_process_len=6,
max_level_process_len=None,
embed_spp=False,
use_spatial_attention=False,
csp_stack=False,
activation='leaky',
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
fpn_input=True,
fpn_filter_scale=1.0,
**kwargs):
"""Yolo Path Aggregation Network initialization function (Yolo V3 and V4).
Args:
      path_process_len: `int`, number of layers to use in each decoder path.
      max_level_process_len: `int`, number of layers to use in the largest
        processing path, or the backbone's largest output if it is different.
embed_spp: `bool`, use the SPP found in the YoloV3 and V4 model.
use_spatial_attention: `bool`, use the spatial attention module.
csp_stack: `bool`, CSPize the FPN.
activation: `str`, the activation function to use typically leaky or mish.
use_sync_bn: if True, use synchronized batch normalization.
use_separable_conv: `bool` whether to use separable convs.
      norm_momentum: `float`, normalization momentum for the moving average.
norm_epsilon: `float`, small float added to variance to avoid dividing
by zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
      fpn_input: `bool`, for whether the input into this function is an FPN or
a backbone.
fpn_filter_scale: `int`, scaling factor for the FPN filters.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._path_process_len = path_process_len
self._embed_spp = embed_spp
self._use_spatial_attention = use_spatial_attention
self._activation = activation
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._fpn_input = fpn_input
self._max_level_process_len = max_level_process_len
self._csp_stack = csp_stack
self._fpn_filter_scale = fpn_filter_scale
if max_level_process_len is None:
self._max_level_process_len = path_process_len
self._base_config = dict(
activation=self._activation,
use_sync_bn=self._use_sync_bn,
use_separable_conv=self._use_separable_conv,
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
bias_regularizer=self._bias_regularizer,
norm_epsilon=self._norm_epsilon,
norm_momentum=self._norm_momentum)
def build(self, inputs):
"""Use config dictionary to generate all important attributes for head.
Args:
inputs: dictionary of the shape of input args as a dictionary of lists.
"""
# define the key order
keys = [int(key) for key in inputs.keys()]
self._min_level = min(keys)
self._max_level = max(keys)
self._min_depth = inputs[str(self._min_level)][-1]
self._depths = self.get_raw_depths(self._min_depth, inputs)
# directly connect to an input path and process it
self.preprocessors = dict()
# resample an input and merge it with the output of another path
    # in order to aggregate backbone outputs
self.resamples = dict()
    # FPN will reverse the key process order for the backbone, so we need to
    # adjust the order in which objects are created and processed. Not using
    # an FPN will directly connect the decoder to the backbone, therefore the
    # object creation order needs to go from the largest to the smallest
    # level.
if self._fpn_input:
# process order {... 3, 4, 5}
self._iterator = range(self._min_level, self._max_level + 1)
self._check = lambda x: x < self._max_level
self._key_shift = lambda x: x + 1
self._input = self._min_level
downsample = True
upsample = False
else:
# process order {5, 4, 3, ...}
self._iterator = list(
reversed(range(self._min_level, self._max_level + 1)))
self._check = lambda x: x > self._min_level
self._key_shift = lambda x: x - 1
self._input = self._max_level
downsample = False
upsample = True
for level, depth in zip(self._iterator, self._depths):
if level > 5:
proc_filters = lambda x: x * 2
resample_filters = lambda x: x
elif self._csp_stack == 0:
proc_filters = lambda x: x
resample_filters = lambda x: x // 2
else:
proc_filters = lambda x: x * 2
resample_filters = lambda x: x
if level == self._input:
self.preprocessors[str(level)] = nn_blocks.DarkRouteProcess(
filters=proc_filters(depth),
repetitions=self._max_level_process_len,
insert_spp=self._embed_spp,
block_invert=False,
insert_sam=self._use_spatial_attention,
csp_stack=self._csp_stack,
**self._base_config)
else:
self.resamples[str(level)] = nn_blocks.PathAggregationBlock(
filters=resample_filters(depth),
upsample=upsample,
downsample=downsample,
inverted=False,
drop_final=self._csp_stack == 0,
**self._base_config)
self.preprocessors[str(level)] = nn_blocks.DarkRouteProcess(
filters=proc_filters(depth),
repetitions=self._path_process_len,
insert_spp=False,
insert_sam=self._use_spatial_attention,
csp_stack=self._csp_stack,
**self._base_config)
def get_raw_depths(self, minimum_depth, inputs):
"""Calculates the unscaled depths of the FPN branches.
Args:
minimum_depth: `int` depth of the smallest branch of the FPN.
inputs: `dict[str, tf.InputSpec]` of the shape of input args as a
dictionary of lists.
Returns:
The unscaled depths of the FPN branches.
"""
depths = []
if len(inputs.keys()) > 3 or self._fpn_filter_scale > 1:
for i in range(self._min_level, self._max_level + 1):
depths.append(inputs[str(i)][-1])
else:
for _ in range(self._min_level, self._max_level + 1):
depths.append(minimum_depth)
minimum_depth *= 2
if self._fpn_input:
return depths
return list(reversed(depths))
def call(self, inputs):
outputs = dict()
layer_in = inputs[str(self._input)]
for level in self._iterator:
x_route, x = self.preprocessors[str(level)](layer_in)
outputs[str(level)] = x
if self._check(level):
x_next = inputs[str(self._key_shift(level))]
_, layer_in = self.resamples[str(
self._key_shift(level))]([x_route, x_next])
return outputs
class YoloDecoder(tf.keras.Model):
"""Darknet Backbone Decoder."""
def __init__(self,
input_specs,
use_fpn=False,
use_spatial_attention=False,
csp_stack=False,
fpn_depth=4,
max_fpn_depth=None,
max_csp_stack=None,
fpn_filter_scale=1,
path_process_len=6,
max_level_process_len=None,
embed_spp=False,
activation='leaky',
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""Yolo Decoder initialization function.
A unified model that ties all decoder components into a conditionally build
YOLO decoder.
Args:
input_specs: `dict[str, tf.InputSpec]`: input specs of each of the inputs
to the heads.
use_fpn: `bool`, use the FPN found in the YoloV4 model.
use_spatial_attention: `bool`, use the spatial attention module.
csp_stack: `bool`, CSPize the FPN.
      fpn_depth: `int`, number of layers to use in each FPN path if you choose
to use an FPN.
max_fpn_depth: `int`, maximum fpn depth.
max_csp_stack: `int`, maximum csp stack.
fpn_filter_scale: `int`, scaling factor for the FPN filters.
      path_process_len: `int`, number of layers to use in each decoder path.
      max_level_process_len: `int`, number of layers to use in the largest
        processing path, or the backbone's largest output if it is different.
embed_spp: `bool`, use the SPP found in the YoloV3 and V4 model.
activation: `str`, the activation function to use typically leaky or mish.
use_sync_bn: if True, use synchronized batch normalization.
      use_separable_conv: `bool` whether to use separable convs.
      norm_momentum: `float`, normalization momentum for the moving average.
norm_epsilon: `float`, small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
**kwargs: keyword arguments to be passed.
"""
self._input_specs = input_specs
self._use_fpn = use_fpn
self._fpn_depth = fpn_depth
self._max_fpn_depth = max_fpn_depth
self._max_csp_stack = max_csp_stack
self._path_process_len = path_process_len
self._max_level_process_len = max_level_process_len
self._embed_spp = embed_spp
self._activation = activation
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._base_config = dict(
use_spatial_attention=use_spatial_attention,
csp_stack=csp_stack,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
use_separable_conv=self._use_separable_conv,
fpn_filter_scale=fpn_filter_scale,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._decoder_config = dict(
path_process_len=self._path_process_len,
max_level_process_len=self._max_level_process_len,
embed_spp=self._embed_spp,
fpn_input=self._use_fpn,
**self._base_config)
inputs = {
key: tf.keras.layers.Input(shape=value[1:])
for key, value in input_specs.items()
}
if self._use_fpn:
inter_outs = YoloFPN(
fpn_depth=self._fpn_depth,
max_fpn_depth=self._max_fpn_depth,
max_csp_stack=self._max_csp_stack,
**self._base_config)(inputs)
outputs = YoloPAN(**self._decoder_config)(inter_outs)
else:
inter_outs = None
outputs = YoloPAN(**self._decoder_config)(inputs)
self._output_specs = {key: value.shape for key, value in outputs.items()}
super().__init__(inputs=inputs, outputs=outputs, name='YoloDecoder')
@property
def use_fpn(self):
return self._use_fpn
@property
def output_specs(self):
return self._output_specs
def get_config(self):
config = dict(
input_specs=self._input_specs,
use_fpn=self._use_fpn,
fpn_depth=self._fpn_depth,
**self._decoder_config)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
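# Hedged usage sketch with backbone-like input specs copied from
# yolo_decoder_test.py; for this configuration the decoder outputs keep the
# same shapes as the inputs at every level.
#
#   input_shape = {'3': [1, 52, 52, 256],
#                  '4': [1, 26, 26, 512],
#                  '5': [1, 13, 13, 1024]}
#   decoder = YoloDecoder(input_shape, use_fpn=False, path_process_len=6,
#                         activation='mish')
#   features = {k: tf.ones(v, dtype=tf.float32) for k, v in input_shape.items()}
#   outputs = decoder(features)  # dict with keys '3', '4', '5'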
@factory.register_decoder_builder('yolo_decoder')
def build_yolo_decoder(
input_specs: Mapping[str, tf.TensorShape],
model_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs) -> Union[None, tf.keras.Model, tf.keras.layers.Layer]:
"""Builds Yolo FPN/PAN decoder from a config.
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
model_config: A OneOfConfig. Model config.
l2_regularizer: A `tf.keras.regularizers.Regularizer` instance. Default to
None.
**kwargs: Additional kwargs arguments.
Returns:
A `tf.keras.Model` instance of the Yolo FPN/PAN decoder.
"""
decoder_cfg = model_config.decoder.get()
norm_activation_config = model_config.norm_activation
activation = (
decoder_cfg.activation if decoder_cfg.activation != 'same' else
norm_activation_config.activation)
if decoder_cfg.version is None: # custom yolo
raise ValueError('Decoder version cannot be None, specify v3 or v4.')
if decoder_cfg.version not in YOLO_MODELS:
raise ValueError(
        'Unsupported model version, please select from {v3, v4}, '
        'or specify a custom decoder config using YoloDecoder in your yaml.')
if decoder_cfg.type is None:
decoder_cfg.type = 'regular'
if decoder_cfg.type not in YOLO_MODELS[decoder_cfg.version]:
    raise ValueError('Unsupported model type, please select from '
                     f'{YOLO_MODELS[decoder_cfg.version].keys()} '
                     'or specify a custom decoder config using YoloDecoder.')
base_model = YOLO_MODELS[decoder_cfg.version][decoder_cfg.type].copy()
cfg_dict = decoder_cfg.as_dict()
for key in base_model:
if cfg_dict[key] is not None:
base_model[key] = cfg_dict[key]
base_dict = dict(
activation=activation,
use_spatial_attention=decoder_cfg.use_spatial_attention,
use_separable_conv=decoder_cfg.use_separable_conv,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
base_model.update(base_dict)
model = YoloDecoder(input_specs, **base_model, **kwargs)
return model
| 23,635 | 36.222047 | 80 | py |
models | models-master/official/projects/yolo/modeling/decoders/yolov7_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolov7 decoder."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.yolo.modeling.backbones import yolov7 as backbone
from official.projects.yolo.modeling.decoders import yolov7 as decoder
_INPUT_SIZE = (224, 224)
class YoloV7DecoderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('yolov7',),
)
def test_network_creation(self, model_id):
"""Tests declaration of YOLOv7 decoder variants."""
tf.keras.backend.set_image_data_format('channels_last')
backbone_network = backbone.YoloV7(model_id)
decoder_network = decoder.YoloV7(backbone_network.output_specs, model_id)
self.assertEqual(decoder_network.get_config()['model_id'], model_id)
inputs = tf.keras.Input(shape=(*_INPUT_SIZE, 3), batch_size=1)
outputs = decoder_network(backbone_network(inputs))
for level, level_output in outputs.items():
scale = 2 ** int(level)
input_size = (_INPUT_SIZE[0] // scale, _INPUT_SIZE[1] // scale)
self.assertAllEqual((1, *input_size), level_output.shape.as_list()[:-1])
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
)
)
def test_sync_bn_multiple_devices(self, strategy):
"""Test for sync bn on TPU and GPU devices."""
inputs = np.random.rand(1, *_INPUT_SIZE, 3)
tf.keras.backend.set_image_data_format('channels_last')
with strategy.scope():
backbone_network = backbone.YoloV7(model_id='yolov7', use_sync_bn=True)
decoder_network = decoder.YoloV7(
backbone_network.output_specs, model_id='yolov7', use_sync_bn=True)
_ = decoder_network(backbone_network(inputs))
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id='yolov7',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='swish',
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
)
backbone_network = backbone.YoloV7(**kwargs)
kwargs['input_specs'] = backbone_network.output_specs
decoder_network = decoder.YoloV7(**kwargs)
# Create another network object from the first object's config.
new_network = decoder.YoloV7.from_config(decoder_network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(decoder_network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 3,560 | 34.969697 | 79 | py |
models | models-master/official/projects/yolo/modeling/decoders/yolo_decoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for YOLO."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.yolo.modeling.decoders import yolo_decoder as decoders
class YoloDecoderTest(parameterized.TestCase, tf.test.TestCase):
def _build_yolo_decoder(self, input_specs, name='1'):
# Builds 4 different arbitrary decoders.
if name == '1':
model = decoders.YoloDecoder(
input_specs=input_specs,
embed_spp=False,
use_fpn=False,
max_level_process_len=2,
path_process_len=1,
activation='mish')
elif name == '6spp':
model = decoders.YoloDecoder(
input_specs=input_specs,
embed_spp=True,
use_fpn=False,
max_level_process_len=None,
path_process_len=6,
activation='mish')
elif name == '6sppfpn':
model = decoders.YoloDecoder(
input_specs=input_specs,
embed_spp=True,
use_fpn=True,
max_level_process_len=None,
path_process_len=6,
activation='mish')
elif name == '6':
model = decoders.YoloDecoder(
input_specs=input_specs,
embed_spp=False,
use_fpn=False,
max_level_process_len=None,
path_process_len=6,
activation='mish')
else:
      raise NotImplementedError(f'YOLO decoder test {name} not implemented.')
return model
@parameterized.parameters('1', '6spp', '6sppfpn', '6')
def test_network_creation(self, version):
"""Test creation of ResNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
decoder = self._build_yolo_decoder(input_shape, version)
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
endpoints = decoder.call(inputs)
for key in endpoints.keys():
self.assertAllEqual(endpoints[key].shape.as_list(), input_shape[key])
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
use_sync_bn=[False, True],
))
def test_sync_bn_multiple_devices(self, strategy, use_sync_bn):
"""Test for sync bn on TPU and GPU devices."""
tf.keras.backend.set_image_data_format('channels_last')
with strategy.scope():
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
decoder = self._build_yolo_decoder(input_shape, '6')
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
_ = decoder.call(inputs)
@parameterized.parameters(1, 3, 4)
def test_input_specs(self, input_dim):
"""Test different input feature dimensions."""
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
decoder = self._build_yolo_decoder(input_shape, '6')
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
_ = decoder(inputs)
def test_serialize_deserialize(self):
"""Create a network object that sets all of its config options."""
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
decoder = self._build_yolo_decoder(input_shape, '6')
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
_ = decoder(inputs)
config = decoder.get_config()
decoder_from_config = decoders.YoloDecoder.from_config(config)
self.assertAllEqual(decoder.get_config(), decoder_from_config.get_config())
if __name__ == '__main__':
tf.test.main()
| 4,804 | 30.405229 | 79 | py |
models | models-master/official/projects/yolo/modeling/layers/detection_generator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for yolo layer (detection layer)."""
from typing import Optional
import tensorflow as tf
from official.projects.yolo.losses import yolo_loss
from official.projects.yolo.ops import box_ops
from official.projects.yolo.ops import loss_utils
from official.vision.modeling.layers import detection_generator
class YoloLayer(tf.keras.layers.Layer):
"""Yolo layer (detection generator)."""
def __init__(
self,
anchors,
classes,
apply_nms=True,
iou_thresh=0.0,
ignore_thresh=0.7,
truth_thresh=1.0,
nms_thresh=0.6,
max_delta=10.0,
loss_type='ciou',
iou_normalizer=1.0,
cls_normalizer=1.0,
object_normalizer=1.0,
use_scaled_loss=False,
update_on_repeat=False,
pre_nms_points=5000,
label_smoothing=0.0,
max_boxes=200,
box_type='original',
path_scale=None,
scale_xy=None,
nms_version='greedy',
objectness_smooth=False,
use_class_agnostic_nms: Optional[bool] = False,
**kwargs
):
"""Parameters for the loss functions used at each detection head output.
Args:
      anchors: `Dict[str, List[List[float]]]` mapping each level to the anchor
        boxes that are used in the model at that level.
classes: `int` for the number of classes.
apply_nms: A boolean indicating whether to apply NMS.
iou_thresh: `float` to use many anchors per object if IoU(Obj, Anchor) >
iou_thresh.
ignore_thresh: `float` for the IOU value over which the loss is not
propagated, and a detection is assumed to have been made.
truth_thresh: `float` for the IOU value over which the loss is propagated
        despite a detection being made.
nms_thresh: `float` for the minimum IOU value for an overlap.
max_delta: gradient clipping to apply to the box loss.
      loss_type: `str` for the type of iou loss to use, one of {ciou, diou,
        giou, iou}.
iou_normalizer: `float` for how much to scale the loss on the IOU or the
boxes.
cls_normalizer: `float` for how much to scale the loss on the classes.
object_normalizer: `float` for how much to scale loss on the detection
map.
use_scaled_loss: `bool` for whether to use the scaled loss or the
traditional loss.
update_on_repeat: `bool` indicating how you would like to handle repeated
        indexes in a given [j, i] index. Setting this to True will give more
        consistent mAP, setting it to False will improve recall by 1-2% but
        will sacrifice some mAP.
pre_nms_points: `int` number of top candidate detections per class before
NMS.
label_smoothing: `float` for how much to smooth the loss on the classes.
max_boxes: `int` for the maximum number of boxes retained over all
classes.
box_type: `str`, there are 3 different box types that will affect training
differently {original, scaled and anchor_free}. The original method
decodes the boxes by applying an exponential to the model width and
height maps, then scaling the maps by the anchor boxes. This method is
used in Yolo-v4, Yolo-v3, and all its counterparts. The Scale method
squares the width and height and scales both by a fixed factor of 4.
This method is used in the Scale Yolo models, as well as Yolov4-CSP.
        Finally, anchor_free is like the original method but will not apply an
        activation function to the boxes; this is used for some of the newer
        anchor-free versions of YOLO.
path_scale: `dict` for the size of the input tensors. Defaults to
        precalculated values from the `mask`.
      scale_xy: dictionary of `float` values indicating how far each pixel can
        see outside of its containment of 1.0. A value of 1.2 indicates there
        is a 20% extended radius around each pixel within which this specific
        pixel can predict a center. The center can range from 0 - value/2 to
        1 + value/2; this value is set in the yolo filter and reused here.
        There should be one value of scale_xy for each level from min_level to
        max_level.
nms_version: `str` for which non max suppression to use.
objectness_smooth: `float` for how much to smooth the loss on the
detection map.
use_class_agnostic_nms: A `bool` of whether non max suppression is
operated on all the boxes using max scores across all classes. Only
valid when nms_version is v2.
      **kwargs: Additional keyword arguments.
"""
super().__init__(**kwargs)
self._anchors = anchors
self._apply_nms = apply_nms
self._thresh = iou_thresh
self._ignore_thresh = ignore_thresh
self._truth_thresh = truth_thresh
self._iou_normalizer = iou_normalizer
self._cls_normalizer = cls_normalizer
self._object_normalizer = object_normalizer
self._objectness_smooth = objectness_smooth
self._nms_thresh = nms_thresh
self._max_boxes = max_boxes
self._max_delta = max_delta
self._classes = classes
self._loss_type = loss_type
self._use_class_agnostic_nms = use_class_agnostic_nms
self._use_scaled_loss = use_scaled_loss
self._update_on_repeat = update_on_repeat
self._pre_nms_points = pre_nms_points
self._label_smoothing = label_smoothing
self._keys = list(anchors.keys())
self._len_keys = len(self._keys)
self._box_type = box_type
self._path_scale = path_scale or {key: 2**int(key) for key in self._keys}
self._nms_version = nms_version
self._scale_xy = scale_xy or {key: 1.0 for key, _ in anchors.items()}
self._generator = {}
self._len_mask = {}
for key in self._keys:
anchors = self._anchors[key]
self._generator[key] = loss_utils.GridGenerator(
anchors, scale_anchors=self._path_scale[key])
self._len_mask[key] = len(anchors)
return
def parse_prediction_path(self, key, inputs):
shape_ = tf.shape(inputs)
shape = inputs.get_shape().as_list()
batchsize, height, width = shape_[0], shape[1], shape[2]
if height is None or width is None:
height, width = shape_[1], shape_[2]
generator = self._generator[key]
len_mask = self._len_mask[key]
scale_xy = self._scale_xy[key]
# Reshape the yolo output to (batchsize,
# width,
# height,
# number_anchors,
# remaining_points)
data = tf.reshape(inputs, [-1, height, width, len_mask, self._classes + 5])
# Use the grid generator to get the formatted anchor boxes and grid points
# in shape [1, height, width, 2].
centers, anchors = generator(height, width, batchsize, dtype=data.dtype)
# Split the yolo detections into boxes, object score map, classes.
boxes, obns_scores, class_scores = tf.split(
data, [4, 1, self._classes], axis=-1)
# Determine the number of classes.
classes = class_scores.get_shape().as_list()[-1]
# Configurable to use the new coordinates in scaled Yolo v4 or not.
_, _, boxes = loss_utils.get_predicted_box(
tf.cast(height, data.dtype),
tf.cast(width, data.dtype),
boxes,
anchors,
centers,
scale_xy,
stride=self._path_scale[key],
darknet=False,
box_type=self._box_type[key])
    # Convert boxes from yolo(x, y, w, h) to tensorflow(ymin, xmin, ymax, xmax).
boxes = box_ops.xcycwh_to_yxyx(boxes)
    # Activate the detection map.
obns_scores = tf.math.sigmoid(obns_scores)
# Convert detection map to class detection probabilities.
class_scores = tf.math.sigmoid(class_scores) * obns_scores
    # Flatten predictions to [batchsize, N, -1] for non max suppression.
fill = height * width * len_mask
boxes = tf.reshape(boxes, [-1, fill, 4])
class_scores = tf.reshape(class_scores, [-1, fill, classes])
obns_scores = tf.reshape(obns_scores, [-1, fill])
return obns_scores, boxes, class_scores
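  # Worked shape example for the method above (a sketch, assuming a level-5
  # head with a 13x13 grid, 3 anchors, and 80 classes): the raw input
  # [batch, 13, 13, 3 * 85] is reshaped to [batch, 13, 13, 3, 85], decoded,
  # and flattened with fill = 13 * 13 * 3 = 507, giving
  #   obns_scores:  [batch, 507]
  #   boxes:        [batch, 507, 4]
  #   class_scores: [batch, 507, 80]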
def __call__(self, inputs):
boxes = []
class_scores = []
object_scores = []
levels = list(inputs.keys())
min_level = int(min(levels))
max_level = int(max(levels))
# Aggregate boxes over each scale.
for i in range(min_level, max_level + 1):
key = str(i)
object_scores_, boxes_, class_scores_ = self.parse_prediction_path(
key, inputs[key])
boxes.append(boxes_)
class_scores.append(class_scores_)
object_scores.append(object_scores_)
    # Collate all predictions.
boxes = tf.concat(boxes, axis=1)
object_scores = tf.concat(object_scores, axis=1)
class_scores = tf.concat(class_scores, axis=1)
    # Get masks to threshold all the predictions.
object_mask = tf.cast(object_scores > self._thresh, object_scores.dtype)
class_mask = tf.cast(class_scores > self._thresh, class_scores.dtype)
# Apply thresholds mask to all the predictions.
object_scores *= object_mask
class_scores *= (tf.expand_dims(object_mask, axis=-1) * class_mask)
# Make a copy of the original dtype.
dtype = object_scores.dtype
if not self._apply_nms:
return {
'bbox': tf.expand_dims(tf.cast(boxes, dtype=tf.float32), axis=-2),
'classes': tf.cast(class_scores, dtype=tf.float32),
'confidence': object_scores,
'num_detections': self._max_boxes,
}
# Apply nms.
if self._nms_version == 'greedy':
# Greedy NMS.
boxes, object_scores, class_scores, num_detections = (
tf.image.combined_non_max_suppression(
tf.expand_dims(tf.cast(boxes, dtype=tf.float32), axis=-2),
tf.cast(class_scores, dtype=tf.float32),
self._pre_nms_points,
self._max_boxes,
iou_threshold=self._nms_thresh,
score_threshold=self._thresh,
)
)
elif self._nms_version == 'v1':
(boxes, object_scores, class_scores, num_detections, _) = (
detection_generator._generate_detections_v1( # pylint:disable=protected-access
tf.expand_dims(tf.cast(boxes, dtype=tf.float32), axis=-2),
tf.cast(class_scores, dtype=tf.float32),
pre_nms_top_k=self._pre_nms_points,
max_num_detections=self._max_boxes,
nms_iou_threshold=self._nms_thresh,
pre_nms_score_threshold=self._thresh,
)
)
elif self._nms_version == 'v2' or self._nms_version == 'iou':
(boxes, object_scores, class_scores, num_detections) = (
detection_generator._generate_detections_v2( # pylint:disable=protected-access
tf.expand_dims(tf.cast(boxes, dtype=tf.float32), axis=-2),
tf.cast(class_scores, dtype=tf.float32),
pre_nms_top_k=self._pre_nms_points,
max_num_detections=self._max_boxes,
nms_iou_threshold=self._nms_thresh,
pre_nms_score_threshold=self._thresh,
use_class_agnostic_nms=self._use_class_agnostic_nms,
)
)
    # Cast the boxes and predictions back to original datatype.
boxes = tf.cast(boxes, dtype)
class_scores = tf.cast(class_scores, dtype)
object_scores = tf.cast(object_scores, dtype)
# Format and return
return {
'bbox': boxes,
'classes': class_scores,
'confidence': object_scores,
'num_detections': num_detections,
}
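  # Hedged usage sketch, following detection_generator_test.py: anchors are a
  # per-level dict and the raw head outputs are a dict of
  # [batch, h, w, num_anchors * (classes + 5)] tensors.
  #
  #   anchors = {'3': [[12.0, 19.0], [31.0, 46.0], [96.0, 54.0]],
  #              '4': [[46.0, 114.0], [133.0, 127.0], [79.0, 225.0]],
  #              '5': [[301.0, 150.0], [172.0, 286.0], [348.0, 340.0]]}
  #   layer = YoloLayer(anchors, classes=80, max_boxes=10,
  #                     box_type={k: 'scaled' for k in anchors})
  #   outputs = layer({'3': tf.ones([1, 52, 52, 255]),
  #                    '4': tf.ones([1, 26, 26, 255]),
  #                    '5': tf.ones([1, 13, 13, 255])})
  #   # outputs['bbox'] -> [1, 10, 4], outputs['classes'] -> [1, 10]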
def get_losses(self):
"""Generates a dictionary of losses to apply to each path.
Done in the detection generator because all parameters are the same
across both loss and detection generator.
Returns:
Dict[str, tf.Tensor] of losses
"""
loss = yolo_loss.YoloLoss(
keys=self._keys,
classes=self._classes,
anchors=self._anchors,
path_strides=self._path_scale,
truth_thresholds=self._truth_thresh,
ignore_thresholds=self._ignore_thresh,
loss_types=self._loss_type,
iou_normalizers=self._iou_normalizer,
cls_normalizers=self._cls_normalizer,
object_normalizers=self._object_normalizer,
objectness_smooths=self._objectness_smooth,
box_types=self._box_type,
max_deltas=self._max_delta,
scale_xys=self._scale_xy,
use_scaled_loss=self._use_scaled_loss,
update_on_repeat=self._update_on_repeat,
label_smoothing=self._label_smoothing)
return loss
def get_config(self):
return {
        'anchors': self._anchors,
        'iou_thresh': self._thresh,
'max_boxes': self._max_boxes,
}
| 13,274 | 38.044118 | 89 | py |
models | models-master/official/projects/yolo/modeling/layers/detection_generator_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolo detection generator."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.yolo.modeling.layers import detection_generator
class YoloDecoderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('v1', None),
('v2', False),
('v2', True),
('greedy', None),
)
def test_network_creation(self, nms_version, use_class_agnostic_nms):
"""Test creation of ResNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 255],
'4': [1, 26, 26, 255],
'5': [1, 13, 13, 255]
}
classes = 80
anchors = {
'3': [[12.0, 19.0], [31.0, 46.0], [96.0, 54.0]],
'4': [[46.0, 114.0], [133.0, 127.0], [79.0, 225.0]],
'5': [[301.0, 150.0], [172.0, 286.0], [348.0, 340.0]]
}
box_type = {key: 'scaled' for key in anchors.keys()}
layer = detection_generator.YoloLayer(
anchors,
classes,
box_type=box_type,
max_boxes=10,
use_class_agnostic_nms=use_class_agnostic_nms,
nms_version=nms_version,
)
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
endpoints = layer(inputs)
boxes = endpoints['bbox']
classes = endpoints['classes']
self.assertAllEqual(boxes.shape.as_list(), [1, 10, 4])
self.assertAllEqual(classes.shape.as_list(), [1, 10])
if __name__ == '__main__':
tf.test.main()
| 2,139 | 29.140845 | 74 | py |
models | models-master/official/projects/yolo/modeling/layers/nn_blocks_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.yolo.modeling.layers import nn_blocks
class CSPConnectTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('same', 224, 224, 64, 1),
('downsample', 224, 224, 64, 2))
def test_pass_through(self, width, height, filters, mod):
x = tf.keras.Input(shape=(width, height, filters))
test_layer = nn_blocks.CSPRoute(filters=filters, filter_scale=mod)
test_layer2 = nn_blocks.CSPConnect(filters=filters, filter_scale=mod)
outx, px = test_layer(x)
outx = test_layer2([outx, px])
print(outx)
print(outx.shape.as_list())
self.assertAllEqual(
outx.shape.as_list(),
[None, np.ceil(width // 2),
np.ceil(height // 2), (filters)])
@parameterized.named_parameters(('same', 224, 224, 64, 1),
('downsample', 224, 224, 128, 2))
def test_gradient_pass_though(self, filters, width, height, mod):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.CSPRoute(filters, filter_scale=mod)
path_layer = nn_blocks.CSPConnect(filters, filter_scale=mod)
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y = tf.Variable(
initial_value=init(
shape=(1, int(np.ceil(width // 2)), int(np.ceil(height // 2)),
filters),
dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat, x_prev = test_layer(x)
x_hat = path_layer([x_hat, x_prev])
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
class CSPRouteTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('same', 224, 224, 64, 1),
('downsample', 224, 224, 64, 2))
def test_pass_through(self, width, height, filters, mod):
x = tf.keras.Input(shape=(width, height, filters))
test_layer = nn_blocks.CSPRoute(filters=filters, filter_scale=mod)
outx, _ = test_layer(x)
print(outx)
print(outx.shape.as_list())
self.assertAllEqual(
outx.shape.as_list(),
[None, np.ceil(width // 2),
np.ceil(height // 2), (filters / mod)])
@parameterized.named_parameters(('same', 224, 224, 64, 1),
('downsample', 224, 224, 128, 2))
def test_gradient_pass_though(self, filters, width, height, mod):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.CSPRoute(filters, filter_scale=mod)
path_layer = nn_blocks.CSPConnect(filters, filter_scale=mod)
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y = tf.Variable(
initial_value=init(
shape=(1, int(np.ceil(width // 2)), int(np.ceil(height // 2)),
filters),
dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat, x_prev = test_layer(x)
x_hat = path_layer([x_hat, x_prev])
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
class ConvBNTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('valid', (3, 3), 'valid', (1, 1)), ('same', (3, 3), 'same', (1, 1)),
('downsample', (3, 3), 'same', (2, 2)), ('test', (1, 1), 'valid', (1, 1)))
def test_pass_through(self, kernel_size, padding, strides):
if padding == 'same':
pad_const = 1
else:
pad_const = 0
x = tf.keras.Input(shape=(224, 224, 3))
test_layer = nn_blocks.ConvBN(
filters=64,
kernel_size=kernel_size,
padding=padding,
strides=strides,
trainable=False)
outx = test_layer(x)
print(outx.shape.as_list())
test = [
None,
int((224 - kernel_size[0] + (2 * pad_const)) / strides[0] + 1),
int((224 - kernel_size[1] + (2 * pad_const)) / strides[1] + 1), 64
]
print(test)
self.assertAllEqual(outx.shape.as_list(), test)
@parameterized.named_parameters(('filters', 3))
def test_gradient_pass_though(self, filters):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
with tf.device('/CPU:0'):
test_layer = nn_blocks.ConvBN(filters, kernel_size=(3, 3), padding='same')
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, 224, 224, 3), dtype=tf.float32))
y = tf.Variable(
initial_value=init(shape=(1, 224, 224, filters), dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat = test_layer(x)
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('same', 224, 224, 64, False),
('downsample', 223, 223, 32, True),
('oddball', 223, 223, 32, False))
def test_pass_through(self, width, height, filters, downsample):
mod = 1
if downsample:
mod = 2
x = tf.keras.Input(shape=(width, height, filters))
test_layer = nn_blocks.DarkResidual(filters=filters, downsample=downsample)
outx = test_layer(x)
print(outx)
print(outx.shape.as_list())
self.assertAllEqual(
outx.shape.as_list(),
[None, np.ceil(width / mod),
np.ceil(height / mod), filters])
@parameterized.named_parameters(('same', 64, 224, 224, False),
('downsample', 32, 223, 223, True),
('oddball', 32, 223, 223, False))
def test_gradient_pass_though(self, filters, width, height, downsample):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.DarkResidual(filters, downsample=downsample)
if downsample:
mod = 2
else:
mod = 1
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y = tf.Variable(
initial_value=init(
shape=(1, int(np.ceil(width / mod)), int(np.ceil(height / mod)),
filters),
dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat = test_layer(x)
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
class DarkSppTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('RouteProcessSpp', 224, 224, 3, [5, 9, 13]),
('test1', 300, 300, 10, [2, 3, 4, 5]),
('test2', 256, 256, 5, [10]))
def test_pass_through(self, width, height, channels, sizes):
x = tf.keras.Input(shape=(width, height, channels))
test_layer = nn_blocks.SPP(sizes=sizes)
outx = test_layer(x)
self.assertAllEqual(outx.shape.as_list(),
[None, width, height, channels * (len(sizes) + 1)])
return
@parameterized.named_parameters(('RouteProcessSpp', 224, 224, 3, [5, 9, 13]),
('test1', 300, 300, 10, [2, 3, 4, 5]),
('test2', 256, 256, 5, [10]))
def test_gradient_pass_though(self, width, height, channels, sizes):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.SPP(sizes=sizes)
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(
shape=(1, width, height, channels), dtype=tf.float32))
y = tf.Variable(
initial_value=init(
shape=(1, width, height, channels * (len(sizes) + 1)),
dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat = test_layer(x)
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
return
class DarkRouteProcessTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('test1', 224, 224, 64, 7, False), ('test2', 223, 223, 32, 3, False),
('tiny', 223, 223, 16, 1, False), ('spp', 224, 224, 64, 7, False))
def test_pass_through(self, width, height, filters, repetitions, spp):
x = tf.keras.Input(shape=(width, height, filters))
test_layer = nn_blocks.DarkRouteProcess(
filters=filters, repetitions=repetitions, insert_spp=spp)
outx = test_layer(x)
self.assertLen(outx, 2, msg='len(outx) != 2')
if repetitions == 1:
filter_y1 = filters
else:
filter_y1 = filters // 2
self.assertAllEqual(
outx[1].shape.as_list(), [None, width, height, filter_y1])
self.assertAllEqual(
filters % 2,
0,
msg='Output of a DarkRouteProcess layer has an odd number of filters')
self.assertAllEqual(outx[0].shape.as_list(), [None, width, height, filters])
@parameterized.named_parameters(
('test1', 224, 224, 64, 7, False), ('test2', 223, 223, 32, 3, False),
('tiny', 223, 223, 16, 1, False), ('spp', 224, 224, 64, 7, False))
def test_gradient_pass_though(self, width, height, filters, repetitions, spp):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.DarkRouteProcess(
filters=filters, repetitions=repetitions, insert_spp=spp)
if repetitions == 1:
filter_y1 = filters
else:
filter_y1 = filters // 2
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y_0 = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y_1 = tf.Variable(
initial_value=init(
shape=(1, width, height, filter_y1), dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat_0, x_hat_1 = test_layer(x)
grad_loss_0 = loss(x_hat_0, y_0)
grad_loss_1 = loss(x_hat_1, y_1)
grad = tape.gradient([grad_loss_0, grad_loss_1],
test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
return
class SPPCSPCTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('SPPCSPC', 224, 224, 8, [5, 9, 13], 0.5),
('test1', 300, 300, 32, [2, 3, 4, 5], 1.0),
('test2', 256, 256, 16, [10], 2.0))
def test_pass_through(self, width, height, filters, pool_sizes, scale):
x = tf.keras.Input(shape=(width, height, filters))
test_layer = nn_blocks.SPPCSPC(filters, pool_sizes, scale)
out = test_layer(x)
self.assertAllEqual(out.shape.as_list(), [None, width, height, filters])
@parameterized.named_parameters(('SPPCSPC', 224, 224, 8, [5, 9, 13], 0.5),
('test1', 300, 300, 32, [2, 3, 4, 5], 1.0),
('test2', 256, 256, 16, [10], 2.0))
def test_gradient_pass_though(
self, width, height, filters, pool_sizes, scale):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.SPPCSPC(filters, pool_sizes, scale)
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat = test_layer(x)
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
return
class RepConvTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('RepConv', 224, 224, 8, 1),
('test1', 300, 300, 32, 2),
('test2', 256, 256, 16, 4))
def test_pass_through(self, width, height, filters, strides):
x = tf.keras.Input(shape=(width, height, filters))
test_layer = nn_blocks.RepConv(filters, strides=strides)
out = test_layer(x)
self.assertAllEqual(out.shape.as_list(),
[None, width // strides, height // strides, filters])
@parameterized.named_parameters(('RepConv', 224, 224, 8, 1),
('test1', 300, 300, 32, 2),
('test2', 256, 256, 16, 4))
def test_gradient_pass_though(self, width, height, filters, strides):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.RepConv(filters, strides=strides)
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y = tf.Variable(
initial_value=init(
shape=(1, width // strides, height // strides, filters),
dtype=tf.float32,
)
)
with tf.GradientTape() as tape:
x_hat = test_layer(x)
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
return
if __name__ == '__main__':
tf.test.main()
| 14,811 | 37.87664 | 80 | py |
models | models-master/official/projects/yolo/modeling/layers/nn_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for yolo neural networks."""
import functools
from typing import Callable, List, Tuple
import tensorflow as tf
from official.modeling import tf_utils
from official.vision.ops import spatial_transform_ops
class Identity(tf.keras.layers.Layer):
def call(self, inputs):
return inputs
class ConvBN(tf.keras.layers.Layer):
"""ConvBN block.
Modified Convolution layer to match that of the Darknet Library.
  The layer is a standard combination of Conv, BatchNorm, and Activation;
  however, the use of bias in the conv is determined by the use of batch
normalization.
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
Ping-Yang Chen, Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
"""
def __init__(self,
filters=1,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
dilation_rate=(1, 1),
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_separable_conv=False,
use_bn=True,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='leaky',
leaky_alpha=0.1,
**kwargs):
"""ConvBN initializer.
Args:
filters: integer for output depth, or the number of features to learn.
kernel_size: integer or tuple for the shape of the weight matrix or kernel
to learn.
      strides: integer or tuple for how much to move the kernel after each
        kernel use.
padding: string 'valid' or 'same', if same, then pad the image, else do
not.
dilation_rate: tuple to indicate how much to modulate kernel weights and
how many pixels in a feature map to skip.
kernel_initializer: string to indicate which function to use to initialize
weights.
bias_initializer: string to indicate which function to use to initialize
bias.
      bias_regularizer: string to indicate which function to use to regularize
        bias.
      kernel_regularizer: string to indicate which function to use to
        regularize weights.
      use_separable_conv: `bool` whether to use separable convs.
      use_bn: boolean for whether to use batch normalization.
      use_sync_bn: boolean for whether to sync batch normalization statistics
        of all batch norm layers to the model's global statistics
        (across all input batches).
      norm_momentum: float for momentum to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
activation: string or None for activation function to use in layer,
if None activation is replaced by linear.
leaky_alpha: float to use as alpha if activation function is leaky.
**kwargs: Keyword Arguments.
"""
# convolution params
self._filters = filters
self._kernel_size = kernel_size
self._strides = strides
self._padding = padding
self._dilation_rate = dilation_rate
if kernel_initializer == 'VarianceScaling':
# to match pytorch initialization method
self._kernel_initializer = tf.keras.initializers.VarianceScaling(
scale=1 / 3, mode='fan_in', distribution='uniform')
else:
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
# batch normalization params
self._use_bn = use_bn
self._use_separable_conv = use_separable_conv
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
ksize = self._kernel_size
if not isinstance(ksize, List) and not isinstance(ksize, Tuple):
ksize = [ksize]
if use_separable_conv and not all([a == 1 for a in ksize]):
self._conv_base = tf.keras.layers.SeparableConv2D
else:
self._conv_base = tf.keras.layers.Conv2D
self._bn_base = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
# format: (batch_size, height, width, channels)
self._bn_axis = -1
else:
# format: (batch_size, channels, width, height)
self._bn_axis = 1
# activation params
self._activation = activation
self._leaky_alpha = leaky_alpha
self._fuse = False
super().__init__(**kwargs)
def build(self, input_shape):
use_bias = not self._use_bn
self.conv = self._conv_base(
filters=self._filters,
kernel_size=self._kernel_size,
strides=self._strides,
padding=self._padding,
dilation_rate=self._dilation_rate,
use_bias=use_bias,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
if self._use_bn:
self.bn = self._bn_base(
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
axis=self._bn_axis,
synchronized=self._use_sync_bn)
else:
self.bn = None
if self._activation == 'leaky':
self._activation_fn = tf.keras.layers.LeakyReLU(alpha=self._leaky_alpha)
elif self._activation == 'mish':
self._activation_fn = lambda x: x * tf.math.tanh(tf.math.softplus(x))
else:
self._activation_fn = tf_utils.get_activation(self._activation)
def call(self, x):
x = self.conv(x)
if self._use_bn and not self._fuse:
x = self.bn(x)
x = self._activation_fn(x)
return x
def fuse(self):
if self.bn is not None and not self._use_separable_conv:
      # Fuse convolution and batchnorm; gives roughly +2 to 3 FPS (~2 ms latency).
# layers: https://tehnokv.com/posts/fusing-batchnorm-and-conv/
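      # The fusion below folds the batch norm into the conv weights; for each
      # output channel c:
      #   w_fused[..., c] = w_conv[..., c] * gamma[c] / sqrt(var[c] + eps)
      #   b_fused[c] = beta[c] - gamma[c] * mean[c] / sqrt(var[c] + eps)
      # so conv(x, w_fused) + b_fused == bn(conv(x, w_conv)) at inference time.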
if self._fuse:
return
self._fuse = True
conv_weights = self.conv.get_weights()[0]
gamma, beta, moving_mean, moving_variance = self.bn.get_weights()
self.conv.use_bias = True
infilters = conv_weights.shape[-2]
self.conv.build([None, None, None, infilters])
base = tf.sqrt(self._norm_epsilon + moving_variance)
w_conv_base = tf.transpose(conv_weights, perm=(3, 2, 0, 1))
w_conv = tf.reshape(w_conv_base, [conv_weights.shape[-1], -1])
w_bn = tf.linalg.diag(gamma / base)
w_conv = tf.reshape(tf.matmul(w_bn, w_conv), w_conv_base.get_shape())
w_conv = tf.transpose(w_conv, perm=(2, 3, 1, 0))
b_bn = beta - gamma * moving_mean / base
self.conv.set_weights([w_conv, b_bn])
del self.bn
self.trainable = False
self.conv.trainable = False
self.bn = None
return
def get_config(self):
# used to store/share parameters to reconstruct the model
layer_config = {
'filters': self._filters,
'kernel_size': self._kernel_size,
'strides': self._strides,
'padding': self._padding,
'dilation_rate': self._dilation_rate,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'kernel_regularizer': self._kernel_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'leaky_alpha': self._leaky_alpha
}
layer_config.update(super().get_config())
return layer_config
class DarkResidual(tf.keras.layers.Layer):
"""Darknet block with Residual connection for Yolo v3 Backbone."""
def __init__(self,
filters=1,
filter_scale=2,
dilation_rate=1,
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
use_bn=True,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='leaky',
leaky_alpha=0.1,
sc_activation='linear',
downsample=False,
**kwargs):
"""Dark Residual initializer.
Args:
filters: integer for output depth, or the number of features to learn.
filter_scale: `int` for filter scale.
dilation_rate: tuple to indicate how much to modulate kernel weights and
how many pixels in a feature map to skip.
kernel_initializer: string to indicate which function to use to initialize
weights.
bias_initializer: string to indicate which function to use to initialize
bias.
      kernel_regularizer: string to indicate which function to use to
        regularize weights.
      bias_regularizer: string to indicate which function to use to regularize
        bias.
      use_bn: boolean for whether to use batch normalization.
      use_sync_bn: boolean for whether to sync batch normalization statistics
        of all batch norm layers to the model's global statistics
        (across all input batches).
      use_separable_conv: `bool` whether to use separable convs.
      norm_momentum: float for momentum to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
activation: string or None for activation function to use in layer,
if None activation is replaced by linear.
leaky_alpha: float to use as alpha if activation function is leaky.
sc_activation: string for activation function to use in layer.
downsample: boolean for if image input is larger than layer output, set
downsample to True so the dimensions are forced to match.
**kwargs: Keyword Arguments.
"""
# downsample
self._downsample = downsample
# ConvBN params
self._filters = filters
self._filter_scale = filter_scale
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
self._use_bn = use_bn
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._kernel_regularizer = kernel_regularizer
# normal params
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._dilation_rate = dilation_rate if isinstance(dilation_rate,
int) else dilation_rate[0]
# activation params
self._conv_activation = activation
self._leaky_alpha = leaky_alpha
self._sc_activation = sc_activation
super().__init__(**kwargs)
def build(self, input_shape):
dark_conv_args = {
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._conv_activation,
'kernel_regularizer': self._kernel_regularizer,
'leaky_alpha': self._leaky_alpha
}
if self._downsample:
if self._dilation_rate > 1:
dilation_rate = 1
if self._dilation_rate // 2 > 0:
dilation_rate = self._dilation_rate // 2
down_stride = 1
else:
dilation_rate = 1
down_stride = 2
self._dconv = ConvBN(
filters=self._filters,
kernel_size=(3, 3),
strides=down_stride,
dilation_rate=dilation_rate,
padding='same',
**dark_conv_args)
self._conv1 = ConvBN(
filters=self._filters // self._filter_scale,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**dark_conv_args)
self._conv2 = ConvBN(
filters=self._filters,
kernel_size=(3, 3),
strides=(1, 1),
dilation_rate=self._dilation_rate,
padding='same',
**dark_conv_args)
self._shortcut = tf.keras.layers.Add()
if self._sc_activation == 'leaky':
self._activation_fn = tf.keras.layers.LeakyReLU(alpha=self._leaky_alpha)
elif self._sc_activation == 'mish':
self._activation_fn = lambda x: x * tf.math.tanh(tf.math.softplus(x))
else:
self._activation_fn = tf_utils.get_activation(self._sc_activation)
super().build(input_shape)
def call(self, inputs, training=None):
if self._downsample:
inputs = self._dconv(inputs)
x = self._conv1(inputs)
x = self._conv2(x)
x = self._shortcut([x, inputs])
return self._activation_fn(x)
def get_config(self):
# used to store/share parameters to reconstruct the model
layer_config = {
'filters': self._filters,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'kernel_regularizer': self._kernel_regularizer,
'dilation_rate': self._dilation_rate,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._conv_activation,
'leaky_alpha': self._leaky_alpha,
'sc_activation': self._sc_activation,
'downsample': self._downsample,
}
layer_config.update(super().get_config())
return layer_config
class CSPTiny(tf.keras.layers.Layer):
"""CSP Tiny layer.
  A small convolution block proposed in CSPNet. The layer uses shortcuts,
  routing (concatenation), and feature grouping in order to improve gradient
  variability and allow for high-efficiency, low-power residual learning for
  small networks.
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
Ping-Yang Chen, Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
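  Example:
    A minimal usage sketch (shapes are illustrative); the layer returns both
    the merged (and, by default, max-pooled) output and the partial feature
    map used for routing:
      x = tf.keras.Input(shape=(64, 64, 32))
      x_out, x_partial = CSPTiny(filters=64)(x)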
"""
def __init__(self,
filters=1,
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_bn=True,
dilation_rate=1,
use_sync_bn=False,
use_separable_conv=False,
group_id=1,
groups=2,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='leaky',
downsample=True,
leaky_alpha=0.1,
**kwargs):
"""Initializer for CSPTiny block.
Args:
filters: integer for output depth, or the number of features to learn.
kernel_initializer: string to indicate which function to use to initialize
weights.
bias_initializer: string to indicate which function to use to initialize
bias.
      bias_regularizer: string to indicate which function to use to regularize
        bias.
      kernel_regularizer: string to indicate which function to use to
        regularize weights.
      use_bn: boolean for whether to use batch normalization.
      dilation_rate: `int`, dilation rate for conv layers.
      use_sync_bn: boolean for whether to sync batch normalization statistics
        of all batch norm layers to the model's global statistics
        (across all input batches).
      use_separable_conv: `bool` whether to use separable convs.
      group_id: integer for which group of features to pass through the csp
        tiny stack.
      groups: integer for how many splits there should be in the convolution
        feature stack output.
      norm_momentum: float for momentum to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
activation: string or None for activation function to use in layer,
if None activation is replaced by linear.
downsample: boolean for if image input is larger than layer output, set
downsample to True so the dimensions are forced to match.
leaky_alpha: float to use as alpha if activation function is leaky.
**kwargs: Keyword Arguments.
"""
# ConvBN params
self._filters = filters
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
self._use_bn = use_bn
self._dilation_rate = dilation_rate
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._kernel_regularizer = kernel_regularizer
self._groups = groups
self._group_id = group_id
self._downsample = downsample
# normal params
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
# activation params
self._conv_activation = activation
self._leaky_alpha = leaky_alpha
super().__init__(**kwargs)
def build(self, input_shape):
dark_conv_args = {
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._conv_activation,
'leaky_alpha': self._leaky_alpha
}
self._convlayer1 = ConvBN(
filters=self._filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
**dark_conv_args)
self._convlayer2 = ConvBN(
filters=self._filters // 2,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
**dark_conv_args)
self._convlayer3 = ConvBN(
filters=self._filters // 2,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
**dark_conv_args)
self._convlayer4 = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**dark_conv_args)
if self._downsample:
self._maxpool = tf.keras.layers.MaxPool2D(
pool_size=2, strides=2, padding='same', data_format=None)
super().build(input_shape)
def call(self, inputs, training=None):
x1 = self._convlayer1(inputs)
x1_group = tf.split(x1, self._groups, axis=-1)[self._group_id]
x2 = self._convlayer2(x1_group) # grouping
x3 = self._convlayer3(x2)
x4 = tf.concat([x3, x2], axis=-1) # csp partial using grouping
x5 = self._convlayer4(x4)
x = tf.concat([x1, x5], axis=-1) # csp connect
if self._downsample:
x = self._maxpool(x)
return x, x5
class CSPRoute(tf.keras.layers.Layer):
"""CSPRoute block.
  Downsampling layer that takes the place of the downsampling done in Residual
  networks. This is the first of 2 layers needed to convert any Residual Network
  model to a CSPNet. At the start of a new level change, this CSPRoute layer
  creates a learned identity that will act as a cross stage connection,
  that is used to inform the inputs to the next stage. It is called cross stage
  partial because the number of filters required in every intermittent Residual
  layer is reduced by half. The sister layer will take the partial generated by
  this layer and concatenate it with the output of the final residual layer in
  the stack to create a full feature-level output. This concatenation merges the
  partial blocks of 2 levels as input to the next, allowing the gradients of each
  level to be more unique, and reducing the number of parameters required by
  each level by 50% while keeping accuracy consistent.
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
Ping-Yang Chen, Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
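  Example:
    A minimal sketch of the route/connect pairing (`inputs` is any 4-D feature
    tensor; the residual block in between is illustrative, and with the default
    filter_scale=2 it uses filters // 2):
      x, x_csp = CSPRoute(filters=128)(inputs)
      x = DarkResidual(filters=128 // 2)(x)
      x = CSPConnect(filters=128)([x, x_csp])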
"""
def __init__(self,
filters,
filter_scale=2,
activation='mish',
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
dilation_rate=1,
use_bn=True,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
downsample=True,
leaky_alpha=0.1,
**kwargs):
"""CSPRoute layer initializer.
Args:
filters: integer for output depth, or the number of features to learn
filter_scale: integer dictating (filters//2) or the number of filters in
the partial feature stack.
activation: string for activation function to use in layer.
kernel_initializer: string to indicate which function to use to
initialize weights.
bias_initializer: string to indicate which function to use to initialize
bias.
      bias_regularizer: string to indicate which function to use to regularize
        bias.
      kernel_regularizer: string to indicate which function to use to
        regularize weights.
      dilation_rate: dilation rate for conv layers.
      use_bn: boolean for whether to use batch normalization.
      use_sync_bn: boolean for whether to sync batch normalization statistics
        of all batch norm layers to the model's global statistics
        (across all input batches).
      use_separable_conv: `bool` whether to use separable convs.
      norm_momentum: float for momentum to use for batch normalization.
      norm_epsilon: float for batch normalization epsilon.
      downsample: `bool` for whether to downsample the input.
leaky_alpha: `float`, for leaky alpha value.
**kwargs: Keyword Arguments.
"""
super().__init__(**kwargs)
# layer params
self._filters = filters
self._filter_scale = filter_scale
self._activation = activation
# convoultion params
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._dilation_rate = dilation_rate
self._use_bn = use_bn
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._downsample = downsample
self._leaky_alpha = leaky_alpha
def build(self, input_shape):
dark_conv_args = {
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'kernel_regularizer': self._kernel_regularizer,
'leaky_alpha': self._leaky_alpha,
}
if self._downsample:
if self._dilation_rate > 1:
dilation_rate = 1
if self._dilation_rate // 2 > 0:
dilation_rate = self._dilation_rate // 2
down_stride = 1
else:
dilation_rate = 1
down_stride = 2
self._conv1 = ConvBN(
filters=self._filters,
kernel_size=(3, 3),
strides=down_stride,
dilation_rate=dilation_rate,
**dark_conv_args)
self._conv2 = ConvBN(
filters=self._filters // self._filter_scale,
kernel_size=(1, 1),
strides=(1, 1),
**dark_conv_args)
self._conv3 = ConvBN(
filters=self._filters // self._filter_scale,
kernel_size=(1, 1),
strides=(1, 1),
**dark_conv_args)
def call(self, inputs, training=None):
if self._downsample:
inputs = self._conv1(inputs)
y = self._conv2(inputs)
x = self._conv3(inputs)
return (x, y)
class CSPConnect(tf.keras.layers.Layer):
"""CSPConnect block.
  Sister layer to the CSPRoute layer. Merges the partial feature stacks
  generated by the CSPRoute (downsampling) layer, and the final output of the
  residual stack. Suggested in the CSPNet paper.
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
Ping-Yang Chen, Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
"""
def __init__(self,
filters,
filter_scale=2,
drop_final=False,
drop_first=False,
activation='mish',
kernel_size=(1, 1),
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
dilation_rate=1,
use_bn=True,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
leaky_alpha=0.1,
**kwargs):
"""Initializer for CSPConnect block.
Args:
filters: integer for output depth, or the number of features to learn.
filter_scale: integer dictating (filters//2) or the number of filters in
the partial feature stack.
drop_final: `bool`, whether to drop final conv layer.
drop_first: `bool`, whether to drop first conv layer.
activation: string for activation function to use in layer.
kernel_size: `Tuple`, kernel size for conv layers.
kernel_initializer: string to indicate which function to use to initialize
weights.
bias_initializer: string to indicate which function to use to initialize
bias.
      bias_regularizer: string to indicate which function to use to regularize
        bias.
      kernel_regularizer: string to indicate which function to use to
        regularize weights.
      dilation_rate: `int`, dilation rate for conv layers.
      use_bn: boolean for whether to use batch normalization.
      use_sync_bn: boolean for whether to sync batch normalization statistics
        of all batch norm layers to the model's global
        statistics (across all input batches).
      use_separable_conv: `bool` whether to use separable convs.
      norm_momentum: float for momentum to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
leaky_alpha: `float`, for leaky alpha value.
**kwargs: Keyword Arguments.
"""
super().__init__(**kwargs)
# layer params
self._filters = filters
self._filter_scale = filter_scale
self._activation = activation
# convoultion params
self._kernel_size = kernel_size
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_bn = use_bn
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._drop_final = drop_final
self._drop_first = drop_first
self._leaky_alpha = leaky_alpha
def build(self, input_shape):
dark_conv_args = {
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'kernel_regularizer': self._kernel_regularizer,
'leaky_alpha': self._leaky_alpha,
}
if not self._drop_first:
self._conv1 = ConvBN(
filters=self._filters // self._filter_scale,
kernel_size=self._kernel_size,
strides=(1, 1),
**dark_conv_args)
self._concat = tf.keras.layers.Concatenate(axis=-1)
if not self._drop_final:
self._conv2 = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
**dark_conv_args)
def call(self, inputs, training=None):
x_prev, x_csp = inputs
if not self._drop_first:
x_prev = self._conv1(x_prev)
x = self._concat([x_prev, x_csp])
# skipped if drop final is true
if not self._drop_final:
x = self._conv2(x)
return x
class CSPStack(tf.keras.layers.Layer):
"""CSP Stack layer.
  CSP full stack, which combines the route and the connect so you can just
  quickly wrap an existing callable or list of layers to
  make it a cross stage partial. Added for ease of use. You should be able
  to wrap any layer stack with a CSP independent of whether it belongs
  to the Darknet family. If filter_scale = 2, then the blocks in the stack
  passed into the CSP stack should also have filters = filters/filter_scale.
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
Ping-Yang Chen, Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
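  Example:
    A minimal sketch wrapping a list of residual blocks (`inputs` is any 4-D
    feature tensor; with the default filter_scale=2 the wrapped blocks use
    filters // 2):
      blocks = [DarkResidual(filters=64 // 2) for _ in range(2)]
      x = CSPStack(filters=64, model_to_wrap=blocks)(inputs)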
"""
def __init__(self,
filters,
model_to_wrap=None,
filter_scale=2,
activation='mish',
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
downsample=True,
use_bn=True,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""CSPStack layer initializer.
Args:
filters: filter size for conv layers.
model_to_wrap: callable Model or a list of callable objects that will
process the output of CSPRoute, and be input into CSPConnect. list will
be called sequentially.
filter_scale: integer dictating (filters//2) or the number of filters in
the partial feature stack.
activation: string for activation function to use in layer.
kernel_initializer: string to indicate which function to use to initialize
weights.
bias_initializer: string to indicate which function to use to initialize
bias.
      bias_regularizer: string to indicate which function to use to regularize
        bias.
      kernel_regularizer: string to indicate which function to use to
        regularize weights.
      downsample: `bool` for whether to downsample the input.
      use_bn: boolean for whether to use batch normalization.
      use_sync_bn: boolean for whether to sync batch normalization statistics of
        all batch norm layers to the model's global statistics (across all input
        batches).
      use_separable_conv: `bool` whether to use separable convs.
      norm_momentum: float for momentum to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
**kwargs: Keyword Arguments.
Raises:
TypeError: model_to_wrap is not a layer or a list of layers
"""
super().__init__(**kwargs)
# layer params
self._filters = filters
self._filter_scale = filter_scale
self._activation = activation
self._downsample = downsample
# convoultion params
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_bn = use_bn
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if model_to_wrap is None:
self._model_to_wrap = []
elif isinstance(model_to_wrap, Callable):
self._model_to_wrap = [model_to_wrap]
elif isinstance(model_to_wrap, List):
self._model_to_wrap = model_to_wrap
else:
raise TypeError(
          'the input to the CSPStack must be a list of layers that we can ' +
'iterate through, or \n a callable')
def build(self, input_shape):
dark_conv_args = {
'filters': self._filters,
'filter_scale': self._filter_scale,
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_regularizer': self._kernel_regularizer,
}
self._route = CSPRoute(downsample=self._downsample, **dark_conv_args)
self._connect = CSPConnect(**dark_conv_args)
def call(self, inputs, training=None):
x, x_route = self._route(inputs)
for layer in self._model_to_wrap:
x = layer(x)
x = self._connect([x, x_route])
return x
class PathAggregationBlock(tf.keras.layers.Layer):
"""Path Aggregation block."""
def __init__(self,
filters=1,
drop_final=True,
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_bn=True,
use_sync_bn=False,
use_separable_conv=False,
inverted=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='leaky',
leaky_alpha=0.1,
downsample=False,
upsample=False,
upsample_size=2,
**kwargs):
"""Initializer for path aggregation block.
Args:
filters: integer for output depth, or the number of features to learn.
drop_final: do not create the last convolution block.
kernel_initializer: string to indicate which function to use to initialize
weights.
bias_initializer: string to indicate which function to use to initialize
bias.
      bias_regularizer: string to indicate which function to use to regularize
        bias.
      kernel_regularizer: string to indicate which function to use to
        regularize weights.
      use_bn: boolean for whether to use batch normalization.
      use_sync_bn: boolean for whether to sync batch normalization statistics
        of all batch norm layers to the model's global statistics
        (across all input batches).
      use_separable_conv: `bool` whether to use separable convs.
      inverted: boolean for inverting the order of the convolutions.
      norm_momentum: float for momentum to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
activation: string or None for activation function to use in layer,
if None activation is replaced by linear.
leaky_alpha: float to use as alpha if activation function is leaky.
      downsample: `bool` for whether to downsample and merge.
      upsample: `bool` for whether to upsample and merge.
upsample_size: `int` how much to upsample in order to match shapes.
**kwargs: Keyword Arguments.
"""
# Darkconv params
self._filters = filters
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
self._kernel_regularizer = kernel_regularizer
self._use_bn = use_bn
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
# Normal params
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
# Activation params
self._conv_activation = activation
self._leaky_alpha = leaky_alpha
self._downsample = downsample
self._upsample = upsample
self._upsample_size = upsample_size
self._drop_final = drop_final
# Block params
self._inverted = inverted
super().__init__(**kwargs)
def _build_regular(self, input_shape, kwargs):
if self._downsample:
self._conv = ConvBN(
filters=self._filters,
kernel_size=(3, 3),
strides=(2, 2),
padding='same',
**kwargs)
else:
self._conv = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**kwargs)
if not self._drop_final:
self._conv_concat = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**kwargs)
def _build_reversed(self, input_shape, kwargs):
if self._downsample:
self._conv_prev = ConvBN(
filters=self._filters,
kernel_size=(3, 3),
strides=(2, 2),
padding='same',
**kwargs)
else:
self._conv_prev = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**kwargs)
self._conv_route = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**kwargs)
if not self._drop_final:
self._conv_sync = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**kwargs)
def build(self, input_shape):
dark_conv_args = {
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._conv_activation,
'kernel_regularizer': self._kernel_regularizer,
'leaky_alpha': self._leaky_alpha,
}
if self._inverted:
self._build_reversed(input_shape, dark_conv_args)
else:
self._build_regular(input_shape, dark_conv_args)
self._concat = tf.keras.layers.Concatenate()
super().build(input_shape)
def _call_regular(self, inputs, training=None):
input_to_convolve, input_to_concat = inputs
x_prev = self._conv(input_to_convolve)
if self._upsample:
x_prev = spatial_transform_ops.nearest_upsampling(x_prev,
self._upsample_size)
x = self._concat([x_prev, input_to_concat])
# used in csp conversion
if not self._drop_final:
x = self._conv_concat(x)
return x_prev, x
def _call_reversed(self, inputs, training=None):
x_route, x_prev = inputs
x_prev = self._conv_prev(x_prev)
if self._upsample:
x_prev = spatial_transform_ops.nearest_upsampling(x_prev,
self._upsample_size)
x_route = self._conv_route(x_route)
x = self._concat([x_route, x_prev])
if not self._drop_final:
x = self._conv_sync(x)
return x_prev, x
def call(self, inputs, training=None):
# done this way to prevent confusion in the auto graph
if self._inverted:
return self._call_reversed(inputs, training=training)
else:
return self._call_regular(inputs, training=training)
class SPP(tf.keras.layers.Layer):
"""Spatial Pyramid Pooling.
  A non-aggregated SPP layer that uses pooling.
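  Example:
    A minimal sketch (`x` is any 4-D feature tensor); the pooled maps are
    concatenated with the input, so the channel count grows by a factor of
    len(sizes) + 1:
      y = SPP(sizes=[5, 9, 13])(x)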
"""
def __init__(self, sizes, **kwargs):
self._sizes = list(reversed(sizes))
if not sizes:
      raise ValueError(
          'At least one maxpool size should be specified in the SPP block')
super().__init__(**kwargs)
def build(self, input_shape):
maxpools = []
for size in self._sizes:
maxpools.append(
tf.keras.layers.MaxPool2D(
pool_size=(size, size),
strides=(1, 1),
padding='same',
data_format=None))
self._maxpools = maxpools
super().build(input_shape)
def call(self, inputs, training=None):
outputs = []
for maxpool in self._maxpools:
outputs.append(maxpool(inputs))
outputs.append(inputs)
concat_output = tf.keras.layers.concatenate(outputs)
return concat_output
def get_config(self):
layer_config = {'sizes': self._sizes}
layer_config.update(super().get_config())
return layer_config
class SAM(tf.keras.layers.Layer):
"""Spatial Attention Model.
[1] Sanghyun Woo, Jongchan Park, Joon-Young Lee, In So Kweon
CBAM: Convolutional Block Attention Module. arXiv:1807.06521
implementation of the Spatial Attention Model (SAM)
"""
def __init__(self,
use_pooling=False,
filter_match=False,
filters=1,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
dilation_rate=(1, 1),
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_bn=True,
use_sync_bn=True,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='sigmoid',
output_activation=None,
leaky_alpha=0.1,
**kwargs):
# use_pooling
self._use_pooling = use_pooling
self._filters = filters
self._output_activation = output_activation
self._leaky_alpha = leaky_alpha
self.dark_conv_args = {
'kernel_size': kernel_size,
'strides': strides,
'padding': padding,
'dilation_rate': dilation_rate,
'kernel_initializer': kernel_initializer,
'bias_initializer': bias_initializer,
'bias_regularizer': bias_regularizer,
'use_bn': use_bn,
'use_sync_bn': use_sync_bn,
'use_separable_conv': use_separable_conv,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'activation': activation,
'kernel_regularizer': kernel_regularizer,
'leaky_alpha': leaky_alpha
}
super().__init__(**kwargs)
def build(self, input_shape):
if self._filters == -1:
self._filters = input_shape[-1]
self._conv = ConvBN(filters=self._filters, **self.dark_conv_args)
if self._output_activation == 'leaky':
self._activation_fn = tf.keras.layers.LeakyReLU(alpha=self._leaky_alpha)
elif self._output_activation == 'mish':
self._activation_fn = lambda x: x * tf.math.tanh(tf.math.softplus(x))
else:
self._activation_fn = tf_utils.get_activation(self._output_activation)
def call(self, inputs, training=None):
if self._use_pooling:
depth_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
depth_avg = tf.reduce_mean(inputs, axis=-1, keepdims=True)
input_maps = tf.concat([depth_avg, depth_max], axis=-1)
else:
input_maps = inputs
attention_mask = self._conv(input_maps)
return self._activation_fn(inputs * attention_mask)
class CAM(tf.keras.layers.Layer):
"""Channel Attention Model.
[1] Sanghyun Woo, Jongchan Park, Joon-Young Lee, In So Kweon
CBAM: Convolutional Block Attention Module. arXiv:1807.06521
Implementation of the Channel Attention Model (CAM)
"""
def __init__(self,
reduction_ratio=1.0,
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_bn=False,
use_sync_bn=False,
use_bias=False,
norm_momentum=0.99,
norm_epsilon=0.001,
mlp_activation='linear',
activation='sigmoid',
leaky_alpha=0.1,
**kwargs):
self._reduction_ratio = reduction_ratio
if not use_bn:
self._bn = Identity
self._bn_args = {}
else:
self._bn = functools.partial(
tf.keras.layers.BatchNormalization, synchronized=use_sync_bn)
self._bn_args = {
'momentum': norm_momentum,
'epsilon': norm_epsilon,
}
self._mlp_args = {
'use_bias': use_bias,
'kernel_initializer': kernel_initializer,
'bias_initializer': bias_initializer,
'bias_regularizer': bias_regularizer,
'activation': mlp_activation,
'kernel_regularizer': kernel_regularizer,
}
self._leaky_alpha = leaky_alpha
self._activation = activation
super().__init__(**kwargs)
def build(self, input_shape):
self._filters = input_shape[-1]
self._mlp = tf.keras.Sequential([
tf.keras.layers.Dense(self._filters, **self._mlp_args),
self._bn(**self._bn_args),
tf.keras.layers.Dense(
int(self._filters * self._reduction_ratio), **self._mlp_args),
self._bn(**self._bn_args),
tf.keras.layers.Dense(self._filters, **self._mlp_args),
self._bn(**self._bn_args),
])
if self._activation == 'leaky':
self._activation_fn = tf.keras.layers.LeakyReLU(alpha=self._leaky_alpha)
elif self._activation == 'mish':
self._activation_fn = lambda x: x * tf.math.tanh(tf.math.softplus(x))
else:
self._activation_fn = tf_utils.get_activation(self._activation)
def call(self, inputs, training=None):
depth_max = self._mlp(tf.reduce_max(inputs, axis=(1, 2)))
depth_avg = self._mlp(tf.reduce_mean(inputs, axis=(1, 2)))
channel_mask = self._activation_fn(depth_avg + depth_max)
channel_mask = tf.expand_dims(channel_mask, axis=1)
attention_mask = tf.expand_dims(channel_mask, axis=1)
return inputs * attention_mask
class CBAM(tf.keras.layers.Layer):
"""Convolutional Block Attention Module.
[1] Sanghyun Woo, Jongchan Park, Joon-Young Lee, In So Kweon
CBAM: Convolutional Block Attention Module. arXiv:1807.06521
implementation of the Convolution Block Attention Module (CBAM)
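  Example:
    A minimal sketch applying channel then spatial attention to a feature map
    (`features` is any 4-D feature tensor; filters=-1 lets the spatial branch
    infer its filter count from the input):
      attended = CBAM(filters=-1)(features)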
"""
def __init__(self,
use_pooling=False,
filters=1,
reduction_ratio=1.0,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
dilation_rate=(1, 1),
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_bn=True,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
mlp_activation=None,
activation='sigmoid',
leaky_alpha=0.1,
**kwargs):
# use_pooling
self._sam_args = {
'use_pooling': use_pooling,
'filters': filters,
'kernel_size': kernel_size,
'strides': strides,
'padding': padding,
'dilation_rate': dilation_rate,
'use_separable_conv': use_separable_conv,
}
self._cam_args = {
'reduction_ratio': reduction_ratio,
'mlp_activation': mlp_activation
}
self._common_args = {
'kernel_initializer': kernel_initializer,
'bias_initializer': bias_initializer,
'bias_regularizer': bias_regularizer,
'use_bn': use_bn,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'activation': activation,
'kernel_regularizer': kernel_regularizer,
'leaky_alpha': leaky_alpha
}
self._cam_args.update(self._common_args)
self._sam_args.update(self._common_args)
super().__init__(**kwargs)
def build(self, input_shape):
self._cam = CAM(**self._cam_args)
self._sam = SAM(**self._sam_args)
def call(self, inputs, training=None):
return self._sam(self._cam(inputs))
class DarkRouteProcess(tf.keras.layers.Layer):
"""Dark Route Process block.
  Processes darknet outputs and connects the backbone to the head more
  generalizably. Abstracts the repetition of DarkConv objects that is common
  in YOLO.
It is used like the following:
x = ConvBN(1024, (3, 3), (1, 1))(x)
proc = DarkRouteProcess(filters = 1024,
repetitions = 3,
insert_spp = False)(x)
"""
def __init__(self,
filters=2,
repetitions=2,
insert_spp=False,
insert_sam=False,
insert_cbam=False,
csp_stack=0,
csp_scale=2,
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
block_invert=False,
activation='leaky',
leaky_alpha=0.1,
spp_keys=None,
**kwargs):
"""DarkRouteProcess initializer.
Args:
filters: the number of filters to be used in all subsequent layers
filters should be the depth of the tensor input into this layer,
as no downsampling can be done within this layer object.
      repetitions: number of times to repeat the processing nodes.
        for tiny: 1 repetition, no spp allowed.
for spp: insert_spp = True, and allow for 6 repetitions.
for regular: insert_spp = False, and allow for 6 repetitions.
insert_spp: bool if true add the spatial pyramid pooling layer.
insert_sam: bool if true add spatial attention module to path.
insert_cbam: bool if true add convolutional block attention
module to path.
csp_stack: int for the number of sequential layers from 0
to <value> you would like to convert into a Cross Stage
Partial(csp) type.
csp_scale: int for how much to down scale the number of filters
only for the csp layers in the csp section of the processing
        path. A value 2 indicates that each layer that is in the CSP
stack will have filters = filters/2.
kernel_initializer: method to use to initialize kernel weights.
bias_initializer: method to use to initialize the bias of the conv
layers.
      bias_regularizer: string to indicate which function to use to regularize
        bias.
      kernel_regularizer: string to indicate which function to use to
        regularize weights.
      use_sync_bn: bool if true use sync batch normalization.
      use_separable_conv: `bool` whether to use separable convs.
norm_momentum: batch norm parameter see Tensorflow documentation.
norm_epsilon: batch norm parameter see Tensorflow documentation.
      block_invert: bool used for switching between the even and odd
        repetitions of layers. Usually the repetition is based on a
        3x3 conv with filters, followed by a 1x1 with filters/2, with
        an even number of repetitions to ensure each 3x3 gets a 1x1
        squeeze. Block invert swaps the 3x3/1 1x1/2 to a 1x1/2 3x3/1
        ordering, typically used when the model requires an odd number
        of repetitions. All other parameters maintain their effects.
activation: activation function to use in processing.
      leaky_alpha: if leaky activation function, the alpha to use in
processing the relu input.
spp_keys: List[int] of the sampling levels to be applied by
the Spatial Pyramid Pooling Layer. By default it is
        [5, 9, 13] indicating a 5x5 pooling followed by 9x9
        followed by 13x13 then followed by the standard concatenation
and convolution.
**kwargs: Keyword Arguments.
"""
super().__init__(**kwargs)
# darkconv params
self._filters = filters
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
self._kernel_regularizer = kernel_regularizer
# normal params
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
# activation params
self._activation = activation
self._leaky_alpha = leaky_alpha
repetitions += (2 * int(insert_spp))
if repetitions == 1:
block_invert = True
self._repetitions = repetitions
self.layer_list, self.outputs = self._get_base_layers()
if csp_stack > 0:
self._csp_scale = csp_scale
csp_stack += (2 * int(insert_spp))
self._csp_filters = lambda x: x // csp_scale
self._convert_csp(self.layer_list, self.outputs, csp_stack)
block_invert = False
self._csp_stack = csp_stack
if block_invert:
self._conv1_filters = lambda x: x
self._conv2_filters = lambda x: x // 2
self._conv1_kernel = (3, 3)
self._conv2_kernel = (1, 1)
else:
self._conv1_filters = lambda x: x // 2
self._conv2_filters = lambda x: x
self._conv1_kernel = (1, 1)
self._conv2_kernel = (3, 3)
    # insert SPP will always add to the total number of layers, never replace
if insert_spp:
self._spp_keys = spp_keys if spp_keys is not None else [5, 9, 13]
self.layer_list = self._insert_spp(self.layer_list)
if repetitions > 1:
self.outputs[-2] = True
if insert_sam:
self.layer_list = self._insert_sam(self.layer_list, self.outputs)
self._repetitions += 1
self.outputs[-1] = True
def _get_base_layers(self):
layer_list = []
outputs = []
for i in range(self._repetitions):
layers = ['conv1'] * ((i + 1) % 2) + ['conv2'] * (i % 2)
layer_list.extend(layers)
outputs = [False] + outputs
return layer_list, outputs
def _insert_spp(self, layer_list):
if len(layer_list) <= 3:
layer_list[1] = 'spp'
else:
layer_list[3] = 'spp'
return layer_list
def _convert_csp(self, layer_list, outputs, csp_stack_size):
layer_list[0] = 'csp_route'
layer_list.insert(csp_stack_size - 1, 'csp_connect')
outputs.insert(csp_stack_size - 1, False)
return layer_list, outputs
def _insert_sam(self, layer_list, outputs):
if len(layer_list) >= 2 and layer_list[-2] != 'spp':
layer_list.insert(-2, 'sam')
outputs.insert(-1, True)
else:
layer_list.insert(-1, 'sam')
outputs.insert(-1, False)
return layer_list
def _conv1(self, filters, kwargs, csp=False):
if csp:
filters_ = self._csp_filters
else:
filters_ = self._conv1_filters
x1 = ConvBN(
filters=filters_(filters),
kernel_size=self._conv1_kernel,
strides=(1, 1),
padding='same',
use_bn=True,
**kwargs)
return x1
def _conv2(self, filters, kwargs, csp=False):
if csp:
filters_ = self._csp_filters
else:
filters_ = self._conv2_filters
x1 = ConvBN(
filters=filters_(filters),
kernel_size=self._conv2_kernel,
strides=(1, 1),
padding='same',
use_bn=True,
**kwargs)
return x1
def _csp_route(self, filters, kwargs):
x1 = CSPRoute(
filters=filters,
filter_scale=self._csp_scale,
downsample=False,
**kwargs)
return x1
def _csp_connect(self, filters, kwargs):
x1 = CSPConnect(filters=filters, drop_final=True, drop_first=True, **kwargs)
return x1
def _spp(self, filters, kwargs):
x1 = SPP(self._spp_keys)
return x1
def _sam(self, filters, kwargs):
x1 = SAM(filters=-1, use_pooling=False, use_bn=True, **kwargs)
return x1
def build(self, input_shape):
dark_conv_args = {
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_regularizer': self._kernel_regularizer,
'leaky_alpha': self._leaky_alpha,
}
csp = False
self.layers = []
for layer in self.layer_list:
if layer == 'csp_route':
self.layers.append(self._csp_route(self._filters, dark_conv_args))
csp = True
elif layer == 'csp_connect':
self.layers.append(self._csp_connect(self._filters, dark_conv_args))
csp = False
elif layer == 'conv1':
self.layers.append(self._conv1(self._filters, dark_conv_args, csp=csp))
elif layer == 'conv2':
self.layers.append(self._conv2(self._filters, dark_conv_args, csp=csp))
elif layer == 'spp':
self.layers.append(self._spp(self._filters, dark_conv_args))
elif layer == 'sam':
self.layers.append(self._sam(-1, dark_conv_args))
self._lim = len(self.layers)
super().build(input_shape)
def _call_regular(self, inputs, training=None):
# check efficiency
x = inputs
x_prev = x
output_prev = True
for (layer, output) in zip(self.layers, self.outputs):
if output_prev:
x_prev = x
x = layer(x)
output_prev = output
return x_prev, x
def _call_csp(self, inputs, training=None):
# check efficiency
x = inputs
x_prev = x
output_prev = True
x_route = None
for i, (layer, output) in enumerate(zip(self.layers, self.outputs)):
if output_prev:
x_prev = x
if i == 0:
x, x_route = layer(x)
elif i == self._csp_stack - 1:
x = layer([x, x_route])
else:
x = layer(x)
output_prev = output
return x_prev, x
def call(self, inputs, training=None):
if self._csp_stack > 0:
return self._call_csp(inputs, training=training)
else:
return self._call_regular(inputs)
class Reorg(tf.keras.layers.Layer):
"""Splits a high resolution image into 4 lower resolution images.
Used in YOLOR to process very high resolution inputs efficiently.
  For example, an input image of [1280, 1280, 3] will become [640, 640, 12];
  the pixels are sampled in such a way that all spatial information is
  retained.
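  Example:
    A space-to-depth style rearrangement (shapes are illustrative):
      x = tf.ones([1, 1280, 1280, 3])
      y = Reorg()(x)
      # y.shape == (1, 640, 640, 12)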
"""
def call(self, x, training=None):
return tf.concat([
x[..., ::2, ::2, :], x[..., 1::2, ::2, :], x[..., ::2, 1::2, :],
x[..., 1::2, 1::2, :]
],
axis=-1)
class SPPCSPC(tf.keras.layers.Layer):
"""Cross-stage partial network with spatial pyramid pooling.
This module is used in YOLOv7 to process backbone feature at the highest
  level. SPPCSPC uses a fusion-first CSP block and applies SPP within
  the dense block.
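  Example:
    A minimal sketch on the highest-level backbone feature (shapes are
    illustrative); the output keeps the spatial resolution and has `filters`
    channels:
      p5 = tf.keras.Input(shape=(20, 20, 1024))
      x = SPPCSPC(filters=512)(p5)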
"""
def __init__(
self,
filters,
pool_sizes=(5, 9, 13),
scale=0.5,
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
use_separable_conv=False,
use_bn=True,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='swish',
**kwargs):
"""Initializes SPPCSPC block.
Args:
filters: an `int` for filters used in Conv2D.
pool_sizes: a tuple of `int` for maxpool layer used in the dense block.
scale: a `float` scale that applies on the filters to determine the
internal Conv2D filters within CSP block.
kernel_initializer: string to indicate which function to use to initialize
weights in Conv2D.
bias_initializer: string to indicate which function to use to initialize
bias.
      kernel_regularizer: string to indicate which function to use to
        regularize weights in Conv2D.
      bias_regularizer: string to indicate which function to use to regularize
        bias.
      use_separable_conv: `bool` whether to use separable convs.
      use_bn: boolean for whether to use batch normalization.
      use_sync_bn: boolean for whether to sync batch normalization statistics
        of all batch norm layers to the model's global statistics
        (across all input batches).
      norm_momentum: float for momentum to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
activation: string to indicate the activation function used after each
Conv2D.
**kwargs: other keyword arguments.
"""
super().__init__(**kwargs)
self._filters = filters
self._pool_sizes = pool_sizes
self._scale = scale
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_separable_conv = use_separable_conv
self._use_bn = use_bn
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._activation = activation
def build(self, input_shape):
filters = self._filters * 2 * self._scale
conv_op = functools.partial(
ConvBN,
activation=self._activation,
use_separable_conv=self._use_separable_conv,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_initializer=self._bias_initializer,
bias_regularizer=self._bias_regularizer,
use_bn=self._use_bn,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
)
self._conv1_1 = conv_op(filters, kernel_size=1, strides=1)
self._conv1_2 = conv_op(filters, kernel_size=3, strides=1)
self._conv1_3 = conv_op(filters, kernel_size=1, strides=1)
self._poolings = [
tf.keras.layers.MaxPooling2D(pool_size, strides=1, padding='same')
for pool_size in self._pool_sizes
]
self._conv1_4 = conv_op(filters, kernel_size=1, strides=1)
self._conv1_5 = conv_op(filters, kernel_size=3, strides=1)
self._conv2_1 = conv_op(filters, kernel_size=1, strides=1)
self._merge_conv = conv_op(self._filters, kernel_size=1, strides=1)
super().build(input_shape)
def call(self, inputs, training=None):
x = self._conv1_3(self._conv1_2(self._conv1_1(inputs)))
x = self._conv1_5(
self._conv1_4(
tf.concat([x] + [pooling(x) for pooling in self._poolings], -1)
)
)
y = self._conv2_1(inputs)
return self._merge_conv(tf.concat([x, y], axis=-1))
def get_config(self):
# used to store/share parameters to reconstruct the model
layer_config = {
'filters': self._filters,
'pool_sizes': self._pool_sizes,
'scale': self._scale,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
}
layer_config.update(super().get_config())
return layer_config
class RepConv(tf.keras.layers.Layer):
"""Represented convolution.
https://arxiv.org/abs/2101.03697
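  Example:
    A minimal training-time sketch (shapes are illustrative); the layer keeps
    parallel 3x3, 1x1 and, when shapes allow, identity branches that are meant
    to be fused into a single convolution for deployment:
      x = tf.keras.Input(shape=(64, 64, 32))
      y = RepConv(filters=32, strides=1)(x)
      # y.shape == (None, 64, 64, 32)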
"""
def __init__(
self,
filters,
kernel_size=3,
strides=1,
padding='same',
activation='swish',
use_separable_conv=False,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
**kwargs
):
"""Initializes RepConv layer.
Args:
filters: integer for output depth, or the number of features to learn.
kernel_size: integer or tuple for the shape of the weight matrix or kernel
to learn.
      strides: integer or tuple for how much to move the kernel after each
        kernel use.
padding: string 'valid' or 'same', if same, then pad the image, else do
not.
activation: string or None for activation function to use in layer, if
None activation is replaced by linear.
      use_separable_conv: `bool` whether to use separable convs.
      use_sync_bn: boolean for whether to sync batch normalization statistics of
        all batch norm layers to the model's global statistics (across all input
        batches).
      norm_momentum: float for momentum to use for batch normalization.
      norm_epsilon: float for batch normalization epsilon.
      kernel_initializer: string to indicate which function to use to initialize
        weights.
      kernel_regularizer: string to indicate which function to use to
        regularize weights.
      bias_initializer: string to indicate which function to use to initialize
        bias.
      bias_regularizer: string to indicate which function to use to regularize
        bias.
**kwargs: other keyword arguments.
"""
super().__init__(**kwargs)
self._filters = filters
self._kernel_size = kernel_size
self._strides = strides
self._padding = padding
self._activation = activation
self._use_separable_conv = use_separable_conv
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
# For deploy.
self._fuse = False
def build(self, input_shape):
conv_op = functools.partial(
tf.keras.layers.SeparableConv2D
if self._use_separable_conv
else tf.keras.layers.Conv2D,
filters=self._filters,
strides=self._strides,
padding=self._padding,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_initializer=self._bias_initializer,
bias_regularizer=self._bias_regularizer,
)
bn_op = functools.partial(
tf.keras.layers.BatchNormalization,
synchronized=self._use_sync_bn,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
)
self._activation_fn = tf_utils.get_activation(self._activation)
self._rbr_reparam = conv_op(kernel_size=self._kernel_size, use_bias=True)
if input_shape[-1] == self._filters and self._strides == 1:
self._rbr_identity = bn_op()
self._rbr_dense = conv_op(kernel_size=self._kernel_size, use_bias=False)
self._rbr_dense_bn = bn_op()
self._rbr_1x1 = conv_op(kernel_size=1, use_bias=False)
self._rbr_1x1_bn = bn_op()
def call(self, inputs, training=None):
if self._fuse:
return self._activation_fn(self._rbr_reparam(inputs))
id_out = 0
if hasattr(self, '_rbr_identity'):
id_out = self._rbr_identity(inputs)
x = self._rbr_dense_bn(self._rbr_dense(inputs))
y = self._rbr_1x1_bn(self._rbr_1x1(inputs))
return self._activation_fn(x + y + id_out)
def fuse(self):
if self._fuse:
return
# TODO(b/264495198): Implement fuse for RepConv.
raise NotImplementedError()
| 68,760 | 34.153885 | 80 | py |
models | models-master/official/projects/yolo/modeling/backbones/yolov7.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains backbone architectures for YOLOv7 families.
The models are built with ELAN and E-ELAN.
ELAN was proposed in:
[1] Wang, Chien-Yao and Liao, Hong-Yuan Mark and Yeh, I-Hau
Designing Network Design Strategies Through Gradient Path Analysis
arXiv:2211.04800
E-ELAN is proposed in YOLOv7 paper:
[1] Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark
YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time
object detectors
arXiv:2207.02696
"""
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.yolo.modeling.layers import nn_blocks
from official.projects.yolo.ops import initializer_ops
from official.vision.modeling.backbones import factory
# Required block functions for YOLOv7 backbone families.
_BLOCK_FNS = {
'convbn': nn_blocks.ConvBN,
'maxpool2d': tf.keras.layers.MaxPooling2D,
'concat': tf.keras.layers.Concatenate,
}
# Names for key arguments needed by each block function.
_BLOCK_SPEC_SCHEMAS = {
'convbn': [
'block_fn',
'from',
'kernel_size',
'strides',
'filters',
'is_output',
],
'maxpool2d': [
'block_fn',
'from',
'pool_size',
'strides',
'padding',
'is_output',
],
'concat': [
'block_fn',
'from',
'axis',
'is_output',
]
}
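# For example, under the 'convbn' schema above, the spec row
# ['convbn', -1, 3, 2, 32, False] reads as: a ConvBN block whose input comes
# from the previous block (-1), with kernel_size=3, strides=2, filters=32, and
# is_output=False. For 'concat' blocks, 'from' is a list of relative indices of
# the feature maps to merge.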
# Define YOLOv7-tiny variant.
_YoloV7Tiny = [
['convbn', -1, 3, 2, 32, False], # 0-P1/2
['convbn', -1, 3, 2, 64, False], # 1-P2/4
['convbn', -1, 1, 1, 32, False],
['convbn', -2, 1, 1, 32, False],
['convbn', -1, 3, 1, 32, False],
['convbn', -1, 3, 1, 32, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 64, False], # 7
['maxpool2d', -1, 2, 2, 'same', False], # 8-P3/8
['convbn', -1, 1, 1, 64, False],
['convbn', -2, 1, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 128, True], # 14
['maxpool2d', -1, 2, 2, 'same', False], # 15-P4/16
['convbn', -1, 1, 1, 128, False],
['convbn', -2, 1, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 256, True], # 21
['maxpool2d', -1, 2, 2, 'same', False], # 22-P5/32
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 512, True], # 28
]
# Define YOLOv7 variant.
_YoloV7 = [
['convbn', -1, 3, 1, 32, False], # 0
['convbn', -1, 3, 2, 64, False], # 1-P1/2
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 2, 128, False], # 3-P2/4
['convbn', -1, 1, 1, 64, False],
['convbn', -2, 1, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['concat', [-1, -3, -5, -6], -1, False],
['convbn', -1, 1, 1, 256, False], # 11
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 128, False],
['convbn', -3, 1, 1, 128, False],
['convbn', -1, 3, 2, 128, False],
['concat', [-1, -3], -1, False], # 16-P3/8
['convbn', -1, 1, 1, 128, False],
['convbn', -2, 1, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -3, -5, -6], -1, False],
['convbn', -1, 1, 1, 512, True], # 24
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 256, False],
['convbn', -3, 1, 1, 256, False],
['convbn', -1, 3, 2, 256, False],
['concat', [-1, -3], -1, False], # 29-P4/16
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -3, -5, -6], -1, False],
['convbn', -1, 1, 1, 1024, True], # 37
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 512, False],
['convbn', -3, 1, 1, 512, False],
['convbn', -1, 3, 2, 512, False],
['concat', [-1, -3], -1, False], # 42-P5/32
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -3, -5, -6], -1, False],
['convbn', -1, 1, 1, 1024, True], # 50
]
_YoloV7X = [
['convbn', -1, 3, 1, 40, False], # 0
['convbn', -1, 3, 2, 80, False], # 1-P1/2
['convbn', -1, 3, 1, 80, False],
['convbn', -1, 3, 2, 160, False], # 3-P2/4
['convbn', -1, 1, 1, 64, False],
['convbn', -2, 1, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 320, False], # 13
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 160, False],
['convbn', -3, 1, 1, 160, False],
['convbn', -1, 3, 2, 160, False],
['concat', [-1, -3], -1, False], # 18-P3/8
['convbn', -1, 1, 1, 128, False],
['convbn', -2, 1, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 640, True], # 28
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 320, False],
['convbn', -3, 1, 1, 320, False],
['convbn', -1, 3, 2, 320, False],
['concat', [-1, -3], -1, False], # 33-P4/16
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 1280, True], # 43
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 640, False],
['convbn', -3, 1, 1, 640, False],
['convbn', -1, 3, 2, 640, False],
['concat', [-1, -3], -1, False], # 48-P5/32
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 1280, True], # 58
]
# Aggregates all variants for YOLOv7 backbones.
BACKBONES = {
'yolov7-tiny': _YoloV7Tiny,
'yolov7': _YoloV7,
'yolov7x': _YoloV7X,
}
class YoloV7(tf.keras.Model):
"""YOLOv7 backbone architecture."""
def __init__(
self,
model_id='yolov7',
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='swish',
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
**kwargs):
"""Initializes the YOLOv7 backbone.
Args:
      model_id: a `str` representing the model variant.
input_specs: a `tf.keras.layers.InputSpec` of the input tensor.
use_sync_bn: if set to `True`, use synchronized batch normalization.
norm_momentum: a `float` of normalization momentum for the moving average.
norm_epsilon: a small `float` added to variance to avoid dividing by zero.
activation: a `str` name of the activation function.
kernel_initializer: a `str` for kernel initializer of convolutional
layers.
kernel_regularizer: a `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_initializer: a `str` for bias initializer of convolutional layers.
bias_regularizer: a `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
**kwargs: Additional keyword arguments to be passed.
"""
self._model_id = model_id
self._input_specs = input_specs
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._activation = activation
self._kernel_initializer = initializer_ops.pytorch_kernel_initializer(
kernel_initializer
)
self._kernel_regularizer = kernel_regularizer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
inputs = tf.keras.layers.Input(shape=input_specs.shape[1:])
block_specs = BACKBONES[model_id.lower()]
outputs = []
endpoints = {}
level = 3
for spec in block_specs:
block_kwargs = dict(zip(_BLOCK_SPEC_SCHEMAS[spec[0]], spec))
block_fn_str = block_kwargs.pop('block_fn')
from_index = block_kwargs.pop('from')
is_output = block_kwargs.pop('is_output')
if not outputs:
x = inputs
elif isinstance(from_index, int):
x = outputs[from_index]
else:
x = [outputs[idx] for idx in from_index]
if block_fn_str in ['convbn']:
block_kwargs.update({
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
})
block_fn = _BLOCK_FNS[block_fn_str](**block_kwargs)
x = block_fn(x)
outputs.append(x)
if is_output:
endpoints[str(level)] = x
level += 1
self._output_specs = {k: v.get_shape() for k, v in endpoints.items()}
super().__init__(inputs=inputs, outputs=endpoints, **kwargs)
def get_config(self):
config_dict = {
'model_id': self._model_id,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('yolov7')
def build_yolov7(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None,
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds YOLOv7."""
assert backbone_config.type == 'yolov7', (
f'Inconsistent backbone type {backbone_config.type}.')
backbone_config = backbone_config.get()
assert backbone_config.model_id in BACKBONES, (
f'Unsupported backbone {backbone_config.model_id}.')
model = YoloV7(
model_id=backbone_config.model_id,
input_specs=input_specs,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
activation=norm_activation_config.activation,
kernel_regularizer=l2_regularizer,
)
return model
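# Example usage (illustrative sketch; the model_id and input size below are
# assumptions):
#
#   backbone = YoloV7(model_id='yolov7')
#   images = tf.keras.Input(shape=(640, 640, 3), batch_size=1)
#   endpoints = backbone(images)
#   # endpoints maps levels '3', '4' and '5' to features at strides 8, 16 and
#   # 32, e.g. endpoints['5'] is 20x20 spatially for a 640x640 input.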
| 12,875 | 32.185567 | 80 | py |
models | models-master/official/projects/yolo/modeling/backbones/yolov7_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolov7 backbone."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.yolo.modeling.backbones import yolov7
_INPUT_SIZE = (224, 224)
class YoloV7BackboneTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('yolov7',),
)
def test_network_creation(self, model_id):
"""Tests declaration of YOLOv7 backbone variants."""
tf.keras.backend.set_image_data_format('channels_last')
network = yolov7.YoloV7(model_id)
self.assertEqual(network.get_config()['model_id'], model_id)
inputs = tf.keras.Input(shape=(*_INPUT_SIZE, 3), batch_size=1)
outputs = network(inputs)
for level, level_output in outputs.items():
scale = 2**int(level)
input_size = (_INPUT_SIZE[0] // scale, _INPUT_SIZE[1] // scale)
self.assertAllEqual((1, *input_size), level_output.shape.as_list()[:-1])
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
)
)
def test_sync_bn_multiple_devices(self, strategy):
"""Test for sync bn on TPU and GPU devices."""
inputs = np.random.rand(1, *_INPUT_SIZE, 3)
tf.keras.backend.set_image_data_format('channels_last')
with strategy.scope():
network = yolov7.YoloV7(model_id='yolov7')
_ = network(inputs)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id='yolov7',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='swish',
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
)
network = yolov7.YoloV7(**kwargs)
# Create another network object from the first object's config.
new_network = yolov7.YoloV7.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 3,049 | 31.795699 | 79 | py |
models | models-master/official/projects/yolo/modeling/backbones/darknet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolo."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.yolo.modeling.backbones import darknet
class DarknetTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(224, 'darknet53', 2, 1, True),
(224, 'darknettiny', 1, 2, False),
(224, 'cspdarknettiny', 1, 1, False),
(224, 'cspdarknet53', 2, 1, True),
)
def test_network_creation(self, input_size, model_id, endpoint_filter_scale,
scale_final, dilate):
"""Test creation of ResNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
network = darknet.Darknet(
model_id=model_id, min_level=3, max_level=5, dilate=dilate)
self.assertEqual(network.model_id, model_id)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
if dilate:
self.assertAllEqual([
1, input_size / 2**3, input_size / 2**3, 128 * endpoint_filter_scale
], endpoints['3'].shape.as_list())
self.assertAllEqual([
1, input_size / 2**3, input_size / 2**3, 256 * endpoint_filter_scale
], endpoints['4'].shape.as_list())
self.assertAllEqual([
1, input_size / 2**3, input_size / 2**3,
512 * endpoint_filter_scale * scale_final
], endpoints['5'].shape.as_list())
else:
self.assertAllEqual([
1, input_size / 2**3, input_size / 2**3, 128 * endpoint_filter_scale
], endpoints['3'].shape.as_list())
self.assertAllEqual([
1, input_size / 2**4, input_size / 2**4, 256 * endpoint_filter_scale
], endpoints['4'].shape.as_list())
self.assertAllEqual([
1, input_size / 2**5, input_size / 2**5,
512 * endpoint_filter_scale * scale_final
], endpoints['5'].shape.as_list())
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
use_sync_bn=[False, True],
))
def test_sync_bn_multiple_devices(self, strategy, use_sync_bn):
"""Test for sync bn on TPU and GPU devices."""
inputs = np.random.rand(1, 224, 224, 3)
tf.keras.backend.set_image_data_format('channels_last')
with strategy.scope():
network = darknet.Darknet(
model_id='darknet53',
min_level=3,
max_level=5,
use_sync_bn=use_sync_bn,
)
_ = network(inputs)
@parameterized.parameters(1, 3, 4)
def test_input_specs(self, input_dim):
"""Test different input feature dimensions."""
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim])
network = darknet.Darknet(
model_id='darknet53', min_level=3, max_level=5, input_specs=input_specs)
inputs = tf.keras.Input(shape=(224, 224, input_dim), batch_size=1)
_ = network(inputs)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id='darknet53',
min_level=3,
max_level=5,
use_sync_bn=False,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
)
network = darknet.Darknet(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = darknet.Darknet.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 4,758 | 34.251852 | 80 | py |
models | models-master/official/projects/yolo/modeling/backbones/darknet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of Darknet Backbone Networks.
The models are inspired by ResNet and CSPNet.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu, Ping-Yang Chen,
Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
Darknets are used mainly for object detection in:
[1] Joseph Redmon, Ali Farhadi
YOLOv3: An Incremental Improvement. arXiv:1804.02767
[2] Alexey Bochkovskiy, Chien-Yao Wang, Hong-Yuan Mark Liao
YOLOv4: Optimal Speed and Accuracy of Object Detection. arXiv:2004.10934
"""
import collections
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.yolo.modeling.layers import nn_blocks
from official.vision.modeling.backbones import factory
class BlockConfig:
"""Class to store layer config to make code more readable."""
def __init__(self, layer, stack, reps, bottleneck, filters, pool_size,
kernel_size, strides, padding, activation, route, dilation_rate,
output_name, is_output):
"""Initializing method for BlockConfig.
Args:
layer: A `str` for layer name.
stack: A `str` for the type of layer ordering to use for this specific
level.
reps: An `int` for the number of times to repeat block.
      bottleneck: A `bool` for whether this stack has a bottleneck layer.
filters: An `int` for the output depth of the level.
pool_size: An `int` for the pool_size of max pool layers.
kernel_size: An `int` for convolution kernel size.
strides: A `Union[int, tuple]` that indicates convolution strides.
padding: An `int` for the padding to apply to layers in this stack.
activation: A `str` for the activation to use for this stack.
route: An `int` for the level to route from to get the next input.
      dilation_rate: An `int` for the scale used in dilated Darknet.
output_name: A `str` for the name to use for this output.
is_output: A `bool` for whether this layer is an output in the default
model.
"""
self.layer = layer
self.stack = stack
self.repetitions = reps
self.bottleneck = bottleneck
self.filters = filters
self.kernel_size = kernel_size
self.pool_size = pool_size
self.strides = strides
self.padding = padding
self.activation = activation
self.route = route
self.dilation_rate = dilation_rate
self.output_name = output_name
self.is_output = is_output
def build_block_specs(config):
specs = []
for layer in config:
specs.append(BlockConfig(*layer))
return specs
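# Illustrative sketch (not executed) of how a config row maps positionally
# onto BlockConfig:
#
#   row = ['ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
#          False]
#   cfg = build_block_specs([row])[0]
#   # cfg.layer == 'ConvBN', cfg.filters == 32, cfg.pool_size is None,
#   # cfg.kernel_size == 3, cfg.strides == 1, cfg.activation == 'mish',
#   # cfg.output_name == 0 and cfg.is_output is False.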
class LayerBuilder:
"""Layer builder class.
Class for quick look up of default layers used by darknet to
connect, introduce or exit a level. Used in place of an if condition
or switch to make adding new layers easier and to reduce redundant code.
"""
def __init__(self):
self._layer_dict = {
'ConvBN': (nn_blocks.ConvBN, self.conv_bn_config_todict),
'MaxPool': (tf.keras.layers.MaxPool2D, self.maxpool_config_todict)
}
def conv_bn_config_todict(self, config, kwargs):
dictvals = {
'filters': config.filters,
'kernel_size': config.kernel_size,
'strides': config.strides,
'padding': config.padding
}
dictvals.update(kwargs)
return dictvals
def darktiny_config_todict(self, config, kwargs):
dictvals = {'filters': config.filters, 'strides': config.strides}
dictvals.update(kwargs)
return dictvals
def maxpool_config_todict(self, config, kwargs):
return {
'pool_size': config.pool_size,
'strides': config.strides,
'padding': config.padding,
'name': kwargs['name']
}
def __call__(self, config, kwargs):
layer, get_param_dict = self._layer_dict[config.layer]
param_dict = get_param_dict(config, kwargs)
return layer(**param_dict)
# model configs
LISTNAMES = [
'default_layer_name', 'level_type', 'number_of_layers_in_level',
    'bottleneck', 'filters', 'pool_size', 'kernel_size', 'strides', 'padding',
'default_activation', 'route', 'dilation', 'level/name', 'is_output'
]
CSPDARKNET53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 106,
'neck_split': 132
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
False
],
[
'DarkRes', 'csp', 1, True, 64, None, None, None, None, 'mish', -1,
1, 1, False
],
[
'DarkRes', 'csp', 2, False, 128, None, None, None, None, 'mish', -1,
1, 2, False
],
[
'DarkRes', 'csp', 8, False, 256, None, None, None, None, 'mish', -1,
1, 3, True
],
[
'DarkRes', 'csp', 8, False, 512, None, None, None, None, 'mish', -1,
2, 4, True
],
[
'DarkRes', 'csp', 4, False, 1024, None, None, None, None, 'mish',
-1, 4, 5, True
],
]
}
CSPADARKNET53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 100,
'neck_split': 135
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
False
],
[
'DarkRes', 'residual', 1, True, 64, None, None, None, None, 'mish',
-1, 1, 1, False
],
[
'DarkRes', 'csp', 2, False, 128, None, None, None, None, 'mish', -1,
1, 2, False
],
[
'DarkRes', 'csp', 8, False, 256, None, None, None, None, 'mish', -1,
1, 3, True
],
[
'DarkRes', 'csp', 8, False, 512, None, None, None, None, 'mish', -1,
2, 4, True
],
[
'DarkRes', 'csp', 4, False, 1024, None, None, None, None, 'mish',
-1, 4, 5, True
],
]
}
LARGECSP53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 100,
'neck_split': 135
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
False
],
[
'DarkRes', 'csp', 1, False, 64, None, None, None, None, 'mish', -1,
1, 1, False
],
[
'DarkRes', 'csp', 3, False, 128, None, None, None, None, 'mish', -1,
1, 2, False
],
[
'DarkRes', 'csp', 15, False, 256, None, None, None, None, 'mish',
-1, 1, 3, True
],
[
'DarkRes', 'csp', 15, False, 512, None, None, None, None, 'mish',
-1, 2, 4, True
],
[
'DarkRes', 'csp', 7, False, 1024, None, None, None, None, 'mish',
-1, 4, 5, True
],
[
'DarkRes', 'csp', 7, False, 1024, None, None, None, None, 'mish',
-1, 8, 6, True
],
[
'DarkRes', 'csp', 7, False, 1024, None, None, None, None, 'mish',
-1, 16, 7, True
],
]
}
DARKNET53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 76
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'leaky', -1, 1, 0,
False
],
[
'DarkRes', 'residual', 1, True, 64, None, None, None, None, 'leaky',
-1, 1, 1, False
],
[
'DarkRes', 'residual', 2, False, 128, None, None, None, None,
'leaky', -1, 1, 2, False
],
[
'DarkRes', 'residual', 8, False, 256, None, None, None, None,
'leaky', -1, 1, 3, True
],
[
'DarkRes', 'residual', 8, False, 512, None, None, None, None,
'leaky', -1, 2, 4, True
],
[
'DarkRes', 'residual', 4, False, 1024, None, None, None, None,
'leaky', -1, 4, 5, True
],
]
}
CSPDARKNETTINY = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 28
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 2, 'same', 'leaky', -1, 1, 0,
False
],
[
'ConvBN', None, 1, False, 64, None, 3, 2, 'same', 'leaky', -1, 1, 1,
False
],
[
'CSPTiny', 'csp_tiny', 1, False, 64, None, 3, 2, 'same', 'leaky',
-1, 1, 2, False
],
[
'CSPTiny', 'csp_tiny', 1, False, 128, None, 3, 2, 'same', 'leaky',
-1, 1, 3, False
],
[
'CSPTiny', 'csp_tiny', 1, False, 256, None, 3, 2, 'same', 'leaky',
-1, 1, 4, True
],
[
'ConvBN', None, 1, False, 512, None, 3, 1, 'same', 'leaky', -1, 1,
5, True
],
]
}
DARKNETTINY = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 14
},
'backbone': [
[
'ConvBN', None, 1, False, 16, None, 3, 1, 'same', 'leaky', -1, 1, 0,
False
],
[
'DarkTiny', 'tiny', 1, True, 32, None, 3, 2, 'same', 'leaky', -1, 1,
1, False
],
[
'DarkTiny', 'tiny', 1, True, 64, None, 3, 2, 'same', 'leaky', -1, 1,
2, False
],
[
'DarkTiny', 'tiny', 1, False, 128, None, 3, 2, 'same', 'leaky', -1,
1, 3, False
],
[
'DarkTiny', 'tiny', 1, False, 256, None, 3, 2, 'same', 'leaky', -1,
1, 4, True
],
[
'DarkTiny', 'tiny', 1, False, 512, None, 3, 2, 'same', 'leaky', -1,
1, 5, False
],
[
'DarkTiny', 'tiny', 1, False, 1024, None, 3, 1, 'same', 'leaky', -1,
1, 5, True
],
]
}
BACKBONES = {
'darknettiny': DARKNETTINY,
'darknet53': DARKNET53,
'cspdarknet53': CSPDARKNET53,
'altered_cspdarknet53': CSPADARKNET53,
'cspdarknettiny': CSPDARKNETTINY,
'csp-large': LARGECSP53,
}
class Darknet(tf.keras.Model):
"""The Darknet backbone architecture."""
def __init__(
self,
model_id='darknet53',
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
min_level=None,
max_level=5,
width_scale=1.0,
depth_scale=1.0,
use_reorg_input=False,
csp_level_mod=(),
activation=None,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
dilate=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
layer_specs, splits = Darknet.get_model_config(model_id)
self._model_name = model_id
self._splits = splits
self._input_specs = input_specs
self._registry = LayerBuilder()
# default layer look up
self._min_size = min_level
self._max_size = max_level
self._output_specs = None
self._csp_level_mod = set(csp_level_mod)
self._kernel_initializer = kernel_initializer
self._bias_regularizer = bias_regularizer
self._norm_momentum = norm_momentum
self._norm_epislon = norm_epsilon
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._activation = activation
self._kernel_regularizer = kernel_regularizer
self._dilate = dilate
self._width_scale = width_scale
self._depth_scale = depth_scale
self._use_reorg_input = use_reorg_input
self._default_dict = {
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epislon,
'use_sync_bn': self._use_sync_bn,
'activation': self._activation,
'use_separable_conv': self._use_separable_conv,
'dilation_rate': 1,
'name': None
}
inputs = tf.keras.Input(shape=input_specs.shape[1:])
output = self._build_struct(layer_specs, inputs)
super().__init__(
inputs=inputs, outputs=output, name=self._model_name, **kwargs
)
@property
def output_specs(self):
return self._output_specs
@property
def splits(self):
return self._splits
def _build_struct(self, net, inputs):
if self._use_reorg_input:
inputs = nn_blocks.Reorg()(inputs)
net[0].filters = net[1].filters
net[0].output_name = net[1].output_name
del net[1]
endpoints = collections.OrderedDict()
stack_outputs = [inputs]
for i, config in enumerate(net):
if config.output_name > self._max_size:
break
if config.output_name in self._csp_level_mod:
config.stack = 'residual'
config.filters = int(config.filters * self._width_scale)
config.repetitions = int(config.repetitions * self._depth_scale)
if config.stack is None:
x = self._build_block(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
elif config.stack == 'residual':
x = self._residual_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
elif config.stack == 'csp':
x = self._csp_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
elif config.stack == 'csp_tiny':
x_pass, x = self._csp_tiny_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x_pass)
elif config.stack == 'tiny':
x = self._tiny_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
if (config.is_output and self._min_size is None):
endpoints[str(config.output_name)] = x
elif (self._min_size is not None and
config.output_name >= self._min_size and
config.output_name <= self._max_size):
endpoints[str(config.output_name)] = x
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints.keys()}
return endpoints
def _get_activation(self, activation):
if self._activation is None:
return activation
return self._activation
def _csp_stack(self, inputs, config, name):
if config.bottleneck:
csp_filter_scale = 1
residual_filter_scale = 2
scale_filters = 1
else:
csp_filter_scale = 2
residual_filter_scale = 1
scale_filters = 2
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_csp_down'
if self._dilate:
self._default_dict['dilation_rate'] = config.dilation_rate
degrid = int(tf.math.log(float(config.dilation_rate)) / tf.math.log(2.))
else:
self._default_dict['dilation_rate'] = 1
degrid = 0
    # swap/add dilation
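    # Degridding: the first `repetitions - degrid` residual blocks below run
    # at the configured dilation rate, and each remaining block halves the
    # rate, so the deepest blocks regain local connectivity while the output
    # stride stays unchanged.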
x, x_route = nn_blocks.CSPRoute(
filters=config.filters,
filter_scale=csp_filter_scale,
downsample=True,
**self._default_dict)(
inputs)
dilated_reps = config.repetitions - degrid
for i in range(dilated_reps):
self._default_dict['name'] = f'{name}_{i}'
x = nn_blocks.DarkResidual(
filters=config.filters // scale_filters,
filter_scale=residual_filter_scale,
**self._default_dict,
)(x)
for i in range(dilated_reps, config.repetitions):
self._default_dict['dilation_rate'] = max(
1, self._default_dict['dilation_rate'] // 2
)
self._default_dict['name'] = (
f"{name}_{i}_degridded_{self._default_dict['dilation_rate']}"
)
x = nn_blocks.DarkResidual(
filters=config.filters // scale_filters,
filter_scale=residual_filter_scale,
**self._default_dict,
)(x)
self._default_dict['name'] = f'{name}_csp_connect'
output = nn_blocks.CSPConnect(
filters=config.filters,
filter_scale=csp_filter_scale,
**self._default_dict,
)([x, x_route])
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return output
def _csp_tiny_stack(self, inputs, config, name):
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_csp_tiny'
x, x_route = nn_blocks.CSPTiny(
filters=config.filters, **self._default_dict)(
inputs)
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return x, x_route
def _tiny_stack(self, inputs, config, name):
x = tf.keras.layers.MaxPool2D(
pool_size=2,
strides=config.strides,
padding='same',
data_format=None,
name=f'{name}_tiny/pool')(
inputs)
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_tiny/conv'
x = nn_blocks.ConvBN(
filters=config.filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
**self._default_dict)(
x)
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return x
def _residual_stack(self, inputs, config, name):
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_residual_down'
if self._dilate:
self._default_dict['dilation_rate'] = config.dilation_rate
if config.repetitions < 8:
config.repetitions += 2
else:
self._default_dict['dilation_rate'] = 1
x = nn_blocks.DarkResidual(
filters=config.filters, downsample=True, **self._default_dict
)(inputs)
dilated_reps = (
config.repetitions - self._default_dict['dilation_rate'] // 2 - 1
)
for i in range(dilated_reps):
self._default_dict['name'] = f'{name}_{i}'
x = nn_blocks.DarkResidual(filters=config.filters, **self._default_dict)(
x
)
for i in range(dilated_reps, config.repetitions - 1):
self._default_dict['dilation_rate'] = (
self._default_dict['dilation_rate'] // 2
)
self._default_dict['name'] = (
f"{name}_{i}_degridded_{self._default_dict['dilation_rate']}"
)
x = nn_blocks.DarkResidual(filters=config.filters, **self._default_dict)(
x
)
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
self._default_dict['dilation_rate'] = 1
return x
def _build_block(self, inputs, config, name):
x = inputs
i = 0
self._default_dict['activation'] = self._get_activation(config.activation)
while i < config.repetitions:
self._default_dict['name'] = f'{name}_{i}'
layer = self._registry(config, self._default_dict)
x = layer(x)
i += 1
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return x
@staticmethod
def get_model_config(name):
name = name.lower()
backbone = BACKBONES[name]['backbone']
splits = BACKBONES[name]['splits']
return build_block_specs(backbone), splits
@property
def model_id(self):
return self._model_name
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def get_config(self):
layer_config = {
'model_id': self._model_name,
'min_level': self._min_size,
'max_level': self._max_size,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epislon,
'use_sync_bn': self._use_sync_bn,
'activation': self._activation,
}
return layer_config
@factory.register_backbone_builder('darknet')
def build_darknet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds darknet."""
backbone_config = backbone_config.get()
model = Darknet(
model_id=backbone_config.model_id,
min_level=backbone_config.min_level,
max_level=backbone_config.max_level,
input_specs=input_specs,
dilate=backbone_config.dilate,
width_scale=backbone_config.width_scale,
depth_scale=backbone_config.depth_scale,
use_reorg_input=backbone_config.use_reorg_input,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
use_separable_conv=backbone_config.use_separable_conv,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
return model
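# Example usage (illustrative sketch; the model_id and input size below are
# assumptions):
#
#   backbone = Darknet(model_id='cspdarknet53', min_level=3, max_level=5)
#   images = tf.keras.Input(shape=(416, 416, 3), batch_size=1)
#   endpoints = backbone(images)
#   # endpoints['3'], endpoints['4'] and endpoints['5'] are features at
#   # strides 8, 16 and 32 (all at stride 8 when dilate=True).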
| 22,015 | 30.317212 | 80 | py |
models | models-master/official/projects/yolo/modeling/heads/yolov7_head_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolov7 heads."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.yolo.modeling.backbones import yolov7 as backbone
from official.projects.yolo.modeling.decoders import yolov7 as decoder
from official.projects.yolo.modeling.heads import yolov7_head as head
_INPUT_SIZE = (224, 224)
class YoloV7DetectionHeadTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('yolov7',),
)
def test_network_creation(self, model_id):
"""Tests declaration of YOLOv7 detection head."""
tf.keras.backend.set_image_data_format('channels_last')
backbone_network = backbone.YoloV7(model_id)
decoder_network = decoder.YoloV7(backbone_network.output_specs, model_id)
head_network = head.YoloV7DetectionHead()
inputs = tf.keras.Input(shape=(*_INPUT_SIZE, 3), batch_size=1)
outputs = head_network(decoder_network(backbone_network(inputs)))
for level, level_output in outputs.items():
scale = 2 ** int(level)
input_size = (_INPUT_SIZE[0] // scale, _INPUT_SIZE[1] // scale)
head_config = head_network.get_config()
num_classes = head_config['num_classes']
num_anchors = head_config['num_anchors']
self.assertAllEqual(
(1, *input_size, num_anchors, num_classes + 5),
level_output.shape.as_list(),
)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
num_classes=3,
min_level=3,
max_level=5,
num_anchors=3,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
)
network = head.YoloV7DetectionHead(**kwargs)
# Create another network object from the first object's config.
new_network = head.YoloV7DetectionHead.from_config(network.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,724 | 34.38961 | 79 | py |
models | models-master/official/projects/yolo/modeling/heads/yolo_head_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolo heads."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.yolo.modeling.heads import yolo_head as heads
class YoloDecoderTest(parameterized.TestCase, tf.test.TestCase):
def test_network_creation(self):
"""Test creation of YOLO family models."""
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
classes = 100
bps = 3
head = heads.YoloHead(3, 5, classes=classes, boxes_per_level=bps)
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
endpoints = head(inputs)
for key in endpoints.keys():
expected_input_shape = input_shape[key]
expected_input_shape[-1] = (classes + 5) * bps
self.assertAllEqual(endpoints[key].shape.as_list(), expected_input_shape)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
classes = 100
bps = 3
head = heads.YoloHead(3, 5, classes=classes, boxes_per_level=bps)
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
_ = head(inputs)
configs = head.get_config()
head_from_config = heads.YoloHead.from_config(configs)
self.assertAllEqual(head.get_config(), head_from_config.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,323 | 30.405405 | 79 | py |
models | models-master/official/projects/yolo/modeling/heads/yolov7_head.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv7 heads."""
import tensorflow as tf
from official.projects.yolo.ops import initializer_ops
class YoloV7DetectionHead(tf.keras.layers.Layer):
"""YOLOv7 Detection Head."""
def __init__(
self,
num_classes=80,
min_level=3,
max_level=5,
num_anchors=3,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
use_separable_conv=False,
**kwargs,
):
"""Initializes YOLOv7 head.
Args:
num_classes: integer.
min_level: minimum feature level.
max_level: maximum feature level.
num_anchors: integer for number of anchors at each location.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_initializer: bias initializer for convolutional layers.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
      use_separable_conv: `bool` whether to use separable convs.
**kwargs: other keyword arguments.
"""
super().__init__(**kwargs)
self._num_classes = num_classes
self._min_level = min_level
self._max_level = max_level
self._num_anchors = num_anchors
self._kernel_initializer = initializer_ops.pytorch_kernel_initializer(
kernel_initializer
)
self._kernel_regularizer = kernel_regularizer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
self._use_separable_conv = use_separable_conv
def _bias_init(self, scale, in_channels, isize=640, no_per_conf=8):
def bias(shape, dtype):
init = tf.keras.initializers.VarianceScaling(
scale=1 / 3, mode='fan_in', distribution='uniform')
base = init([in_channels, *shape], dtype=dtype)[0]
base = tf.reshape(base, [self._num_anchors, -1])
box, conf, classes = tf.split(base, [4, 1, -1], axis=-1)
conf += tf.math.log(no_per_conf / ((isize / scale)**2))
classes += tf.math.log(0.6 / (self._num_classes - 0.99))
base = tf.concat([box, conf, classes], axis=-1)
base = tf.reshape(base, [-1])
return base
return bias
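  # For reference, the objectness offset above encodes a prior of roughly
  # `no_per_conf` objects per image at each scale: with the default isize=640
  # and a stride-8 level (scale=8) it adds log(8 / 80**2) ~= -6.7, a very low
  # initial objectness score; the class offset similarly biases each class
  # toward a roughly 0.6 / num_classes prior.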
def build(self, input_shape):
self._convs = []
self._implicit_adds = []
self._implicit_muls = []
conv_op = (
tf.keras.layers.SeparableConv2D
if self._use_separable_conv
else tf.keras.layers.Conv2D
)
for level in range(self._min_level, self._max_level + 1):
# Note that we assume height == width.
h = input_shape[str(level)][2]
scale = 2 ** int(level)
in_channels = input_shape[str(level)][-1]
# Outputs are num_classes + 5 (box coordinates + objectness score)
self._convs.append(
conv_op(
(self._num_classes + 5) * self._num_anchors,
kernel_size=1,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_initializer=self._bias_init(scale, in_channels, h * scale),
)
)
self._implicit_adds.append(
self.add_weight(
name=f'implicit_adds_l{level}',
shape=[1, 1, 1, in_channels],
initializer=tf.keras.initializers.random_normal(
mean=0.0, stddev=0.02
),
trainable=True,
)
)
self._implicit_muls.append(
self.add_weight(
name=f'implicit_muls_l{level}',
shape=[1, 1, 1, (self._num_classes + 5) * self._num_anchors],
initializer=tf.keras.initializers.random_normal(
mean=1.0, stddev=0.02
),
trainable=True,
)
)
super().build(input_shape)
def call(self, inputs, training=False):
outputs = {}
for i, level in enumerate(range(self._min_level, self._max_level + 1)):
x = inputs[str(level)]
x = self._implicit_adds[i] + x
x = self._convs[i](x)
x = self._implicit_muls[i] * x
_, h, w, _ = x.get_shape().as_list()
x = tf.reshape(x, [-1, h, w, self._num_anchors, self._num_classes + 5])
outputs[str(level)] = x
return outputs
def get_config(self):
config = dict(
num_classes=self._num_classes,
min_level=self._min_level,
max_level=self._max_level,
num_anchors=self._num_anchors,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_initializer=self._bias_initializer,
bias_regularizer=self._bias_regularizer,
use_separable_conv=self._use_separable_conv,
)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
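# Example usage (illustrative sketch; the feature shapes are assumptions and
# would normally come from the YOLOv7 decoder):
#
#   head = YoloV7DetectionHead(num_classes=80, min_level=3, max_level=5)
#   feats = {'3': tf.ones([1, 80, 80, 256]),
#            '4': tf.ones([1, 40, 40, 512]),
#            '5': tf.ones([1, 20, 20, 1024])}
#   outputs = head(feats)
#   # outputs['3'] has shape [1, 80, 80, 3, 85]: one box/objectness/class
#   # vector per anchor at every location.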
| 5,454 | 33.525316 | 78 | py |
models | models-master/official/projects/yolo/modeling/heads/yolo_head.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo heads."""
import tensorflow as tf
from official.projects.yolo.modeling.layers import nn_blocks
class YoloHead(tf.keras.layers.Layer):
"""YOLO Prediction Head."""
def __init__(self,
min_level,
max_level,
classes=80,
boxes_per_level=3,
output_extras=0,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation=None,
smart_bias=False,
use_separable_conv=False,
**kwargs):
"""Yolo Prediction Head initialization function.
Args:
min_level: `int`, the minimum backbone output level.
max_level: `int`, the maximum backbone output level.
classes: `int`, number of classes per category.
boxes_per_level: `int`, number of boxes to predict per level.
      output_extras: `int`, number of additional output channels that the head
should predict for non-object detection and non-image classification
tasks.
norm_momentum: `float`, normalization momentum for the moving average.
norm_epsilon: `float`, small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
      activation: `str`, the activation function to use, typically leaky or mish.
smart_bias: `bool`, whether to use smart bias.
      use_separable_conv: `bool` whether to use separable convs.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._min_level = min_level
self._max_level = max_level
self._key_list = [
str(key) for key in range(self._min_level, self._max_level + 1)
]
self._classes = classes
self._boxes_per_level = boxes_per_level
self._output_extras = output_extras
self._output_conv = (classes + output_extras + 5) * boxes_per_level
self._smart_bias = smart_bias
self._use_separable_conv = use_separable_conv
self._base_config = dict(
activation=activation,
norm_momentum=norm_momentum,
norm_epsilon=norm_epsilon,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
self._conv_config = dict(
filters=self._output_conv,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
use_bn=False,
use_separable_conv=self._use_separable_conv,
**self._base_config)
def bias_init(self, scale, inshape, isize=640, no_per_conf=8):
def bias(shape, dtype):
init = tf.keras.initializers.Zeros()
base = init(shape, dtype=dtype)
if self._smart_bias:
base = tf.reshape(base, [self._boxes_per_level, -1])
box, conf, classes = tf.split(base, [4, 1, -1], axis=-1)
conf += tf.math.log(no_per_conf / ((isize / scale)**2))
classes += tf.math.log(0.6 / (self._classes - 0.99))
base = tf.concat([box, conf, classes], axis=-1)
base = tf.reshape(base, [-1])
return base
return bias
def build(self, input_shape):
self._head = dict()
for key in self._key_list:
scale = 2**int(key)
self._head[key] = nn_blocks.ConvBN(
bias_initializer=self.bias_init(scale, input_shape[key][-1]),
**self._conv_config)
def call(self, inputs):
outputs = dict()
for key in self._key_list:
outputs[key] = self._head[key](inputs[key])
return outputs
@property
def output_depth(self):
return (self._classes + self._output_extras + 5) * self._boxes_per_level
@property
def num_boxes(self):
if self._min_level is None or self._max_level is None:
raise Exception(
'Model has to be built before number of boxes can be determined.')
return (self._max_level - self._min_level + 1) * self._boxes_per_level
@property
def num_heads(self):
return self._max_level - self._min_level + 1
def get_config(self):
config = dict(
min_level=self._min_level,
max_level=self._max_level,
classes=self._classes,
boxes_per_level=self._boxes_per_level,
output_extras=self._output_extras,
**self._base_config)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
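# Example usage (illustrative sketch; the feature shapes are assumptions):
#
#   head = YoloHead(min_level=3, max_level=5, classes=80, boxes_per_level=3)
#   feats = {'3': tf.ones([1, 52, 52, 256]),
#            '4': tf.ones([1, 26, 26, 512]),
#            '5': tf.ones([1, 13, 13, 1024])}
#   outputs = head(feats)
#   # Each outputs[level] keeps its spatial size and has
#   # head.output_depth == (80 + 5) * 3 == 255 output channels.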
| 5,234 | 33.440789 | 80 | py |
models | models-master/official/projects/yolo/tasks/yolov7.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes used to train Yolo."""
from typing import Optional
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import config_definitions
from official.core import input_reader
from official.core import task_factory
from official.modeling import performance
from official.projects.yolo import optimization
from official.projects.yolo.configs import yolov7 as exp_cfg
from official.projects.yolo.dataloaders import tf_example_decoder
from official.projects.yolo.dataloaders import yolo_input
from official.projects.yolo.losses import yolov7_loss
from official.projects.yolo.modeling import factory
from official.projects.yolo.ops import kmeans_anchors
from official.projects.yolo.ops import mosaic
from official.projects.yolo.ops import preprocessing_ops
from official.projects.yolo.tasks import task_utils
from official.vision.dataloaders import tfds_factory
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.ops import box_ops
OptimizationConfig = optimization.OptimizationConfig
RuntimeConfig = config_definitions.RuntimeConfig
@task_factory.register_task_cls(exp_cfg.YoloV7Task)
class YoloV7Task(base_task.Task):
"""A single-replica view of training procedure.
YOLO task provides artifacts for training/evalution procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss,
post-processing, and customized metrics with reduction.
"""
def __init__(self, params, logging_dir: Optional[str] = None):
super().__init__(params, logging_dir)
min_level = self.task_config.model.min_level
max_level = self.task_config.model.max_level
anchors_dict = self.task_config.model.anchor_boxes.get(
min_level, max_level)[0]
anchors, strides = [], []
for level in range(min_level, max_level + 1):
anchors.append(anchors_dict[str(level)])
strides.append(2 ** level)
loss_config = self.task_config.model.loss
if loss_config.use_ota:
loss_fn = yolov7_loss.YoloV7LossOTA
else:
loss_fn = yolov7_loss.YoloV7Loss
self._loss_fn = loss_fn(
anchors=anchors,
strides=strides,
input_size=self.task_config.model.input_size[:2],
alpha=loss_config.alpha,
gamma=loss_config.gamma,
box_weight=loss_config.box_weight,
obj_weight=loss_config.obj_weight,
cls_weight=loss_config.cls_weight,
label_smoothing=loss_config.label_smoothing,
anchor_threshold=loss_config.anchor_threshold,
iou_mix_ratio=loss_config.iou_mix_ratio,
num_classes=self.task_config.model.num_classes,
auto_balance=loss_config.auto_balance,
)
self._coco_91_to_80 = False
self._metrics = []
# globally set the random seed
preprocessing_ops.set_random_seeds(seed=params.seed)
if self.task_config.model.anchor_boxes.generate_anchors:
self.generate_anchors()
return
def generate_anchors(self):
"""Generate Anchor boxes for an arbitrary object detection dataset."""
input_size = self.task_config.model.input_size
anchor_cfg = self.task_config.model.anchor_boxes
backbone = self.task_config.model.backbone.get()
dataset = self.task_config.train_data
decoder = self._get_data_decoder(dataset)
num_anchors = backbone.max_level - backbone.min_level + 1
num_anchors *= anchor_cfg.anchors_per_scale
gbs = dataset.global_batch_size
dataset.global_batch_size = 1
box_reader = kmeans_anchors.BoxGenInputReader(
dataset,
dataset_fn=dataset_fn.pick_dataset_fn(
self.task_config.train_data.file_type),
decoder_fn=decoder.decode)
boxes = box_reader.read(
k=num_anchors,
anchors_per_scale=anchor_cfg.anchors_per_scale,
image_resolution=input_size,
scaling_mode=anchor_cfg.scaling_mode,
box_generation_mode=anchor_cfg.box_generation_mode,
num_samples=anchor_cfg.num_samples)
dataset.global_batch_size = gbs
with open('anchors.txt', 'w') as f:
f.write(f'input resolution: {input_size} \n boxes: \n {boxes}')
    logging.info('INFO: boxes will be saved to anchors.txt, make sure to save '
                 'them and update the boxes field in your yaml config file.')
anchor_cfg.set_boxes(boxes)
return boxes
def build_model(self):
"""Build an instance of Yolo."""
model_base_cfg = self.task_config.model
l2_weight_decay = self.task_config.weight_decay / 2.0
input_size = model_base_cfg.input_size.copy()
input_specs = tf.keras.layers.InputSpec(shape=[None] + input_size)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay) if l2_weight_decay else None)
model = factory.build_yolov7(input_specs, model_base_cfg, l2_regularizer)
model.build(input_specs.shape)
model.summary(print_fn=logging.info)
# save for later usage within the task.
self._model = model
return model
def _get_data_decoder(self, params):
"""Get a decoder object to decode the dataset."""
if params.tfds_name:
decoder = tfds_factory.get_detection_decoder(params.tfds_name)
else:
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
self._coco_91_to_80 = decoder_cfg.coco91_to_80
decoder = tf_example_decoder.TfExampleDecoder(
coco91_to_80=decoder_cfg.coco91_to_80,
regenerate_source_id=decoder_cfg.regenerate_source_id)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
regenerate_source_id=decoder_cfg.regenerate_source_id)
else:
raise ValueError('Unknown decoder type: {}!'.format(
params.decoder.type))
return decoder
def build_inputs(self, params, input_context=None):
"""Build input dataset."""
model = self.task_config.model
    # get anchor boxes dict based on the model's min and max level
backbone = model.backbone.get()
anchor_dict, level_limits = model.anchor_boxes.get(backbone.min_level,
backbone.max_level)
params.seed = self.task_config.seed
    # set shared parameters between mosaic and yolo_input
base_config = dict(
letter_box=params.parser.letter_box,
aug_rand_translate=params.parser.aug_rand_translate,
aug_rand_angle=params.parser.aug_rand_angle,
aug_rand_perspective=params.parser.aug_rand_perspective,
area_thresh=params.parser.area_thresh,
random_flip=params.parser.random_flip,
seed=params.seed,
)
# get the decoder
decoder = self._get_data_decoder(params)
# init Mosaic
sample_fn = mosaic.Mosaic(
output_size=model.input_size,
mosaic_frequency=params.parser.mosaic.mosaic_frequency,
mosaic9_frequency=params.parser.mosaic.mosaic9_frequency,
mixup_frequency=params.parser.mosaic.mixup_frequency,
jitter=params.parser.mosaic.jitter,
mosaic_center=params.parser.mosaic.mosaic_center,
mosaic9_center=params.parser.mosaic.mosaic9_center,
mosaic_crop_mode=params.parser.mosaic.mosaic_crop_mode,
aug_scale_min=params.parser.mosaic.aug_scale_min,
aug_scale_max=params.parser.mosaic.aug_scale_max,
**base_config)
# init Parser
parser = yolo_input.Parser(
output_size=model.input_size,
anchors=anchor_dict,
use_tie_breaker=params.parser.use_tie_breaker,
jitter=params.parser.jitter,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
aug_rand_hue=params.parser.aug_rand_hue,
aug_rand_saturation=params.parser.aug_rand_saturation,
aug_rand_brightness=params.parser.aug_rand_brightness,
max_num_instances=params.parser.max_num_instances,
scale_xy=model.detection_generator.scale_xy.get(),
expanded_strides=model.detection_generator.path_scales.get(),
darknet=False,
best_match_only=params.parser.best_match_only,
anchor_t=params.parser.anchor_thresh,
random_pad=params.parser.random_pad,
level_limits=level_limits,
dtype=params.dtype,
**base_config,
)
# init the dataset reader
reader = input_reader.InputReader(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
sample_fn=sample_fn.mosaic_fn(is_training=params.is_training),
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_metrics(self, training=True):
"""Build detection metrics."""
metrics = []
metrics = [
task_utils.ListMetrics(
['box_loss', 'obj_loss', 'cls_loss', 'iou'], 'separate_losses'
),
task_utils.ListMetrics(
['num_matchings', 'num_gts', 'num_duplicates'], 'stats'
),
]
self._metrics = metrics
if not training:
annotation_file = self.task_config.annotation_file
if self._coco_91_to_80:
annotation_file = None
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=annotation_file,
include_mask=False,
need_rescale_bboxes=False,
per_category_metrics=self._task_config.per_category_metrics,
max_num_eval_detections=self.task_config.max_num_eval_detections)
return metrics
def build_losses(self, outputs, labels, aux_losses=None):
"""Build YOLOv7 losses."""
return self._loss_fn(labels, outputs)
def train_step(self, inputs, model, optimizer, metrics=None):
"""Train Step.
    Runs the forward pass and backward propagation of the model.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
image, label = inputs
with tf.GradientTape(persistent=False) as tape:
# Compute a prediction
y_pred = model(image, training=True)
      # Cast to float32 for gradient computation
y_pred = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), y_pred)
# Get the total loss
loss = self.build_losses(y_pred['raw_output'], label)
scaled_loss = loss
# Scale the loss for numerical stability
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
# Compute the gradient
train_vars = model.trainable_variables
gradients = tape.gradient(scaled_loss, train_vars)
# Get unscaled loss if we are using the loss scale optimizer on fp16
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
gradients = optimizer.get_unscaled_gradients(gradients)
# Apply gradients to the model
optimizer.apply_gradients(zip(gradients, train_vars))
logs = {self.loss: loss}
# Compute all metrics
if metrics:
metrics[0].update_state(self._loss_fn.report_separate_losses())
logs.update({metrics[0].name: metrics[0].result()})
metrics[1].update_state(self._loss_fn.report_stats())
logs.update({metrics[1].name: metrics[1].result()})
return logs
def _reorg_boxes(self, boxes, info, num_detections):
"""Scale and Clean boxes prior to Evaluation."""
mask = tf.sequence_mask(num_detections, maxlen=tf.shape(boxes)[1])
mask = tf.cast(tf.expand_dims(mask, axis=-1), boxes.dtype)
# Denormalize the boxes by the shape of the image
inshape = tf.expand_dims(info[:, 1, :], axis=1)
ogshape = tf.expand_dims(info[:, 0, :], axis=1)
scale = tf.expand_dims(info[:, 2, :], axis=1)
offset = tf.expand_dims(info[:, 3, :], axis=1)
boxes = box_ops.denormalize_boxes(boxes, inshape)
boxes = box_ops.clip_boxes(boxes, inshape)
boxes += tf.tile(offset, [1, 1, 2])
boxes /= tf.tile(scale, [1, 1, 2])
boxes = box_ops.clip_boxes(boxes, ogshape)
# Mask the boxes for usage
boxes *= mask
boxes += (mask - 1)
return boxes
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
image, label = inputs
# Step the model once
y_pred = model(image, training=False)
y_pred = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), y_pred)
loss_val = self.build_losses(y_pred['raw_output'], label)
logs = {self.loss: loss_val}
# Reorganize and rescale the boxes
info = label['groundtruths']['image_info']
boxes = self._reorg_boxes(y_pred['bbox'], info, y_pred['num_detections'])
    # Build the input for the coco evaluation metric
coco_model_outputs = {
'detection_boxes': boxes,
'detection_scores': y_pred['confidence'],
'detection_classes': y_pred['classes'],
'num_detections': y_pred['num_detections'],
'source_id': label['groundtruths']['source_id'],
'image_info': label['groundtruths']['image_info']
}
# Compute all metrics
if metrics:
logs.update(
{self.coco_metric.name: (label['groundtruths'], coco_model_outputs)})
if metrics:
metrics[0].update_state(self._loss_fn.report_separate_losses())
logs.update({metrics[0].name: metrics[0].result()})
metrics[1].update_state(self._loss_fn.report_stats())
logs.update({metrics[1].name: metrics[1].result()})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
"""Get Metric Results."""
if not state:
self.coco_metric.reset_states()
state = self.coco_metric
self.coco_metric.update_state(step_outputs[self.coco_metric.name][0],
step_outputs[self.coco_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
"""Reduce logs and remove unneeded items. Update with COCO results."""
res = self.coco_metric.result()
return res
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
logging.info('Training from Scratch.')
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def create_optimizer(self,
optimizer_config: OptimizationConfig,
runtime_config: Optional[RuntimeConfig] = None):
"""Creates an TF optimizer from configurations.
Args:
optimizer_config: the parameters of the Optimization settings.
runtime_config: the parameters of the runtime.
Returns:
A tf.optimizers.Optimizer object.
"""
opt_factory = optimization.YoloOptimizerFactory(optimizer_config)
# pylint: disable=protected-access
ema = opt_factory._use_ema
opt_factory._use_ema = False
opt_type = opt_factory._optimizer_type
if opt_type == 'sgd_torch':
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
optimizer.set_bias_lr(
opt_factory.get_bias_lr_schedule(self._task_config.smart_bias_lr))
optimizer.search_and_set_variable_groups(self._model.trainable_variables)
else:
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
opt_factory._use_ema = ema
if ema:
logging.info('EMA is enabled.')
optimizer = opt_factory.add_ema(optimizer)
# pylint: enable=protected-access
if runtime_config and runtime_config.loss_scale:
use_float16 = runtime_config.mixed_precision_dtype == 'float16'
optimizer = performance.configure_optimizer(
optimizer,
use_float16=use_float16,
loss_scale=runtime_config.loss_scale)
return optimizer
| 17,679 | 35.833333 | 80 | py |
models | models-master/official/projects/yolo/tasks/yolo.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes used to train Yolo."""
import collections
from typing import Optional
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import config_definitions
from official.core import input_reader
from official.core import task_factory
from official.modeling import performance
from official.projects.yolo import optimization
from official.projects.yolo.configs import yolo as exp_cfg
from official.projects.yolo.dataloaders import tf_example_decoder
from official.projects.yolo.dataloaders import yolo_input
from official.projects.yolo.modeling import factory
from official.projects.yolo.ops import kmeans_anchors
from official.projects.yolo.ops import mosaic
from official.projects.yolo.ops import preprocessing_ops
from official.projects.yolo.tasks import task_utils
from official.vision.dataloaders import tfds_factory
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.ops import box_ops
OptimizationConfig = optimization.OptimizationConfig
RuntimeConfig = config_definitions.RuntimeConfig
@task_factory.register_task_cls(exp_cfg.YoloTask)
class YoloTask(base_task.Task):
"""A single-replica view of training procedure.
YOLO task provides artifacts for training/evalution procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss,
post-processing, and customized metrics with reduction.
"""
def __init__(self, params, logging_dir: Optional[str] = None):
super().__init__(params, logging_dir)
self.coco_metric = None
self._loss_fn = None
self._model = None
self._coco_91_to_80 = False
self._metrics = []
# globally set the random seed
preprocessing_ops.set_random_seeds(seed=params.seed)
if self.task_config.model.anchor_boxes.generate_anchors:
self.generate_anchors()
return
def generate_anchors(self):
"""Generate Anchor boxes for an arbitrary object detection dataset."""
input_size = self.task_config.model.input_size
anchor_cfg = self.task_config.model.anchor_boxes
backbone = self.task_config.model.backbone.get()
dataset = self.task_config.train_data
decoder = self._get_data_decoder(dataset)
num_anchors = backbone.max_level - backbone.min_level + 1
num_anchors *= anchor_cfg.anchors_per_scale
gbs = dataset.global_batch_size
dataset.global_batch_size = 1
box_reader = kmeans_anchors.BoxGenInputReader(
dataset,
dataset_fn=dataset_fn.pick_dataset_fn(
self.task_config.train_data.file_type),
decoder_fn=decoder.decode)
boxes = box_reader.read(
k=num_anchors,
anchors_per_scale=anchor_cfg.anchors_per_scale,
image_resolution=input_size,
scaling_mode=anchor_cfg.scaling_mode,
box_generation_mode=anchor_cfg.box_generation_mode,
num_samples=anchor_cfg.num_samples)
dataset.global_batch_size = gbs
with open('anchors.txt', 'w') as f:
f.write(f'input resolution: {input_size} \n boxes: \n {boxes}')
    logging.info('INFO: boxes will be saved to anchors.txt, make sure to save'
                 ' them and update the boxes field in your yaml config file.')
anchor_cfg.set_boxes(boxes)
return boxes
def build_model(self):
"""Build an instance of Yolo."""
model_base_cfg = self.task_config.model
l2_weight_decay = self.task_config.weight_decay / 2.0
input_size = model_base_cfg.input_size.copy()
input_specs = tf.keras.layers.InputSpec(shape=[None] + input_size)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay) if l2_weight_decay else None)
model, losses = factory.build_yolo(
input_specs, model_base_cfg, l2_regularizer)
model.build(input_specs.shape)
model.summary(print_fn=logging.info)
# save for later usage within the task.
self._loss_fn = losses
self._model = model
return model
def _get_data_decoder(self, params):
"""Get a decoder object to decode the dataset."""
if params.tfds_name:
decoder = tfds_factory.get_detection_decoder(params.tfds_name)
else:
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
self._coco_91_to_80 = decoder_cfg.coco91_to_80
decoder = tf_example_decoder.TfExampleDecoder(
coco91_to_80=decoder_cfg.coco91_to_80,
regenerate_source_id=decoder_cfg.regenerate_source_id)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
regenerate_source_id=decoder_cfg.regenerate_source_id)
else:
raise ValueError('Unknown decoder type: {}!'.format(
params.decoder.type))
return decoder
def build_inputs(self, params, input_context=None):
"""Build input dataset."""
model = self.task_config.model
    # get anchor boxes dict based on the model's min and max level
backbone = model.backbone.get()
anchor_dict, level_limits = model.anchor_boxes.get(backbone.min_level,
backbone.max_level)
params.seed = self.task_config.seed
    # set shared parameters between mosaic and yolo_input
base_config = dict(
letter_box=params.parser.letter_box,
aug_rand_translate=params.parser.aug_rand_translate,
aug_rand_angle=params.parser.aug_rand_angle,
aug_rand_perspective=params.parser.aug_rand_perspective,
area_thresh=params.parser.area_thresh,
random_flip=params.parser.random_flip,
seed=params.seed,
)
# get the decoder
decoder = self._get_data_decoder(params)
# init Mosaic
sample_fn = mosaic.Mosaic(
output_size=model.input_size,
mosaic_frequency=params.parser.mosaic.mosaic_frequency,
mixup_frequency=params.parser.mosaic.mixup_frequency,
jitter=params.parser.mosaic.jitter,
mosaic_center=params.parser.mosaic.mosaic_center,
mosaic_crop_mode=params.parser.mosaic.mosaic_crop_mode,
aug_scale_min=params.parser.mosaic.aug_scale_min,
aug_scale_max=params.parser.mosaic.aug_scale_max,
**base_config)
# init Parser
parser = yolo_input.Parser(
output_size=model.input_size,
anchors=anchor_dict,
use_tie_breaker=params.parser.use_tie_breaker,
jitter=params.parser.jitter,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
aug_rand_hue=params.parser.aug_rand_hue,
aug_rand_saturation=params.parser.aug_rand_saturation,
aug_rand_brightness=params.parser.aug_rand_brightness,
max_num_instances=params.parser.max_num_instances,
scale_xy=model.detection_generator.scale_xy.get(),
expanded_strides=model.detection_generator.path_scales.get(),
darknet=model.darknet_based_model,
best_match_only=params.parser.best_match_only,
anchor_t=params.parser.anchor_thresh,
random_pad=params.parser.random_pad,
level_limits=level_limits,
dtype=params.dtype,
**base_config)
# init the dataset reader
reader = input_reader.InputReader(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
sample_fn=sample_fn.mosaic_fn(is_training=params.is_training),
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_metrics(self, training=True):
"""Build detection metrics."""
metrics = []
backbone = self.task_config.model.backbone.get()
metric_names = collections.defaultdict(list)
for key in range(backbone.min_level, backbone.max_level + 1):
key = str(key)
metric_names[key].append('loss')
metric_names[key].append('avg_iou')
metric_names[key].append('avg_obj')
metric_names['net'].append('box')
metric_names['net'].append('class')
metric_names['net'].append('conf')
for _, key in enumerate(metric_names.keys()):
metrics.append(task_utils.ListMetrics(metric_names[key], name=key))
self._metrics = metrics
if not training:
annotation_file = self.task_config.annotation_file
if self._coco_91_to_80:
annotation_file = None
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=annotation_file,
include_mask=False,
need_rescale_bboxes=False,
per_category_metrics=self._task_config.per_category_metrics,
max_num_eval_detections=self.task_config.max_num_eval_detections)
return metrics
def build_losses(self, outputs, labels, aux_losses=None):
"""Build YOLO losses."""
return self._loss_fn(labels, outputs)
def train_step(self, inputs, model, optimizer, metrics=None):
"""Train Step.
    Runs the forward pass and backward propagation of the model.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
image, label = inputs
with tf.GradientTape(persistent=False) as tape:
# Compute a prediction
y_pred = model(image, training=True)
      # Cast to float32 for gradient computation
y_pred = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), y_pred)
# Get the total loss
(scaled_loss, metric_loss,
loss_metrics) = self.build_losses(y_pred['raw_output'], label)
# Scale the loss for numerical stability
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
# Compute the gradient
train_vars = model.trainable_variables
gradients = tape.gradient(scaled_loss, train_vars)
# Get unscaled loss if we are using the loss scale optimizer on fp16
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
gradients = optimizer.get_unscaled_gradients(gradients)
# Apply gradients to the model
optimizer.apply_gradients(zip(gradients, train_vars))
logs = {self.loss: metric_loss}
# Compute all metrics
if metrics:
for m in metrics:
m.update_state(loss_metrics[m.name])
logs.update({m.name: m.result()})
return logs
def _reorg_boxes(self, boxes, info, num_detections):
"""Scale and Clean boxes prior to Evaluation."""
mask = tf.sequence_mask(num_detections, maxlen=tf.shape(boxes)[1])
mask = tf.cast(tf.expand_dims(mask, axis=-1), boxes.dtype)
# Denormalize the boxes by the shape of the image
inshape = tf.expand_dims(info[:, 1, :], axis=1)
ogshape = tf.expand_dims(info[:, 0, :], axis=1)
scale = tf.expand_dims(info[:, 2, :], axis=1)
offset = tf.expand_dims(info[:, 3, :], axis=1)
boxes = box_ops.denormalize_boxes(boxes, inshape)
boxes = box_ops.clip_boxes(boxes, inshape)
boxes += tf.tile(offset, [1, 1, 2])
boxes /= tf.tile(scale, [1, 1, 2])
boxes = box_ops.clip_boxes(boxes, ogshape)
# Mask the boxes for usage
boxes *= mask
boxes += (mask - 1)
return boxes
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
image, label = inputs
# Step the model once
y_pred = model(image, training=False)
y_pred = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), y_pred)
(_, metric_loss, loss_metrics) = self.build_losses(y_pred['raw_output'],
label)
logs = {self.loss: metric_loss}
# Reorganize and rescale the boxes
info = label['groundtruths']['image_info']
boxes = self._reorg_boxes(y_pred['bbox'], info, y_pred['num_detections'])
    # Build the input for the coco evaluation metric
coco_model_outputs = {
'detection_boxes': boxes,
'detection_scores': y_pred['confidence'],
'detection_classes': y_pred['classes'],
'num_detections': y_pred['num_detections'],
'source_id': label['groundtruths']['source_id'],
'image_info': label['groundtruths']['image_info']
}
# Compute all metrics
if metrics:
logs.update(
{self.coco_metric.name: (label['groundtruths'], coco_model_outputs)})
for m in metrics:
m.update_state(loss_metrics[m.name])
logs.update({m.name: m.result()})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
"""Get Metric Results."""
if not state:
self.coco_metric.reset_states()
state = self.coco_metric
self.coco_metric.update_state(step_outputs[self.coco_metric.name][0],
step_outputs[self.coco_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
"""Reduce logs and remove unneeded items. Update with COCO results."""
res = self.coco_metric.result()
return res
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
logging.info('Training from Scratch.')
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def create_optimizer(self,
optimizer_config: OptimizationConfig,
runtime_config: Optional[RuntimeConfig] = None):
"""Creates an TF optimizer from configurations.
Args:
optimizer_config: the parameters of the Optimization settings.
runtime_config: the parameters of the runtime.
Returns:
A tf.optimizers.Optimizer object.
"""
opt_factory = optimization.YoloOptimizerFactory(optimizer_config)
# pylint: disable=protected-access
ema = opt_factory._use_ema
opt_factory._use_ema = False
opt_type = opt_factory._optimizer_type
if opt_type == 'sgd_torch':
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
optimizer.set_bias_lr(
opt_factory.get_bias_lr_schedule(self._task_config.smart_bias_lr))
optimizer.search_and_set_variable_groups(self._model.trainable_variables)
else:
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
opt_factory._use_ema = ema
if ema:
logging.info('EMA is enabled.')
optimizer = opt_factory.add_ema(optimizer)
# pylint: enable=protected-access
if runtime_config and runtime_config.loss_scale:
use_float16 = runtime_config.mixed_precision_dtype == 'float16'
optimizer = performance.configure_optimizer(
optimizer,
use_float16=use_float16,
loss_scale=runtime_config.loss_scale)
return optimizer
| 16,647 | 35.669604 | 80 | py |
models | models-master/official/projects/yolo/tasks/task_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for yolo task."""
import tensorflow as tf
class ListMetrics:
"""Private class used to cleanly place the matric values for each level."""
def __init__(self, metric_names, name="ListMetrics"):
self.name = name
self._metric_names = metric_names
self._metrics = self.build_metric()
return
def build_metric(self):
metric_names = self._metric_names
metrics = []
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
return metrics
def update_state(self, loss_metrics):
metrics = self._metrics
for m in metrics:
m.update_state(loss_metrics[m.name])
return
def result(self):
logs = dict()
metrics = self._metrics
for m in metrics:
logs.update({m.name: m.result()})
return logs
def reset_states(self):
metrics = self._metrics
for m in metrics:
m.reset_states()
return
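# Example usage of ListMetrics (illustrative sketch, not part of the original
# file). `update_state` expects a dict keyed by the metric names given to the
# constructor, and `result` returns a dict of running means:
#
#   metric = ListMetrics(['box', 'class', 'conf'], name='net')
#   metric.update_state({'box': 0.5, 'class': 0.2, 'conf': 0.1})
#   logs = metric.result()  # {'box': 0.5, 'class': 0.2, 'conf': 0.1}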
| 1,526 | 27.811321 | 77 | py |
models | models-master/official/projects/yolo/ops/loss_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo loss utility functions."""
import numpy as np
import tensorflow as tf
from official.projects.yolo.ops import box_ops
from official.projects.yolo.ops import math_ops
@tf.custom_gradient
def sigmoid_bce(y, x_prime, label_smoothing):
"""Applies the Sigmoid Cross Entropy Loss.
Implements the same derivative as that found in the Darknet C library.
The derivative of this method is not the same as the standard binary cross
entropy with logits function.
The BCE with logits function equation is as follows:
x = 1 / (1 + exp(-x_prime))
bce = -ylog(x) - (1 - y)log(1 - x)
The standard BCE with logits function derivative is as follows:
dloss = -y/x + (1-y)/(1-x)
dsigmoid = x * (1 - x)
dx = dloss * dsigmoid
This derivative can be reduced simply to:
dx = (-y + x)
This simplification is used by the darknet library in order to improve
training stability. The gradient is almost the same
as tf.keras.losses.binary_crossentropy but varies slightly and
yields different performance.
Args:
y: `Tensor` holding ground truth data.
x_prime: `Tensor` holding the predictions prior to application of the
sigmoid operation.
label_smoothing: float value between 0.0 and 1.0 indicating the amount of
smoothing to apply to the data.
Returns:
    bce: Tensor of the loss values to be applied.
delta: callable function indicating the custom gradient for this operation.
"""
eps = 1e-9
x = tf.math.sigmoid(x_prime)
y = tf.stop_gradient(y * (1 - label_smoothing) + 0.5 * label_smoothing)
bce = -y * tf.math.log(x + eps) - (1 - y) * tf.math.log(1 - x + eps)
def delta(dpass):
x = tf.math.sigmoid(x_prime)
dx = (-y + x) * dpass
dy = tf.zeros_like(y)
return dy, dx, 0.0
return bce, delta
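# Example usage of sigmoid_bce (illustrative sketch, not part of the original
# file). The decorated function returns only the per-element loss; the custom
# gradient function is consumed by tf.custom_gradient:
#
#   y = tf.constant([[1.0, 0.0]])
#   x_prime = tf.constant([[2.0, -1.0]])
#   bce = sigmoid_bce(y, x_prime, 0.0)  # shape [1, 2]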
def apply_mask(mask, x, value=0):
"""This function is used for gradient masking.
The YOLO loss function makes extensive use of dynamically shaped tensors.
To allow this use case on the TPU while preserving the gradient correctly
for back propagation we use this masking function to use a tf.where operation
to hard set masked location to have a gradient and a value of zero.
Args:
mask: A `Tensor` with the same shape as x used to select values of
importance.
x: A `Tensor` with the same shape as mask that will be getting masked.
value: `float` constant additive value.
Returns:
x: A masked `Tensor` with the same shape as x.
"""
mask = tf.cast(mask, tf.bool)
masked = tf.where(mask, x, tf.zeros_like(x) + value)
return masked
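# Example of apply_mask (illustrative, not part of the original file):
#
#   x = tf.constant([1.0, 2.0, 3.0])
#   mask = tf.constant([1.0, 0.0, 1.0])
#   apply_mask(mask, x)  # -> [1.0, 0.0, 3.0], keeping a well defined zero
#                        # gradient at the masked location.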
def build_grid(indexes, truths, preds, ind_mask, update=False, grid=None):
"""This function is used to broadcast elements into the output shape.
  This function is used to broadcast a list of truths into the correct index
in the output shape. This is used for the ground truth map construction in
the scaled loss and the classification map in the darknet loss.
Args:
indexes: A `Tensor` for the indexes
truths: A `Tensor` for the ground truth.
preds: A `Tensor` for the predictions.
ind_mask: A `Tensor` for the index masks.
update: A `bool` for updating the grid.
grid: A `Tensor` for the grid.
Returns:
grid: A `Tensor` representing the augmented grid.
"""
  # this function is used to broadcast all the indexes into the correct
  # ground truth mask, used for the iou detection map in the scaled loss
  # and the classification mask in the darknet loss
num_flatten = tf.shape(preds)[-1]
# is there a way to verify that we are not on the CPU?
ind_mask = tf.cast(ind_mask, indexes.dtype)
# find all the batch indexes using the cumulated sum of a ones tensor
  # cumsum(ones) - 1 yields the zero indexed batches
bhep = tf.reduce_max(tf.ones_like(indexes), axis=-1, keepdims=True)
bhep = tf.math.cumsum(bhep, axis=0) - 1
  # concatenate the batch indexes to the box indexes
indexes = tf.concat([bhep, indexes], axis=-1)
indexes = apply_mask(tf.cast(ind_mask, indexes.dtype), indexes)
indexes = (indexes + (ind_mask - 1))
# mask truths
truths = apply_mask(tf.cast(ind_mask, truths.dtype), truths)
truths = (truths + (tf.cast(ind_mask, truths.dtype) - 1))
# reshape the indexes into the correct shape for the loss,
# just flatten all indexes but the last
indexes = tf.reshape(indexes, [-1, 4])
# also flatten the ground truth value on all axis but the last
truths = tf.reshape(truths, [-1, num_flatten])
  # build a zero grid in the same shape as the predictions
if grid is None:
grid = tf.zeros_like(preds)
# remove invalid values from the truths that may have
# come up from computation, invalid = nan and inf
truths = math_ops.rm_nan_inf(truths)
# scatter update the zero grid
if update:
grid = tf.tensor_scatter_nd_update(grid, indexes, truths)
else:
grid = tf.tensor_scatter_nd_max(grid, indexes, truths)
# stop gradient and return to avoid TPU errors and save compute
# resources
return grid
class GridGenerator:
"""Grid generator that generates anchor grids for box decoding."""
def __init__(self, anchors, scale_anchors=None):
"""Initialize Grid Generator.
Args:
anchors: A `List[List[int]]` for the anchor boxes that are used in the
model at all levels.
scale_anchors: An `int` for how much to scale this level to get the
original input shape.
"""
self.dtype = tf.keras.backend.floatx()
self._scale_anchors = scale_anchors
self._anchors = tf.convert_to_tensor(anchors)
return
def _build_grid_points(self, lheight, lwidth, anchors, dtype):
"""Generate a grid of fixed grid edges for box center decoding."""
with tf.name_scope('center_grid'):
y = tf.range(0, lheight)
x = tf.range(0, lwidth)
x_left = tf.tile(
tf.transpose(tf.expand_dims(x, axis=-1), perm=[1, 0]), [lheight, 1])
y_left = tf.tile(tf.expand_dims(y, axis=-1), [1, lwidth])
x_y = tf.stack([x_left, y_left], axis=-1)
x_y = tf.cast(x_y, dtype=dtype)
num = tf.shape(anchors)[0]
x_y = tf.expand_dims(
tf.tile(tf.expand_dims(x_y, axis=-2), [1, 1, num, 1]), axis=0)
return x_y
def _build_anchor_grid(self, height, width, anchors, dtype):
"""Get the transformed anchor boxes for each dimention."""
with tf.name_scope('anchor_grid'):
num = tf.shape(anchors)[0]
anchors = tf.cast(anchors, dtype=dtype)
anchors = tf.reshape(anchors, [1, 1, 1, num, 2])
anchors = tf.tile(anchors, [1, tf.cast(height, tf.int32),
tf.cast(width, tf.int32), 1, 1])
return anchors
def _extend_batch(self, grid, batch_size):
return tf.tile(grid, [batch_size, 1, 1, 1, 1])
def __call__(self, height, width, batch_size, dtype=None):
if dtype is None:
self.dtype = tf.keras.backend.floatx()
else:
self.dtype = dtype
grid_points = self._build_grid_points(height, width, self._anchors,
self.dtype)
anchor_grid = self._build_anchor_grid(
height, width,
tf.cast(self._anchors, self.dtype) /
tf.cast(self._scale_anchors, self.dtype), self.dtype)
grid_points = self._extend_batch(grid_points, batch_size)
anchor_grid = self._extend_batch(anchor_grid, batch_size)
return grid_points, anchor_grid
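# Example usage of GridGenerator (illustrative sketch; the anchor values below
# are made up, not taken from any real config). For an 80x80 prediction map at
# stride 8 with three anchors per location:
#
#   gen = GridGenerator(anchors=[[10, 13], [16, 30], [33, 23]], scale_anchors=8)
#   grid_points, anchor_grid = gen(80, 80, batch_size=2, dtype=tf.float32)
#   # grid_points: [2, 80, 80, 3, 2] fixed (x, y) cell offsets.
#   # anchor_grid: [2, 80, 80, 3, 2] anchor (w, h) divided by the stride.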
TILE_SIZE = 50
class PairWiseSearch:
"""Apply a pairwise search between the ground truth and the labels.
The goal is to indicate the locations where the predictions overlap with
ground truth for dynamic ground truth associations.
"""
def __init__(self,
iou_type='iou',
any_match=True,
min_conf=0.0,
track_boxes=False,
track_classes=False):
"""Initialization of Pair Wise Search.
Args:
      iou_type: A `str` for the iou type to use.
      any_match: A `bool` for any match (no class match).
      min_conf: A `float` for the minimum confidence threshold.
      track_boxes: A `bool` for dynamic box assignment.
      track_classes: A `bool` for dynamic class assignment.
"""
self.iou_type = iou_type
self._any = any_match
self._min_conf = min_conf
self._track_boxes = track_boxes
self._track_classes = track_classes
return
def box_iou(self, true_box, pred_box):
# based on the type of loss, compute the iou loss for a box
# compute_<name> indicated the type of iou to use
if self.iou_type == 'giou':
_, iou = box_ops.compute_giou(true_box, pred_box)
elif self.iou_type == 'ciou':
_, iou = box_ops.compute_ciou(true_box, pred_box)
else:
iou = box_ops.compute_iou(true_box, pred_box)
return iou
def _search_body(self, pred_box, pred_class, boxes, classes, running_boxes,
running_classes, max_iou, idx):
"""Main search fn."""
# capture the batch size to be used, and gather a slice of
# boxes from the ground truth. currently TILE_SIZE = 50, to
# save memory
batch_size = tf.shape(boxes)[0]
box_slice = tf.slice(boxes, [0, idx * TILE_SIZE, 0],
[batch_size, TILE_SIZE, 4])
    # match the dimensions of the slice to the model predictions
# shape: [batch_size, 1, 1, num, TILE_SIZE, 4]
box_slice = tf.expand_dims(box_slice, axis=1)
box_slice = tf.expand_dims(box_slice, axis=1)
box_slice = tf.expand_dims(box_slice, axis=1)
box_grid = tf.expand_dims(pred_box, axis=-2)
# capture the classes
class_slice = tf.slice(classes, [0, idx * TILE_SIZE],
[batch_size, TILE_SIZE])
class_slice = tf.expand_dims(class_slice, axis=1)
class_slice = tf.expand_dims(class_slice, axis=1)
class_slice = tf.expand_dims(class_slice, axis=1)
iou = self.box_iou(box_slice, box_grid)
if self._min_conf > 0.0:
if not self._any:
class_grid = tf.expand_dims(pred_class, axis=-2)
class_mask = tf.one_hot(
tf.cast(class_slice, tf.int32),
depth=tf.shape(pred_class)[-1],
dtype=pred_class.dtype)
class_mask = tf.reduce_any(tf.equal(class_mask, class_grid), axis=-1)
else:
class_mask = tf.reduce_max(pred_class, axis=-1, keepdims=True)
class_mask = tf.cast(class_mask, iou.dtype)
iou *= class_mask
max_iou_ = tf.concat([max_iou, iou], axis=-1)
max_iou = tf.reduce_max(max_iou_, axis=-1, keepdims=True)
ind = tf.expand_dims(tf.argmax(max_iou_, axis=-1), axis=-1)
if self._track_boxes:
running_boxes = tf.expand_dims(running_boxes, axis=-2)
box_slice = tf.zeros_like(running_boxes) + box_slice
box_slice = tf.concat([running_boxes, box_slice], axis=-2)
running_boxes = tf.gather_nd(box_slice, ind, batch_dims=4)
if self._track_classes:
running_classes = tf.expand_dims(running_classes, axis=-1)
class_slice = tf.zeros_like(running_classes) + class_slice
class_slice = tf.concat([running_classes, class_slice], axis=-1)
running_classes = tf.gather_nd(class_slice, ind, batch_dims=4)
return (pred_box, pred_class, boxes, classes, running_boxes,
running_classes, max_iou, idx + 1)
def __call__(self,
pred_boxes,
pred_classes,
boxes,
classes,
clip_thresh=0.0):
num_boxes = tf.shape(boxes)[-2]
num_tiles = (num_boxes // TILE_SIZE) - 1
if self._min_conf > 0.0:
pred_classes = tf.cast(pred_classes > self._min_conf, pred_classes.dtype)
def _loop_cond(unused_pred_box, unused_pred_class, boxes, unused_classes,
unused_running_boxes, unused_running_classes, unused_max_iou,
idx):
      # check that the slice still contains boxes that are not all zeros
batch_size = tf.shape(boxes)[0]
box_slice = tf.slice(boxes, [0, idx * TILE_SIZE, 0],
[batch_size, TILE_SIZE, 4])
return tf.logical_and(idx < num_tiles,
tf.math.greater(tf.reduce_sum(box_slice), 0))
running_boxes = tf.zeros_like(pred_boxes)
running_classes = tf.zeros_like(tf.reduce_sum(running_boxes, axis=-1))
max_iou = tf.zeros_like(tf.reduce_sum(running_boxes, axis=-1))
max_iou = tf.expand_dims(max_iou, axis=-1)
(pred_boxes, pred_classes, boxes, classes, running_boxes, running_classes,
max_iou, _) = tf.while_loop(_loop_cond, self._search_body, [
pred_boxes, pred_classes, boxes, classes, running_boxes,
running_classes, max_iou,
tf.constant(0)
])
mask = tf.cast(max_iou > clip_thresh, running_boxes.dtype)
running_boxes *= mask
running_classes *= tf.squeeze(mask, axis=-1)
max_iou *= mask
max_iou = tf.squeeze(max_iou, axis=-1)
mask = tf.squeeze(mask, axis=-1)
return (tf.stop_gradient(running_boxes), tf.stop_gradient(running_classes),
tf.stop_gradient(max_iou), tf.stop_gradient(mask))
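# Example usage of PairWiseSearch (illustrative sketch, not part of the
# original file; tensor shapes are only indicative). Ground truth boxes are
# expected to be padded so they can be sliced in TILE_SIZE chunks:
#
#   search = PairWiseSearch(iou_type='ciou', any_match=True,
#                           track_boxes=True, track_classes=True)
#   boxes, classes, max_iou, mask = search(
#       pred_boxes, pred_classes, gt_boxes, gt_classes, clip_thresh=0.3)
#   # max_iou holds, per prediction location, the best iou against any ground
#   # truth box, and mask marks the locations where it exceeds clip_thresh.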
def average_iou(iou):
"""Computes the average intersection over union without counting locations.
where the iou is zero.
Args:
iou: A `Tensor` representing the iou values.
Returns:
tf.stop_gradient(avg_iou): A `Tensor` representing average
intersection over union.
"""
iou_sum = tf.reduce_sum(iou, axis=tf.range(1, tf.shape(tf.shape(iou))[0]))
counts = tf.cast(
tf.math.count_nonzero(iou, axis=tf.range(1,
tf.shape(tf.shape(iou))[0])),
iou.dtype)
avg_iou = tf.reduce_mean(math_ops.divide_no_nan(iou_sum, counts))
return tf.stop_gradient(avg_iou)
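# Worked example for average_iou (illustrative, not part of the original file):
#
#   iou = tf.constant([[0.5, 0.0, 0.7]])
#   average_iou(iou)  # -> 0.6, since zero-iou locations are not counted.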
def _scale_boxes(encoded_boxes, width, height, anchor_grid, grid_points,
scale_xy):
"""Decodes models boxes applying and exponential to width and height maps."""
# split the boxes
pred_xy = encoded_boxes[..., 0:2]
pred_wh = encoded_boxes[..., 2:4]
  # build a scaling tensor to get the offset of the box relative to the image
scaler = tf.convert_to_tensor([height, width, height, width])
scale_xy = tf.cast(scale_xy, encoded_boxes.dtype)
# apply the sigmoid
pred_xy = tf.math.sigmoid(pred_xy)
# scale the centers and find the offset of each box relative to
# their center pixel
pred_xy = pred_xy * scale_xy - 0.5 * (scale_xy - 1)
# scale the offsets and add them to the grid points or a tensor that is
  # the relative location of each pixel
box_xy = grid_points + pred_xy
  # scale the width and height of the predictions and correlate them
# to anchor boxes
box_wh = tf.math.exp(pred_wh) * anchor_grid
# build the final predicted box
scaled_box = tf.concat([box_xy, box_wh], axis=-1)
pred_box = scaled_box / scaler
# shift scaled boxes
scaled_box = tf.concat([pred_xy, box_wh], axis=-1)
return (scaler, scaled_box, pred_box)
@tf.custom_gradient
def _darknet_boxes(encoded_boxes, width, height, anchor_grid, grid_points,
max_delta, scale_xy):
"""Wrapper for _scale_boxes to implement a custom gradient."""
(scaler, scaled_box, pred_box) = _scale_boxes(encoded_boxes, width, height,
anchor_grid, grid_points,
scale_xy)
def delta(unused_dy_scaler, dy_scaled, dy):
dy_xy, dy_wh = tf.split(dy, 2, axis=-1)
dy_xy_, dy_wh_ = tf.split(dy_scaled, 2, axis=-1)
# add all the gradients that may have been applied to the
# boxes and those that have been applied to the width and height
dy_wh += dy_wh_
dy_xy += dy_xy_
# propagate the exponential applied to the width and height in
# order to ensure the gradient propagated is of the correct
# magnitude
pred_wh = encoded_boxes[..., 2:4]
dy_wh *= tf.math.exp(pred_wh)
dbox = tf.concat([dy_xy, dy_wh], axis=-1)
# apply the gradient clipping to xy and wh
dbox = math_ops.rm_nan_inf(dbox)
delta = tf.cast(max_delta, dbox.dtype)
dbox = tf.clip_by_value(dbox, -delta, delta)
return dbox, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
return (scaler, scaled_box, pred_box), delta
def _new_coord_scale_boxes(encoded_boxes, width, height, anchor_grid,
grid_points, scale_xy):
"""Decodes models boxes by squaring and scaling the width and height maps."""
# split the boxes
pred_xy = encoded_boxes[..., 0:2]
pred_wh = encoded_boxes[..., 2:4]
  # build a scaling tensor to get the offset of the box relative to the image
scaler = tf.convert_to_tensor([height, width, height, width])
scale_xy = tf.cast(scale_xy, pred_xy.dtype)
# apply the sigmoid
pred_xy = tf.math.sigmoid(pred_xy)
pred_wh = tf.math.sigmoid(pred_wh)
# scale the xy offset predictions according to the config
pred_xy = pred_xy * scale_xy - 0.5 * (scale_xy - 1)
# find the true offset from the grid points and the scaler
# where the grid points are the relative offset of each pixel with
# in the image
box_xy = grid_points + pred_xy
  # decode the width and height of the boxes and correlate them
# to the anchor boxes
box_wh = (2 * pred_wh)**2 * anchor_grid
# build the final boxes
scaled_box = tf.concat([box_xy, box_wh], axis=-1)
pred_box = scaled_box / scaler
# shift scaled boxes
scaled_box = tf.concat([pred_xy, box_wh], axis=-1)
return (scaler, scaled_box, pred_box)
@tf.custom_gradient
def _darknet_new_coord_boxes(encoded_boxes, width, height, anchor_grid,
grid_points, max_delta, scale_xy):
"""Wrapper for _new_coord_scale_boxes to implement a custom gradient."""
(scaler, scaled_box,
pred_box) = _new_coord_scale_boxes(encoded_boxes, width, height, anchor_grid,
grid_points, scale_xy)
def delta(unused_dy_scaler, dy_scaled, dy):
dy_xy, dy_wh = tf.split(dy, 2, axis=-1)
dy_xy_, dy_wh_ = tf.split(dy_scaled, 2, axis=-1)
# add all the gradients that may have been applied to the
# boxes and those that have been applied to the width and height
dy_wh += dy_wh_
dy_xy += dy_xy_
dbox = tf.concat([dy_xy, dy_wh], axis=-1)
# apply the gradient clipping to xy and wh
dbox = math_ops.rm_nan_inf(dbox)
delta = tf.cast(max_delta, dbox.dtype)
dbox = tf.clip_by_value(dbox, -delta, delta)
return dbox, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
return (scaler, scaled_box, pred_box), delta
def _anchor_free_scale_boxes(encoded_boxes,
width,
height,
stride,
grid_points,
darknet=False):
"""Decode models boxes using FPN stride under anchor free conditions."""
del darknet
# split the boxes
pred_xy = encoded_boxes[..., 0:2]
pred_wh = encoded_boxes[..., 2:4]
  # build a scaling tensor to get the offset of the box relative to the image
scaler = tf.convert_to_tensor([height, width, height, width])
# scale the offsets and add them to the grid points or a tensor that is
  # the relative location of each pixel
box_xy = (grid_points + pred_xy)
  # scale the width and height of the predictions and correlate them
# to anchor boxes
box_wh = tf.math.exp(pred_wh)
# build the final predicted box
scaled_box = tf.concat([box_xy, box_wh], axis=-1)
  # properly scale the box gradients
scaled_box = scaled_box * tf.cast(stride, scaled_box.dtype)
pred_box = scaled_box / tf.cast(scaler * stride, scaled_box.dtype)
return (scaler, scaled_box, pred_box)
def get_predicted_box(width,
height,
encoded_boxes,
anchor_grid,
grid_points,
scale_xy,
stride,
darknet=False,
box_type='original',
max_delta=np.inf):
"""Decodes the predicted boxes from the model format to a usable format.
This function decodes the model outputs into the [x, y, w, h] format for
use in the loss function as well as for use within the detection generator.
Args:
width: A `float` scalar indicating the width of the prediction layer.
height: A `float` scalar indicating the height of the prediction layer
encoded_boxes: A `Tensor` of shape [..., height, width, 4] holding encoded
boxes.
anchor_grid: A `Tensor` of shape [..., 1, 1, 2] holding the anchor boxes
organized for box decoding, box width and height.
grid_points: A `Tensor` of shape [..., height, width, 2] holding the anchor
boxes for decoding the box centers.
    scale_xy: A `float` scalar used to indicate the range for each center
outside of its given [..., i, j, 4] index, where i and j are indexing
pixels along the width and height of the predicted output map.
    stride: An `int` defining the amount of down stride relative to the input
image.
darknet: A `bool` used to select between custom gradient and default
autograd.
box_type: An `str` indicating the type of box encoding that is being used.
    max_delta: A `float` scalar used for gradient clipping in back propagation.
Returns:
scaler: A `Tensor` of shape [4] returned to allow the scaling of the ground
truth boxes to be of the same magnitude as the decoded predicted boxes.
scaled_box: A `Tensor` of shape [..., height, width, 4] with the predicted
boxes.
pred_box: A `Tensor` of shape [..., height, width, 4] with the predicted
boxes divided by the scaler parameter used to put all boxes in the [0, 1]
range.
"""
if box_type == 'anchor_free':
(scaler, scaled_box, pred_box) = _anchor_free_scale_boxes(
encoded_boxes, width, height, stride, grid_points, darknet=darknet)
elif darknet:
# pylint:disable=unbalanced-tuple-unpacking
    # if we are using the darknet loss we should not propagate the
# decoding of the box
if box_type == 'scaled':
(scaler, scaled_box,
pred_box) = _darknet_new_coord_boxes(encoded_boxes, width, height,
anchor_grid, grid_points, max_delta,
scale_xy)
else:
(scaler, scaled_box,
pred_box) = _darknet_boxes(encoded_boxes, width, height, anchor_grid,
grid_points, max_delta, scale_xy)
else:
# if we are using the scaled loss we should propagate the decoding of
# the boxes
if box_type == 'scaled':
(scaler, scaled_box,
pred_box) = _new_coord_scale_boxes(encoded_boxes, width, height,
anchor_grid, grid_points, scale_xy)
else:
(scaler, scaled_box, pred_box) = _scale_boxes(encoded_boxes, width,
height, anchor_grid,
grid_points, scale_xy)
return (scaler, scaled_box, pred_box)
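# Example usage of get_predicted_box (illustrative sketch, not part of the
# original file). The grids would typically come from GridGenerator above and
# the tensor shapes are only indicative:
#
#   scaler, scaled_box, pred_box = get_predicted_box(
#       width=80.0, height=80.0, encoded_boxes=encoded_boxes,
#       anchor_grid=anchor_grid, grid_points=grid_points,
#       scale_xy=2.0, stride=8, darknet=False, box_type='scaled')
#   # pred_box is divided by the returned scaler to land in the [0, 1] range,
#   # while scaled_box keeps the magnitude used inside the loss.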
| 23,482 | 36.097946 | 80 | py |
models | models-master/official/projects/yolo/ops/preprocessing_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing ops for yolo."""
import random
import numpy as np
import tensorflow as tf
from official.vision.ops import augment
from official.vision.ops import box_ops as bbox_ops
PAD_VALUE = 114
GLOBAL_SEED_SET = False
def set_random_seeds(seed=0):
"""Sets all accessible global seeds to properly apply randomization.
This is not the same as passing the seed as a variable to each call
to tf.random.For more, see the documentation for tf.random on the tensorflow
website https://www.tensorflow.org/api_docs/python/tf/random/set_seed. Note
that passing the seed to each random number generator will not give you the
expected behavior if you use more than one generator in a single function.
Args:
seed: `Optional[int]` representing the seed you want to use.
"""
if seed is not None:
global GLOBAL_SEED_SET
random.seed(seed)
GLOBAL_SEED_SET = True
tf.random.set_seed(seed)
np.random.seed(seed)
def random_uniform_strong(minval,
maxval,
dtype=tf.float32,
seed=None,
shape=None):
"""A unified function for consistent random number generation.
Equivalent to tf.random.uniform, except that minval and maxval are flipped if
  minval is greater than maxval. The random number generation is seed safe.
Args:
minval: An `int` for a lower or upper endpoint of the interval from which to
choose the random number.
maxval: An `int` for the other endpoint.
dtype: The output type of the tensor.
seed: An `int` used to set the seed.
shape: List or 1D tf.Tensor, output shape of the random generator.
Returns:
A random tensor of type `dtype` that falls between `minval` and `maxval`
excluding the larger one.
"""
if GLOBAL_SEED_SET:
seed = None
if minval > maxval:
minval, maxval = maxval, minval
return tf.random.uniform(
shape=shape or [], minval=minval, maxval=maxval, seed=seed, dtype=dtype)
def random_scale(val, dtype=tf.float32, seed=None):
"""Generates a random number for scaling a parameter by multiplication.
Generates a random number for the scale. Half of the time, the value is
between [1.0, val) with uniformly distributed probability. In the other half,
the value is the reciprocal of this value. The function is identical to the
one in the original implementation:
https://github.com/AlexeyAB/darknet/blob/a3714d0a/src/utils.c#L708-L713
Args:
val: A float representing the maximum scaling allowed.
dtype: The output type of the tensor.
seed: An `int` used to set the seed.
Returns:
The random scale.
"""
scale = random_uniform_strong(1.0, val, dtype=dtype, seed=seed)
do_ret = random_uniform_strong(minval=0, maxval=2, dtype=tf.int32, seed=seed)
if do_ret == 1:
return scale
return 1.0 / scale
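# Example of random_scale (illustrative, not part of the original file):
#
#   s = random_scale(1.5)
#   # Half of the time s is drawn uniformly from [1.0, 1.5); otherwise it is
#   # the reciprocal of such a draw, i.e. a value in (1 / 1.5, 1.0].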
def pad_max_instances(value, instances, pad_value=0, pad_axis=0):
"""Pad or clip the tensor value to a fixed length along a given axis.
Pads a dimension of the tensor to have a maximum number of instances filling
additional entries with the `pad_value`. Allows for selection of the padding
axis.
Args:
value: An input tensor.
instances: An `int` representing the maximum number of instances.
pad_value: An `int` representing the value used for padding until the
maximum number of instances is obtained.
pad_axis: An `int` representing the axis index to pad.
Returns:
The output tensor whose dimensions match the input tensor except with the
size along the `pad_axis` replaced by `instances`.
"""
# get the real shape of value
shape = tf.shape(value)
# compute the padding axis
if pad_axis < 0:
pad_axis = tf.rank(value) + pad_axis
  # determine how much of the tensor value to keep
dim1 = shape[pad_axis]
take = tf.math.reduce_min([instances, dim1])
value, _ = tf.split(value, [take, -1], axis=pad_axis)
# pad the clipped tensor to the right shape
pad = tf.convert_to_tensor([tf.math.reduce_max([instances - dim1, 0])])
nshape = tf.concat([shape[:pad_axis], pad, shape[(pad_axis + 1):]], axis=0)
pad_tensor = tf.fill(nshape, tf.cast(pad_value, dtype=value.dtype))
value = tf.concat([value, pad_tensor], axis=pad_axis)
if isinstance(instances, int):
vshape = value.get_shape().as_list()
vshape[pad_axis] = instances
value.set_shape(vshape)
return value
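# Example of pad_max_instances (illustrative, not part of the original file):
#
#   boxes = tf.ones([3, 4])
#   padded = pad_max_instances(boxes, instances=10, pad_value=0, pad_axis=0)
#   # padded.shape == [10, 4]; the first 3 rows are kept, the rest are zeros.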
def get_image_shape(image):
"""Consistently gets the width and height of the image.
Gets the shape of the image regardless of if the image is in the
(batch_size, x, y, c) format or the (x, y, c) format.
Args:
image: A tensor who has either 3 or 4 dimensions.
Returns:
A tuple (height, width), where height is the height of the image
and width is the width of the image.
"""
shape = tf.shape(image)
if shape.get_shape().as_list()[0] == 4:
width = shape[2]
height = shape[1]
else:
width = shape[1]
height = shape[0]
return height, width
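# Example of get_image_shape (illustrative, not part of the original file):
#
#   get_image_shape(tf.zeros([416, 640, 3]))     # -> (416, 640)
#   get_image_shape(tf.zeros([8, 416, 640, 3]))  # -> (416, 640)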
def _augment_hsv_darknet(image, rh, rs, rv, seed=None):
"""Randomize the hue, saturation, and brightness via the darknet method."""
if rh > 0.0:
deltah = random_uniform_strong(-rh, rh, seed=seed)
image = tf.image.adjust_hue(image, deltah)
if rs > 0.0:
deltas = random_scale(rs, seed=seed)
image = tf.image.adjust_saturation(image, deltas)
if rv > 0.0:
deltav = random_scale(rv, seed=seed)
image *= tf.cast(deltav, image.dtype)
# clip the values of the image between 0.0 and 1.0
image = tf.clip_by_value(image, 0.0, 1.0)
return image
def _augment_hsv_torch(image, rh, rs, rv, seed=None):
"""Randomize the hue, saturation, and brightness via the pytorch method."""
dtype = image.dtype
image = tf.cast(image, tf.float32)
image = tf.image.rgb_to_hsv(image)
gen_range = tf.cast([rh, rs, rv], image.dtype)
scale = tf.cast([180, 255, 255], image.dtype)
r = random_uniform_strong(
-1, 1, shape=[3], dtype=image.dtype, seed=seed) * gen_range + 1
image = tf.math.floor(tf.cast(image, scale.dtype) * scale)
image = tf.math.floor(tf.cast(image, r.dtype) * r)
h, s, v = tf.split(image, 3, axis=-1)
h = h % 180
s = tf.clip_by_value(s, 0, 255)
v = tf.clip_by_value(v, 0, 255)
image = tf.concat([h, s, v], axis=-1)
image = tf.cast(image, scale.dtype) / scale
image = tf.image.hsv_to_rgb(image)
return tf.cast(image, dtype)
def image_rand_hsv(image, rh, rs, rv, seed=None, darknet=False):
"""Randomly alters the hue, saturation, and brightness of an image.
Args:
image: `Tensor` of shape [None, None, 3] that needs to be altered.
rh: `float32` used to indicate the maximum delta that can be multiplied to
the hue.
rs: `float32` used to indicate the maximum delta that can be multiplied to
the saturation.
rv: `float32` used to indicate the maximum delta that can be multiplied to
the brightness.
seed: `Optional[int]` for the seed to use in the random number generation.
darknet: `bool` indicating whether the model was originally built in the
Darknet or PyTorch library.
Returns:
The HSV altered image in the same datatype as the input image.
"""
if darknet:
image = _augment_hsv_darknet(image, rh, rs, rv, seed=seed)
else:
image = _augment_hsv_torch(image, rh, rs, rv, seed=seed)
return image
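# Example usage of image_rand_hsv (illustrative sketch, not part of the
# original file). The image is expected to be a float tensor with values in
# [0, 1]:
#
#   image = tf.random.uniform([416, 416, 3])
#   jittered = image_rand_hsv(image, rh=0.1, rs=0.7, rv=0.4, darknet=True)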
def mosaic_cut(image, original_width, original_height, width, height, center,
ptop, pleft, pbottom, pright, shiftx, shifty):
"""Generates a random center location to use for the mosaic operation.
Given a center location, cuts the input image into a slice that will be
concatenated with other slices with the same center in order to construct
a final mosaicked image.
Args:
image: `Tensor` of shape [None, None, 3] that needs to be altered.
original_width: `float` value indicating the original width of the image.
original_height: `float` value indicating the original height of the image.
width: `float` value indicating the final width of the image.
height: `float` value indicating the final height of the image.
center: `float` value indicating the desired center of the final patched
image.
ptop: `float` value indicating the top of the image without padding.
pleft: `float` value indicating the left of the image without padding.
pbottom: `float` value indicating the bottom of the image without padding.
pright: `float` value indicating the right of the image without padding.
shiftx: `float` 0.0 or 1.0 value indicating if the image is on the left or
right.
shifty: `float` 0.0 or 1.0 value indicating if the image is at the top or
bottom.
Returns:
image: The cropped image in the same datatype as the input image.
crop_info: `float` tensor that is applied to the boxes in order to select
the boxes still contained within the image.
"""
def cast(values, dtype):
return [tf.cast(value, dtype) for value in values]
with tf.name_scope('mosaic_cut'):
center = tf.cast(center, width.dtype)
zero = tf.cast(0.0, width.dtype)
cut_x, cut_y = center[1], center[0]
# Select the crop of the image to use
left_shift = tf.minimum(
tf.minimum(cut_x, tf.maximum(zero, -pleft * width / original_width)),
width - cut_x)
top_shift = tf.minimum(
tf.minimum(cut_y, tf.maximum(zero, -ptop * height / original_height)),
height - cut_y)
right_shift = tf.minimum(
tf.minimum(width - cut_x,
tf.maximum(zero, -pright * width / original_width)), cut_x)
bot_shift = tf.minimum(
tf.minimum(height - cut_y,
tf.maximum(zero, -pbottom * height / original_height)),
cut_y)
(left_shift, top_shift, right_shift, bot_shift,
zero) = cast([left_shift, top_shift, right_shift, bot_shift, zero],
tf.float32)
# Build a crop offset and a crop size tensor to use for slicing.
crop_offset = [zero, zero, zero]
crop_size = [zero - 1, zero - 1, zero - 1]
if shiftx == 0.0 and shifty == 0.0:
crop_offset = [top_shift, left_shift, zero]
crop_size = [cut_y, cut_x, zero - 1]
elif shiftx == 1.0 and shifty == 0.0:
crop_offset = [top_shift, cut_x - right_shift, zero]
crop_size = [cut_y, width - cut_x, zero - 1]
elif shiftx == 0.0 and shifty == 1.0:
crop_offset = [cut_y - bot_shift, left_shift, zero]
crop_size = [height - cut_y, cut_x, zero - 1]
elif shiftx == 1.0 and shifty == 1.0:
crop_offset = [cut_y - bot_shift, cut_x - right_shift, zero]
crop_size = [height - cut_y, width - cut_x, zero - 1]
# Contain and crop the image.
ishape = tf.cast(tf.shape(image)[:2], crop_size[0].dtype)
crop_size[0] = tf.minimum(crop_size[0], ishape[0])
crop_size[1] = tf.minimum(crop_size[1], ishape[1])
crop_offset = tf.cast(crop_offset, tf.int32)
crop_size = tf.cast(crop_size, tf.int32)
image = tf.slice(image, crop_offset, crop_size)
crop_info = tf.stack([
tf.cast(ishape, tf.float32),
tf.cast(tf.shape(image)[:2], dtype=tf.float32),
tf.ones_like(ishape, dtype=tf.float32),
tf.cast(crop_offset[:2], tf.float32)
])
return image, crop_info
def resize_and_jitter_image(image,
desired_size,
jitter=0.0,
letter_box=None,
random_pad=True,
crop_only=False,
shiftx=0.5,
shifty=0.5,
cut=None,
method=tf.image.ResizeMethod.BILINEAR,
seed=None):
"""Resize, Pad, and distort a given input image.
Args:
image: a `Tensor` of shape [height, width, 3] representing an image.
desired_size: a `Tensor` or `int` list/tuple of two elements representing
[height, width] of the desired actual output image size.
jitter: an `int` representing the maximum jittering that can be applied to
the image.
letter_box: a `bool` representing if letterboxing should be applied.
random_pad: a `bool` representing if random padding should be applied.
crop_only: a `bool` representing if only cropping will be applied.
    shiftx: a `float` indicating if the image is on the left or right.
    shifty: a `float` value indicating if the image is at the top or bottom.
cut: a `float` value indicating the desired center of the final patched
image.
method: function to resize input image to scaled image.
seed: seed for random scale jittering.
Returns:
image_: a `Tensor` of shape [height, width, 3] where [height, width]
equals to `desired_size`.
infos: a 2D `Tensor` that encodes the information of the image and the
applied preprocessing. It is in the format of
[[original_height, original_width], [desired_height, desired_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale] is
the scaling factor, which is the ratio of
scaled dimension / original dimension.
cast([original_width, original_height, width, height, ptop, pleft, pbottom,
pright], tf.float32): a `Tensor` containing the information of the image
    and the applied preprocessing.
"""
def intersection(a, b):
"""Finds the intersection between 2 crops."""
minx = tf.maximum(a[0], b[0])
miny = tf.maximum(a[1], b[1])
maxx = tf.minimum(a[2], b[2])
maxy = tf.minimum(a[3], b[3])
return tf.convert_to_tensor([minx, miny, maxx, maxy])
def cast(values, dtype):
return [tf.cast(value, dtype) for value in values]
if jitter > 0.5 or jitter < 0:
raise ValueError('maximum change in aspect ratio must be between 0 and 0.5')
with tf.name_scope('resize_and_jitter_image'):
# Cast all parameters to a usable float data type.
jitter = tf.cast(jitter, tf.float32)
original_dtype, original_dims = image.dtype, tf.shape(image)[:2]
# Original width, original height, desired width, desired height.
original_width, original_height, width, height = cast(
[original_dims[1], original_dims[0], desired_size[1], desired_size[0]],
tf.float32)
# Compute the random delta width and height etc. and randomize the
# location of the corner points.
jitter_width = original_width * jitter
jitter_height = original_height * jitter
pleft = random_uniform_strong(
-jitter_width, jitter_width, jitter_width.dtype, seed=seed)
pright = random_uniform_strong(
-jitter_width, jitter_width, jitter_width.dtype, seed=seed)
ptop = random_uniform_strong(
-jitter_height, jitter_height, jitter_height.dtype, seed=seed)
pbottom = random_uniform_strong(
-jitter_height, jitter_height, jitter_height.dtype, seed=seed)
# Letter box the image.
if letter_box:
(image_aspect_ratio,
input_aspect_ratio) = original_width / original_height, width / height
distorted_aspect = image_aspect_ratio / input_aspect_ratio
delta_h, delta_w = 0.0, 0.0
pullin_h, pullin_w = 0.0, 0.0
if distorted_aspect > 1:
delta_h = ((original_width / input_aspect_ratio) - original_height) / 2
else:
delta_w = ((original_height * input_aspect_ratio) - original_width) / 2
ptop = ptop - delta_h - pullin_h
pbottom = pbottom - delta_h - pullin_h
pright = pright - delta_w - pullin_w
pleft = pleft - delta_w - pullin_w
# Compute the width and height to crop or pad to, and clip all crops to
# be contained within the image.
swidth = original_width - pleft - pright
sheight = original_height - ptop - pbottom
src_crop = intersection([ptop, pleft, sheight + ptop, swidth + pleft],
[0, 0, original_height, original_width])
# Random padding used for mosaic.
h_ = src_crop[2] - src_crop[0]
w_ = src_crop[3] - src_crop[1]
if random_pad:
rmh = tf.maximum(0.0, -ptop)
rmw = tf.maximum(0.0, -pleft)
else:
rmw = (swidth - w_) * shiftx
rmh = (sheight - h_) * shifty
# Cast cropping params to usable dtype.
src_crop = tf.cast(src_crop, tf.int32)
# Compute padding parameters.
dst_shape = [rmh, rmw, rmh + h_, rmw + w_]
ptop, pleft, pbottom, pright = dst_shape
pad = dst_shape * tf.cast([1, 1, -1, -1], ptop.dtype)
pad += tf.cast([0, 0, sheight, swidth], ptop.dtype)
pad = tf.cast(pad, tf.int32)
infos = []
# Crop the image to desired size.
cropped_image = tf.slice(
image, [src_crop[0], src_crop[1], 0],
[src_crop[2] - src_crop[0], src_crop[3] - src_crop[1], -1])
crop_info = tf.stack([
tf.cast(original_dims, tf.float32),
tf.cast(tf.shape(cropped_image)[:2], dtype=tf.float32),
tf.ones_like(original_dims, dtype=tf.float32),
tf.cast(src_crop[:2], tf.float32)
])
infos.append(crop_info)
if crop_only:
if not letter_box:
h_, w_ = cast(get_image_shape(cropped_image), width.dtype)
width = tf.cast(tf.round((w_ * width) / swidth), tf.int32)
height = tf.cast(tf.round((h_ * height) / sheight), tf.int32)
cropped_image = tf.image.resize(
cropped_image, [height, width], method=method)
cropped_image = tf.cast(cropped_image, original_dtype)
return cropped_image, infos, cast([
original_width, original_height, width, height, ptop, pleft, pbottom,
pright
], tf.int32)
# Pad the image to desired size.
image_ = tf.pad(
cropped_image, [[pad[0], pad[2]], [pad[1], pad[3]], [0, 0]],
constant_values=PAD_VALUE)
# Pad and scale info
isize = tf.cast(tf.shape(image_)[:2], dtype=tf.float32)
osize = tf.cast((desired_size[0], desired_size[1]), dtype=tf.float32)
pad_info = tf.stack([
tf.cast(tf.shape(cropped_image)[:2], tf.float32),
osize,
osize/isize,
(-tf.cast(pad[:2], tf.float32)*osize/isize)
])
infos.append(pad_info)
temp = tf.shape(image_)[:2]
cond = temp > tf.cast(desired_size, temp.dtype)
if tf.reduce_any(cond):
size = tf.cast(desired_size, temp.dtype)
size = tf.where(cond, size, temp)
image_ = tf.image.resize(
image_, (size[0], size[1]), method=tf.image.ResizeMethod.AREA)
image_ = tf.cast(image_, original_dtype)
image_ = tf.image.resize(
image_, (desired_size[0], desired_size[1]),
method=tf.image.ResizeMethod.BILINEAR,
antialias=False)
image_ = tf.cast(image_, original_dtype)
if cut is not None:
image_, crop_info = mosaic_cut(image_, original_width, original_height,
width, height, cut, ptop, pleft, pbottom,
pright, shiftx, shifty)
infos.append(crop_info)
return image_, infos, cast([
original_width, original_height, width, height, ptop, pleft, pbottom,
pright
], tf.float32)
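# Example usage (illustrative sketch; the jitter and letterbox settings below
# are assumptions chosen for demonstration, not required values):
#
#   image = tf.zeros([480, 640, 3], dtype=tf.uint8)
#   image_, infos, _ = resize_and_jitter_image(
#       image, [416, 416], jitter=0.3, letter_box=True, random_pad=True)
#   # image_ has shape [416, 416, 3]; `infos` records the crop and pad steps
#   # that transform_and_clip_boxes later uses to adjust the boxes.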
def _build_transform(image,
perspective=0.00,
degrees=0.0,
scale_min=1.0,
scale_max=1.0,
translate=0.0,
random_pad=False,
desired_size=None,
seed=None):
"""Builds a unified affine transformation to spatially augment the image."""
height, width = get_image_shape(image)
ch = height = tf.cast(height, tf.float32)
cw = width = tf.cast(width, tf.float32)
deg_to_rad = lambda x: tf.cast(x, tf.float32) * np.pi / 180.0
if desired_size is not None:
desired_size = tf.cast(desired_size, tf.float32)
ch = desired_size[0]
cw = desired_size[1]
# Compute the center of the image in the output resolution.
center = tf.eye(3, dtype=tf.float32)
center = tf.tensor_scatter_nd_update(center, [[0, 2], [1, 2]],
[-cw / 2, -ch / 2])
center_boxes = tf.tensor_scatter_nd_update(center, [[0, 2], [1, 2]],
[cw / 2, ch / 2])
# Compute a random rotation to apply.
rotation = tf.eye(3, dtype=tf.float32)
a = deg_to_rad(random_uniform_strong(-degrees, degrees, seed=seed))
cos = tf.math.cos(a)
sin = tf.math.sin(a)
rotation = tf.tensor_scatter_nd_update(rotation,
[[0, 0], [0, 1], [1, 0], [1, 1]],
[cos, -sin, sin, cos])
rotation_boxes = tf.tensor_scatter_nd_update(rotation,
[[0, 0], [0, 1], [1, 0], [1, 1]],
[cos, sin, -sin, cos])
# Compute a random perspective change to apply.
prespective_warp = tf.eye(3)
px = random_uniform_strong(-perspective, perspective, seed=seed)
py = random_uniform_strong(-perspective, perspective, seed=seed)
prespective_warp = tf.tensor_scatter_nd_update(prespective_warp,
[[2, 0], [2, 1]], [px, py])
prespective_warp_boxes = tf.tensor_scatter_nd_update(prespective_warp,
[[2, 0], [2, 1]],
[-px, -py])
# Compute a random scaling to apply.
scale = tf.eye(3, dtype=tf.float32)
s = random_uniform_strong(scale_min, scale_max, seed=seed)
scale = tf.tensor_scatter_nd_update(scale, [[0, 0], [1, 1]], [1 / s, 1 / s])
scale_boxes = tf.tensor_scatter_nd_update(scale, [[0, 0], [1, 1]], [s, s])
# Compute a random Translation to apply.
translation = tf.eye(3)
if (random_pad and height * s < ch and width * s < cw):
# The image is fully contained within the output window and arbitrarily
# translated to locations within that window.
center = center_boxes = tf.eye(3, dtype=tf.float32)
tx = random_uniform_strong(-1, 0, seed=seed) * (cw / s - width)
ty = random_uniform_strong(-1, 0, seed=seed) * (ch / s - height)
else:
# The image can be translated outside of the output resolution window
# but the image is translated relative to the output resolution not the
# input image resolution.
tx = random_uniform_strong(0.5 - translate, 0.5 + translate, seed=seed)
ty = random_uniform_strong(0.5 - translate, 0.5 + translate, seed=seed)
# Center and scale the image such that the window of translation is
# contained within the output resolution.
dx, dy = (width - cw / s) / width, (height - ch / s) / height
sx, sy = 1 - dx, 1 - dy
bx, by = dx / 2, dy / 2
tx, ty = bx + (sx * tx), by + (sy * ty)
# Scale the translation to width and height of the image.
tx *= width
ty *= height
translation = tf.tensor_scatter_nd_update(translation, [[0, 2], [1, 2]],
[tx, ty])
translation_boxes = tf.tensor_scatter_nd_update(translation, [[0, 2], [1, 2]],
[-tx, -ty])
# Use repeated matrix multiplications to combine all the image transformations
# into a single unified augmentation operation: M is applied to the image and
# Mb is applied to the boxes. The order of matrix multiplication is
# important: first translate, then scale, then rotate, then center, and
# finally alter the perspective.
affine = (translation @ scale @ rotation @ center @ prespective_warp)
affine_boxes = (
prespective_warp_boxes @ center_boxes @ rotation_boxes @ scale_boxes
@ translation_boxes)
return affine, affine_boxes, s
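# Note: both `affine` and `affine_boxes` are 3x3 matrices acting on
# homogeneous coordinates; a point maps as [x', y', w']^T = M @ [x, y, 1]^T
# followed by division by w' (see `_aug_boxes` inside `affine_warp_boxes`
# below for the box version of this projection).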
def affine_warp_image(image,
desired_size,
perspective=0.00,
degrees=0.0,
scale_min=1.0,
scale_max=1.0,
translate=0.0,
random_pad=False,
seed=None):
"""Applies random spatial augmentation to the image.
Args:
image: A `Tensor` for the image.
desired_size: A `tuple` for desired output image size.
perspective: A `float` for the maximum perspective change that can be
applied by the random perspective transform.
degrees: A `float` for the maximum degrees that can be applied to random
rotation.
scale_min: A `float` for the minimum scaling factor that can be applied to
random scaling.
scale_max: A `float` for the maximum scaling factor that can be applied to
random scaling.
translate: A `float` for the maximum translation that can be applied to
random translation.
random_pad: A `bool` for using random padding.
seed: An `Optional[int]` for the seed to use in random number generation.
Returns:
image: A `Tensor` representing the augmented image.
affine_matrix: A `Tensor` representing the augmenting matrix for the image.
affine_info: A `List` containing the size of the original image, the desired
output_size of the image and the augmenting matrix for the boxes.
"""
# Build an image transformation matrix.
image_size = tf.cast(get_image_shape(image), tf.float32)
affine_matrix, affine_boxes, _ = _build_transform(
image,
perspective=perspective,
degrees=degrees,
scale_min=scale_min,
scale_max=scale_max,
translate=translate,
random_pad=random_pad,
desired_size=desired_size,
seed=seed)
affine = tf.reshape(affine_matrix, [-1])
affine = tf.cast(affine[:-1], tf.float32)
# Apply the transformation to image.
image = augment.transform(
image,
affine,
fill_value=PAD_VALUE,
output_shape=desired_size,
interpolation='bilinear',
fill_mode='constant',
)
desired_size = tf.cast(desired_size, tf.float32)
affine_info = [image_size, desired_size, affine_boxes]
return image, affine_matrix, affine_info
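# Example usage (illustrative sketch; the augmentation magnitudes are assumed
# values used only for demonstration):
#
#   image = tf.zeros([416, 416, 3], dtype=tf.float32)
#   image, affine_matrix, affine_info = affine_warp_image(
#       image, [416, 416], degrees=10.0, scale_min=0.9, scale_max=1.1,
#       translate=0.1, random_pad=True)
#   # `affine_info` can be passed as the `affine` argument of
#   # transform_and_clip_boxes so the boxes are warped consistently.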
def affine_warp_boxes(affine, boxes, output_size, box_history):
"""Applies random rotation, random perspective change and random translation.
and random scaling to the boxes.
Args:
affine: A `Tensor` for the augmenting matrix for the boxes.
boxes: A `Tensor` for the boxes.
output_size: A `list` of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
box_history: A `Tensor` for the boxes history, which are the boxes that
undergo the same augmentations as `boxes`, but no clipping was applied. We
can keep track of how much changes are done to the boxes by keeping track
of this tensor.
Returns:
clipped_boxes: A `Tensor` representing the augmented boxes.
box_history: A `Tensor` representing the augmented box_history.
"""
def _get_corners(box):
"""Get the corner of each box as a tuple of (x, y) coordinates."""
ymi, xmi, yma, xma = tf.split(box, 4, axis=-1)
tl = tf.concat([xmi, ymi], axis=-1)
bl = tf.concat([xmi, yma], axis=-1)
tr = tf.concat([xma, ymi], axis=-1)
br = tf.concat([xma, yma], axis=-1)
return tf.concat([tl, bl, tr, br], axis=-1)
def _corners_to_boxes(corner):
"""Convert (x, y) corners back into boxes [ymin, xmin, ymax, xmax]."""
corner = tf.reshape(corner, [-1, 4, 2])
y = corner[..., 1]
x = corner[..., 0]
y_min = tf.reduce_min(y, axis=-1)
x_min = tf.reduce_min(x, axis=-1)
y_max = tf.reduce_max(y, axis=-1)
x_max = tf.reduce_max(x, axis=-1)
return tf.stack([y_min, x_min, y_max, x_max], axis=-1)
def _aug_boxes(affine_matrix, box):
"""Apply an affine transformation matrix M to the boxes augment boxes."""
corners = _get_corners(box)
corners = tf.reshape(corners, [-1, 4, 2])
z = tf.expand_dims(tf.ones_like(corners[..., 1]), axis=-1)
corners = tf.concat([corners, z], axis=-1)
corners = tf.transpose(
tf.matmul(affine_matrix, corners, transpose_b=True), perm=(0, 2, 1))
corners, p = tf.split(corners, [2, 1], axis=-1)
corners /= p
corners = tf.reshape(corners, [-1, 8])
box = _corners_to_boxes(corners)
return box
boxes = _aug_boxes(affine, boxes)
box_history = _aug_boxes(affine, box_history)
clipped_boxes = bbox_ops.clip_boxes(boxes, output_size)
return clipped_boxes, box_history
def boxes_candidates(clipped_boxes,
box_history,
wh_thr=2,
ar_thr=20,
area_thr=0.1):
"""Filters the boxes that don't satisfy the width/height and area constraints.
Args:
clipped_boxes: A `Tensor` for the boxes.
box_history: A `Tensor` for the boxes history, which are the boxes that
undergo the same augmentations as `boxes`, but no clipping was applied. We
can keep track of how much changes are done to the boxes by keeping track
of this tensor.
wh_thr: An `int` for the width/height threshold.
ar_thr: An `int` for the aspect ratio threshold.
area_thr: A `float` for the area threshold.
Returns:
indices[:, 0]: A `Tensor` representing valid boxes after filtering.
"""
if area_thr == 0.0:
wh_thr = 0
ar_thr = np.inf
area_thr = tf.math.abs(area_thr)
# Get the scaled and shifted heights of the original
# unclipped boxes.
og_height = tf.maximum(box_history[:, 2] - box_history[:, 0], 0.0)
og_width = tf.maximum(box_history[:, 3] - box_history[:, 1], 0.0)
# Get the scaled and shifted heights of the clipped boxes.
clipped_height = tf.maximum(clipped_boxes[:, 2] - clipped_boxes[:, 0], 0.0)
clipped_width = tf.maximum(clipped_boxes[:, 3] - clipped_boxes[:, 1], 0.0)
# Determine the aspect ratio of the clipped boxes.
ar = tf.maximum(clipped_width / (clipped_height + 1e-16),
clipped_height / (clipped_width + 1e-16))
# Ensure the clipped width and height are larger than a preset threshold.
conda = clipped_width >= wh_thr
condb = clipped_height >= wh_thr
# Ensure the area of the clipped box is larger than the area threshold.
area = (clipped_height * clipped_width) / (og_width * og_height + 1e-16)
condc = area > area_thr
# Ensure the aspect ratio is not too extreme.
condd = ar < ar_thr
cond = tf.expand_dims(
tf.logical_and(
tf.logical_and(conda, condb), tf.logical_and(condc, condd)),
axis=-1)
# Set all the boxes that fail the test to be equal to zero.
indices = tf.where(cond)
return indices[:, 0]
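# For example, with the defaults (wh_thr=2, ar_thr=20, area_thr=0.1), a box
# that keeps only 5% of its original area after clipping, or whose clipped
# width or height falls below 2 units, is filtered out.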
def resize_and_crop_boxes(boxes, image_scale, output_size, offset, box_history):
"""Resizes and crops the boxes.
Args:
boxes: A `Tensor` for the boxes.
image_scale: A `Tensor` for the scaling factor of the image.
output_size: A `list` of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
offset: A `Tensor` for how much translation was applied to the image.
box_history: A `Tensor` for the boxes history, which are the boxes that
undergo the same augmentations as `boxes`, but no clipping was applied. We
can keep track of how much changes are done to the boxes by keeping track
of this tensor.
Returns:
clipped_boxes: A `Tensor` representing the augmented boxes.
box_history: A `Tensor` representing the augmented box_history.
"""
# Shift and scale the input boxes.
boxes *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
boxes -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
# Apply the same shift and scale to the box history.
box_history *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
box_history -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
# Clip the shifted and scaled boxes.
clipped_boxes = bbox_ops.clip_boxes(boxes, output_size)
return clipped_boxes, box_history
def transform_and_clip_boxes(boxes,
infos,
affine=None,
shuffle_boxes=False,
area_thresh=0.1,
seed=None,
filter_and_clip_boxes=True):
"""Clips and cleans the boxes.
Args:
boxes: A `Tensor` for the boxes.
infos: A `list` that contains the image infos.
affine: A `list` that contains parameters for resize and crop.
shuffle_boxes: A `bool` for shuffling the boxes.
area_thresh: A `float` for the area threshold.
seed: seed for random number generation.
filter_and_clip_boxes: A `bool` for filtering and clipping the boxes to
[0, 1].
Returns:
boxes: A `Tensor` representing the augmented boxes.
inds: A `Tensor` of valid box indices.
"""
# Clip and clean boxes.
def get_valid_boxes(boxes):
"""Get indices for non-empty boxes."""
# Convert the boxes to center width height formatting.
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
base = tf.logical_and(tf.greater(height, 0), tf.greater(width, 0))
return base
# Initialize history to track the operations applied to the boxes.
box_history = boxes
# Make sure all boxes are valid to start, clip to [0, 1] and get only the
# valid boxes.
output_size = None
if filter_and_clip_boxes:
boxes = tf.math.maximum(tf.math.minimum(boxes, 1.0), 0.0)
cond = get_valid_boxes(boxes)
if infos is None:
infos = []
for info in infos:
# Denormalize the boxes.
boxes = bbox_ops.denormalize_boxes(boxes, info[0])
box_history = bbox_ops.denormalize_boxes(box_history, info[0])
# Shift and scale all boxes, and keep track of box history with no
# box clipping, history is used for removing boxes that have become
# too small or exit the image area.
(boxes, box_history) = resize_and_crop_boxes(
boxes, info[2, :], info[1, :], info[3, :], box_history=box_history)
# Get all the boxes that still remain in the image and store
# in a bit vector for later use.
cond = tf.logical_and(get_valid_boxes(boxes), cond)
# Normalize the boxes to [0, 1].
output_size = info[1]
boxes = bbox_ops.normalize_boxes(boxes, output_size)
box_history = bbox_ops.normalize_boxes(box_history, output_size)
if affine is not None:
# Denormalize the boxes.
boxes = bbox_ops.denormalize_boxes(boxes, affine[0])
box_history = bbox_ops.denormalize_boxes(box_history, affine[0])
# Clipped final boxes.
(boxes, box_history) = affine_warp_boxes(
affine[2], boxes, affine[1], box_history=box_history)
# Get all the boxes that still remain in the image and store
# in a bit vector for later use.
cond = tf.logical_and(get_valid_boxes(boxes), cond)
# Normalize the boxes to [0, 1].
output_size = affine[1]
boxes = bbox_ops.normalize_boxes(boxes, output_size)
box_history = bbox_ops.normalize_boxes(box_history, output_size)
# Remove the bad boxes.
boxes *= tf.cast(tf.expand_dims(cond, axis=-1), boxes.dtype)
# Threshold the existing boxes.
if filter_and_clip_boxes:
if output_size is not None:
boxes_ = bbox_ops.denormalize_boxes(boxes, output_size)
box_history_ = bbox_ops.denormalize_boxes(box_history, output_size)
inds = boxes_candidates(boxes_, box_history_, area_thr=area_thresh)
else:
inds = boxes_candidates(
boxes, box_history, wh_thr=0.0, area_thr=area_thresh)
# Select and gather the good boxes.
if shuffle_boxes:
inds = tf.random.shuffle(inds, seed=seed)
else:
inds = bbox_ops.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, inds)
return boxes, inds
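# Example end-to-end usage (illustrative sketch; sizes and thresholds are
# assumptions for demonstration only):
#
#   image, infos, _ = resize_and_jitter_image(image, [416, 416], jitter=0.3)
#   image, _, affine = affine_warp_image(image, [416, 416], degrees=10.0)
#   boxes, inds = transform_and_clip_boxes(
#       boxes, infos, affine=affine, area_thresh=0.1)
#   classes = tf.gather(classes, inds)  # Keep the labels of surviving boxes.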
| 36,299 | 37.412698 | 80 | py |
models | models-master/official/projects/yolo/ops/initializer_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo initializer ops."""
import tensorflow as tf
def pytorch_kernel_initializer(kernel_initializer):
"""Prepare kernel weights initializer to match PyTorch implementation."""
if kernel_initializer == 'VarianceScaling':
return tf.keras.initializers.VarianceScaling(
scale=1 / 3, mode='fan_in', distribution='uniform'
)
return kernel_initializer
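# Example usage (illustrative sketch; the layer configuration is an assumed
# example, not a required setting):
#
#   conv = tf.keras.layers.Conv2D(
#       filters=32, kernel_size=3,
#       kernel_initializer=pytorch_kernel_initializer('VarianceScaling'))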
| 981 | 35.37037 | 75 | py |
models | models-master/official/projects/yolo/losses/yolo_loss_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolo heads."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.yolo.losses import yolo_loss
class YoloDecoderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(True),
(False),
)
def test_loss_init(self, scaled):
"""Test creation of YOLO family models."""
def inpdict(input_shape, dtype=tf.float32):
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=dtype)
return inputs
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 255],
'4': [1, 26, 26, 255],
'5': [1, 13, 13, 255]
}
classes = 80
anchors = {
'3': [[12.0, 19.0], [31.0, 46.0], [96.0, 54.0]],
'4': [[46.0, 114.0], [133.0, 127.0], [79.0, 225.0]],
'5': [[301.0, 150.0], [172.0, 286.0], [348.0, 340.0]]
}
keys = ['3', '4', '5']
path_strides = {key: 2**int(key) for key in keys}
loss = yolo_loss.YoloLoss(
keys,
classes,
anchors,
path_strides=path_strides,
truth_thresholds={key: 1.0 for key in keys},
ignore_thresholds={key: 0.7 for key in keys},
loss_types={key: 'ciou' for key in keys},
iou_normalizers={key: 0.05 for key in keys},
cls_normalizers={key: 0.5 for key in keys},
object_normalizers={key: 1.0 for key in keys},
objectness_smooths={key: 1.0 for key in keys},
box_types={key: 'scaled' for key in keys},
scale_xys={key: 2.0 for key in keys},
max_deltas={key: 30.0 for key in keys},
label_smoothing=0.0,
use_scaled_loss=scaled,
update_on_repeat=True)
count = inpdict({
'3': [1, 52, 52, 3, 1],
'4': [1, 26, 26, 3, 1],
'5': [1, 13, 13, 3, 1]
})
ind = inpdict({
'3': [1, 300, 3],
'4': [1, 300, 3],
'5': [1, 300, 3]
}, tf.int32)
truths = inpdict({'3': [1, 300, 6], '4': [1, 300, 6], '5': [1, 300, 6]})
boxes = tf.ones([1, 300, 4], dtype=tf.float32)
classes = tf.ones([1, 300], dtype=tf.float32)
gt = {
'true_conf': count,
'inds': ind,
'upds': truths,
'bbox': boxes,
'classes': classes
}
_, _, _ = loss(gt, inpdict(input_shape))
if __name__ == '__main__':
tf.test.main()
| 2,998 | 29.292929 | 76 | py |
models | models-master/official/projects/yolo/losses/yolov7_loss.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv7 loss function."""
import tensorflow as tf
from official.projects.yolo.ops import box_ops
from official.vision.losses import focal_loss
_LAYER_BALANCE = {
'3': [4.0, 1.0, 0.4],
'5': [4.0, 1.0, 0.25, 0.06, 0.02],
}
def smooth_bce_targets(eps=0.1):
"""Computes positive, negative label smoothing BCE targets.
https://arxiv.org/pdf/1902.04103.pdf equation 3.
Args:
eps: a float number from [0, 1] representing label smoothing factor.
Returns:
Positive and negative targets after label smoothing.
"""
return 1.0 - 0.5 * eps, 0.5 * eps
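# For example, smooth_bce_targets(0.1) returns (0.95, 0.05): positives are
# trained toward 0.95 and negatives toward 0.05 instead of 1 and 0.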
def merge_labels(labels):
"""Converts the ground-truth labels into loss targets."""
boxes = box_ops.yxyx_to_xcycwh(labels['bbox'])
classes = tf.cast(labels['classes'], boxes.dtype)
return tf.concat([classes[..., None], boxes], axis=-1)
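# For example, a labels dict with `classes` of shape [B, N] and `bbox` (yxyx)
# of shape [B, N, 4] becomes a single [B, N, 5] tensor laid out as
# [class, xc, yc, w, h] per box.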
class YoloV7Loss(tf.keras.losses.Loss):
"""YOLOv7 loss function."""
def __init__(
self,
anchors,
strides,
input_size,
alpha=0.25,
gamma=1.5,
box_weight=0.05,
obj_weight=0.7,
cls_weight=0.3,
label_smoothing=0.0,
anchor_threshold=4.0,
iou_mix_ratio=1.0,
num_classes=80,
auto_balance=False,
reduction=tf.keras.losses.Reduction.NONE,
name=None,
):
"""Constructor for YOLOv7 loss.
Follows the implementation here:
https://github.com/WongKinYiu/yolov7/blob/main/utils/loss.py#L422
Args:
anchors: a 2D array representing the different anchors used at each level.
strides: a 1D array representing the strides. Note that all numbers should
be a power of 2, and they usually start with level 3 and end at level
5 or 7. Therefore, the list should usually be [8, 16, 32] or
[8, 16, 32, 64, 128].
input_size: a list containing the height and width of the input image.
alpha: alpha for focal loss.
gamma: gamma for focal loss. If set to 0, focal loss will be disabled.
box_weight: float weight scalar applied to bounding box loss.
obj_weight: float weight scalar applied to objectness loss.
cls_weight: float weight scalar applied to class loss.
label_smoothing: small float number used to compute positive and negative
targets. If set to 0, the positive targets will be 1 and negative
targets will be 0.
anchor_threshold: threshold for the anchor matching. Larger number allows
more displacements between anchors and targets.
iou_mix_ratio: float ratio to mix the IoU score with the positive target,
which is 1.
num_classes: number of classes.
auto_balance: a boolean flag that indicates whether auto balance should be
used. If used, the default balance factors will automatically update
for each batch.
reduction: Reduction method. Should be set to None at all times as this
loss module always outputs a loss scalar.
name: Optional name for the loss.
"""
# Loss required fields.
# Stored so that get_config() can serialize the focal-loss settings.
self._alpha = alpha
self._gamma = gamma
self._num_classes = num_classes
self._num_layers = len(strides)
self._num_anchors = len(anchors[0])
self._anchors = anchors
self._strides = strides
self._input_size = input_size
self._iou_mix_ratio = iou_mix_ratio
# Scale down anchors by the strides to match the feature map.
for i, stride in enumerate(strides):
self._anchors[i] = tf.constant(self._anchors[i], tf.float32) / stride
self._anchor_threshold = anchor_threshold
self._pos_targets, self._neg_targets = smooth_bce_targets(label_smoothing)
if gamma > 0:
self._cls_loss_fn = focal_loss.FocalLoss(
alpha=alpha, gamma=gamma, reduction=reduction, name='cls_loss')
self._obj_loss_fn = focal_loss.FocalLoss(
alpha=alpha, gamma=gamma, reduction=reduction, name='obj_loss')
else:
self._cls_loss_fn = tf.nn.sigmoid_cross_entropy_with_logits
self._obj_loss_fn = tf.nn.sigmoid_cross_entropy_with_logits
# Weight to combine losses
self._box_weight = box_weight
self._obj_weight = obj_weight * input_size[0] / 640 * input_size[1] / 640
self._cls_weight = cls_weight * num_classes / 80
# Layer balance scalar
self._balance = _LAYER_BALANCE[str(self._num_layers)][:]
for i, bal in enumerate(self._balance):
self._balance[i] = tf.constant(bal, tf.float32)
self._auto_balance = auto_balance
assert 16 in strides, (
'Expect level 4 (stride of 16) always exist in the strides, received %s'
% strides
)
self._ssi = list(strides).index(16) if auto_balance else 0 # stride 16 idx
super().__init__(reduction=reduction, name=name)
def call(self, labels, predictions):
labels = merge_labels(labels)
p = {}
for key in predictions:
# [batch_size, num_anchors, height, width, num_classes + boxes + obj]
p[key] = tf.transpose(predictions[key], [0, 3, 1, 2, 4])
cls_loss, box_loss, obj_loss, iou_metric = [tf.zeros(1) for _ in range(4)]
total_num_matchings = tf.zeros(1)
total_num_gts = tf.reduce_sum(tf.cast(labels[..., 0] != -1, tf.float32))
masks, indices, anchors, cls_targets, box_targets = self._build_targets(
labels, p)
batch_size = tf.shape(indices)[0]
layer_shape = [batch_size, self._num_layers, -1]
# [anchor_indices, grid_js, grid_is]
masks = tf.reshape(masks, layer_shape)
indices = tf.reshape(indices, [*layer_shape, 3])
anchors = tf.reshape(anchors, [*layer_shape, 2])
cls_targets = tf.reshape(cls_targets, layer_shape)
box_targets = tf.reshape(box_targets, [*layer_shape, 4])
# Losses
for layer_key, layer_pred in p.items():
i = int(layer_key) - 3
obj_targets = tf.zeros_like(layer_pred[..., 0])
layer_masks = masks[:, i]
num_matchings = tf.reduce_sum(tf.cast(layer_masks, tf.int32))
total_num_matchings += tf.cast(num_matchings, tf.float32)
if num_matchings > 0:
layer_indices = indices[:, i]
batch_indices = tf.tile(
tf.range(batch_size)[:, None], [1, tf.shape(layer_indices)[1]]
)[..., None]
layer_indices = tf.concat([batch_indices, layer_indices], axis=-1)
layer_indices = tf.boolean_mask(layer_indices, layer_masks)
layer_anchors = tf.boolean_mask(anchors[:, i], layer_masks)
layer_cls_targets = tf.boolean_mask(cls_targets[:, i], layer_masks)
layer_box_targets = tf.boolean_mask(box_targets[:, i], layer_masks)
# In the same shape of layer_target.
matched_pred = tf.gather_nd(layer_pred, layer_indices)
pred_xcyc = tf.sigmoid(matched_pred[..., :2]) * 2 - 0.5
pred_wh = (
tf.square(tf.sigmoid(matched_pred[..., 2:4]) * 2) * layer_anchors)
pred_xcycwh = tf.concat([pred_xcyc, pred_wh], axis=-1)
_, ciou = box_ops.compute_ciou(pred_xcycwh, layer_box_targets)
box_loss += tf.reduce_mean(1.0 - ciou)
iou_metric += tf.reduce_mean(ciou)
# Compute classification loss.
if self._num_classes > 1: # cls loss (only if multiple classes)
t = tf.one_hot(
layer_cls_targets,
self._num_classes,
on_value=self._pos_targets,
off_value=self._neg_targets,
)
cls_loss += tf.reduce_mean(
self._cls_loss_fn(t, matched_pred[..., 5:]))
# Compute objectness loss.
iou_ratio = tf.cast(
(1.0 - self._iou_mix_ratio)
+ (self._iou_mix_ratio * tf.maximum(tf.stop_gradient(ciou), 0)),
obj_targets.dtype,
)
obj_targets = tf.tensor_scatter_nd_max(
obj_targets, layer_indices, iou_ratio
)
layer_obj_loss = tf.reduce_mean(
self._obj_loss_fn(obj_targets, layer_pred[..., 4])
)
obj_loss += layer_obj_loss * self._balance[i]
# Updates the balance factor, which is a moving average of previous
# factor at the same level.
if self._auto_balance:
self._balance[i] = self._balance[
i
] * 0.9999 + 0.0001 / tf.stop_gradient(layer_obj_loss)
# Re-balance the factors so that stride at self._ssi always receives 1.
if self._auto_balance:
self._balance = [x / self._balance[self._ssi] for x in self._balance]
box_loss *= self._box_weight
obj_loss *= self._obj_weight
cls_loss *= self._cls_weight
self._box_loss = tf.stop_gradient(box_loss)
self._obj_loss = tf.stop_gradient(obj_loss)
self._cls_loss = tf.stop_gradient(cls_loss)
self._iou = tf.stop_gradient(iou_metric) / self._num_layers
self._num_matchings = tf.stop_gradient(
total_num_matchings) / tf.cast(batch_size, tf.float32)
self._num_gts = tf.stop_gradient(
total_num_gts) / tf.cast(batch_size, tf.float32)
loss = box_loss + obj_loss + cls_loss
return loss * tf.cast(batch_size, loss.dtype)
def _build_targets(self, labels, predictions):
"""Finds three matching anchors for each ground-truth."""
label_shape = tf.shape(labels)
batch_size, max_boxes = label_shape[0], label_shape[1]
masks, indices, anch = [], [], []
cls_targets, box_targets = [], []
anchor_indices = tf.tile(
tf.range(self._num_anchors, dtype=tf.float32)[None, None],
[batch_size, max_boxes, 1],
)
# Append anchor indices to labels.
labels = tf.tile(labels[:, :, None], [1, 1, self._num_anchors, 1])
labels = tf.concat([labels, anchor_indices[..., None]], axis=-1)
# Bias is used to determine the matching. 0.5 means matching anchors that
# fall within a 0.5-cell difference in the feature map. For instance, box
# coordinates of (15.6, 35.4) will match the anchors at [15, 35], [16, 35],
# and [15, 34].
bias = 0.5 # bias
off = (
tf.constant(
[
[0, 0],
[1, 0], [0, 1], [-1, 0], [0, -1], # j, k, l, m
# [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
],
tf.float32,
)
* bias
) # offsets
for i in range(self._num_layers):
anchors = self._anchors[i]
_, _, h, w, _ = predictions[str(i + 3)].get_shape().as_list()
gain = tf.constant([1, w, h, w, h, 1], dtype=tf.float32)
t = labels * gain
# Filter out targets that do not match the current anchors.
wh_ratio = t[..., 3:5] / tf.cast(anchors[None, None], tf.float32)
labels_mask = tf.less(
tf.reduce_max(tf.maximum(wh_ratio, 1.0 / wh_ratio), axis=-1),
self._anchor_threshold,
)[..., None]
# Compute valid mask for ground-truths.
labels_mask = tf.logical_and(t[..., :1] != -1, labels_mask)
labels_mask = tf.reshape(labels_mask, [batch_size, -1])
t = tf.reshape(t, [batch_size, -1, 6])
# Find the matching offsets for valid labels.
gxy = t[..., 1:3] # grid xy
gxi = gain[1:3] - gxy # inverse
j, k = tf.split((gxy % 1.0 < bias) & (gxy >= 1.0), 2, axis=-1)
l, m = tf.split((gxi % 1.0 < bias) & (gxi >= 1.0), 2, axis=-1)
j, k, l, m = j[..., 0], k[..., 0], l[..., 0], m[..., 0]
# Note that j and l, k and m are conjugate to each other, so at most one
# of them will be True during running. Therefore, we can reduce memory
# usage by gathering the selected index.
x_map = tf.cast(tf.stack([j, l], axis=-1), tf.int8)
y_map = tf.cast(tf.stack([k, m], axis=-1), tf.int8)
# Add the indices offsets.
x_indices = tf.argmax(x_map, axis=-1) * 2 + 1
y_indices = tf.argmax(y_map, axis=-1) * 2 + 2
three_targets_indices = tf.stack(
[tf.zeros_like(x_indices), x_indices, y_indices], axis=-1
)[..., None]
# Gather the selected 3 targets from the 5-target map.
j = tf.stack([tf.ones_like(j), j, k, l, m], axis=-1)
three_targets_mask = tf.gather_nd(j, three_targets_indices, batch_dims=2)
labels_mask = tf.tile(labels_mask[:, :, None], [1, 1, 5])
t = tf.tile(t[:, :, None], [1, 1, 5, 1])
labels_mask = tf.gather_nd(
labels_mask, three_targets_indices, batch_dims=2
)
t = tf.gather_nd(t, three_targets_indices, batch_dims=2)
offsets = tf.zeros_like(gxy)[:, :, None] + off[None, None]
offsets = tf.gather_nd(offsets, three_targets_indices, batch_dims=2)
cls_target = tf.cast(t[..., 0], tf.int32)
gxy, gwh = t[..., 1:3], t[..., 3:5]
# Find the actual grid locations.
gij = tf.cast(gxy - offsets * 2, tf.int32)
gi, gj = tf.split(gij, 2, axis=-1)
gi, gj = gi[..., 0], gj[..., 0]
# Append the result.
anchor_idx = tf.cast(t[..., 5], tf.int32)
gain = tf.cast(gain, tf.int32)
gi = tf.clip_by_value(gi, 0, gain[2] - 1)
gj = tf.clip_by_value(gj, 0, gain[3] - 1)
gij = tf.stack([gi, gj], axis=-1)
labels_mask = tf.logical_and(labels_mask, three_targets_mask)
masks.append(labels_mask)
indices.append(tf.stack([anchor_idx, gj, gi], axis=-1))
anch.append(tf.gather(anchors, anchor_idx))
cls_targets.append(cls_target)
box_targets.append(
tf.concat([gxy - tf.cast(gij, tf.float32), gwh], axis=-1)) # box
# [batch_size, num_layers, num_anchors * max_boxes, num_targets]
masks = tf.stack(masks, axis=1)
indices = tf.stack(indices, axis=1)
anch = tf.stack(anch, axis=1)
cls_targets = tf.stack(cls_targets, axis=1)
box_targets = tf.stack(box_targets, axis=1)
return masks, indices, anch, cls_targets, box_targets
def report_separate_losses(self):
return {
'box_loss': self._box_loss,
'obj_loss': self._obj_loss,
'cls_loss': self._cls_loss,
'iou': self._iou,
}
def report_stats(self):
return {
'num_gts': self._num_gts,
'num_matchings': self._num_matchings,
# No duplicates.
'num_duplicates': tf.constant(0),
}
def get_config(self):
config = {
'alpha': self._alpha,
'gamma': self._gamma,
'box_weight': self._box_weight,
'obj_weight': self._obj_weight,
'cls_weight': self._cls_weight,
'pos_targets': self._pos_targets,
'neg_targets': self._neg_targets,
'num_classes': self._num_classes,
'num_layers': self._num_layers,
'num_anchors': self._num_anchors,
'auto_balance': self._auto_balance,
'balance': self._balance,
'strides': self._strides,
'anchors': self._anchors,
'input_size': self._input_size,
'anchor_threshold': self._anchor_threshold,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
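# Example usage (illustrative sketch; the anchors, strides, and shapes are
# assumed COCO-style values, not the only valid configuration):
#
#   loss_fn = YoloV7Loss(
#       anchors=[[[12, 16], [19, 36], [40, 28]],
#                [[36, 75], [76, 55], [72, 146]],
#                [[142, 110], [192, 243], [459, 401]]],
#       strides=[8, 16, 32],
#       input_size=[640, 640],
#       num_classes=80)
#   # `labels` holds 'classes' and 'bbox'; `predictions` maps '3', '4', '5' to
#   # tensors of shape [batch, height, width, num_anchors, num_classes + 5].
#   total_loss = loss_fn(labels, predictions)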
class YoloV7LossOTA(tf.keras.losses.Loss):
"""YOLOv7 loss function with OTA.
OTA (Optimal Transport Assignment) uses the Sinkhorn-Knopp algorithm to
compute a matching between anchors and ground-truth labels.
Paper: https://arxiv.org/pdf/2103.14259.pdf
"""
def __init__(
self,
anchors,
strides,
input_size,
alpha=0.25,
gamma=1.5,
box_weight=0.05,
obj_weight=0.7,
cls_weight=0.3,
iou_weight=3.0,
label_smoothing=0.0,
anchor_threshold=4.0,
iou_mix_ratio=1.0,
num_classes=80,
auto_balance=False,
reduction=tf.keras.losses.Reduction.NONE,
name=None,
):
"""Constructor for YOLOv7 loss OTA.
Follows the implementation here:
https://github.com/WongKinYiu/yolov7/blob/main/utils/loss.py#L556
Args:
anchors: a 2D array representing the different anchors used at each level.
strides: a 1D array representing the strides. Note that all numbers should
be a power of 2, and they usually start with level 3 and end at level 5
or 7. Therefore, the list should usually be [8, 16, 32] or [8, 16, 32,
64, 128].
input_size: a list containing the height and width of the input image.
alpha: alpha for focal loss.
gamma: gamma for focal loss. If set to 0, focal loss will be disabled.
box_weight: float weight scalar applied to bounding box loss.
obj_weight: float weight scalar applied to objectness loss.
cls_weight: float weight scalar applied to class loss.
iou_weight: float weight scalar to mix class loss and IoU class to
construct the cost matrix.
label_smoothing: small float number used to compute positive and negative
targets. If set to 0, the positive targets will be 1 and negative
targets will be 0.
anchor_threshold: threshold for the anchor matching. Larger number allows
more displacements between anchors and targets.
iou_mix_ratio: float ratio to mix the IoU score with the positive target,
which is 1.
num_classes: number of classes.
auto_balance: a boolean flag that indicates whether auto balance should be
used. If used, the default balance factors will automatically update for
each batch.
reduction: Reduction method. Should be set to None at all times as this
loss module always outputs a loss scalar.
name: Optional name for the loss.
"""
# Loss required fields.
# Stored so that get_config() can serialize the focal-loss settings.
self._alpha = alpha
self._gamma = gamma
self._num_classes = num_classes
self._num_layers = len(strides)
self._num_anchors = len(anchors[0])
self._anchors = []
self._strides = strides
self._input_size = input_size
self._iou_mix_ratio = iou_mix_ratio
# Scale down anchors by the strides to match the feature map.
for i, stride in enumerate(strides):
self._anchors.append(tf.constant(anchors[i], tf.float32) / stride)
self._anchor_threshold = anchor_threshold
self._pos_targets, self._neg_targets = smooth_bce_targets(label_smoothing)
if gamma > 0:
self._cls_loss_fn = focal_loss.FocalLoss(
alpha=alpha, gamma=gamma, reduction=reduction, name='cls_loss')
self._obj_loss_fn = focal_loss.FocalLoss(
alpha=alpha, gamma=gamma, reduction=reduction, name='obj_loss')
else:
self._cls_loss_fn = tf.nn.sigmoid_cross_entropy_with_logits
self._obj_loss_fn = tf.nn.sigmoid_cross_entropy_with_logits
# Weight to combine losses
self._box_weight = box_weight
self._obj_weight = obj_weight * input_size[0] / 640 * input_size[1] / 640
self._cls_weight = cls_weight * num_classes / 80
# Weight to construct cost matrix
self._iou_weight = iou_weight
# Layer balance scalar
self._balance = _LAYER_BALANCE[str(self._num_layers)][:]
for i, bal in enumerate(self._balance):
self._balance[i] = tf.constant(bal, tf.float32)
self._auto_balance = auto_balance
assert 16 in strides, (
'Expect level 4 (stride of 16) always exist in the strides, received %s'
% strides
)
self._ssi = list(strides).index(16) if auto_balance else 0 # stride 16 idx
super().__init__(reduction=reduction, name=name)
def call(self, labels, predictions):
"""Comptues the OTA loss.
Args:
labels: a dictionary contains the following required keys:
- classes: class indices in shape [batch_size, max_num_instances].
- bbox: bounding boxes in shape [batch_size, max_num_instances, 4].
- image_info: image info in shape [batch_size, 4, 2].
predictions: a dictionary contains model outputs at different layers.
They are in shape of [batch_size, h_at_level, w_at_level, num_anchors,
num_classes + 4 (box coordinates) + 1 (objectness)].
Returns:
The scaled loss (up by batch size) from OTA.
"""
image_info = labels['image_info']
# Convert labels dictionary into tensors.
labels = merge_labels(labels)
p = {}
for key in predictions:
# [batch_size, num_anchors, height, width, num_classes + boxes + obj]
p[key] = tf.transpose(predictions[key], [0, 3, 1, 2, 4])
cls_loss, box_loss, obj_loss, iou_metric = [tf.zeros(1) for _ in range(4)]
total_num_matchings = tf.zeros(1)
total_num_gts = tf.reduce_sum(tf.cast(labels[..., 0] != -1, tf.float32))
(matched_indices, matched_anchors, matched_mask, matched_targets,
num_duplicates) = self._build_targets(labels, p, image_info)
# Get height and width for each layer.
pre_gen_gains = [
tf.gather(tf.shape(p[str(i + 3)]), [3, 2, 3, 2])
for i in range(self._num_layers)
]
batch_size = tf.shape(matched_indices)[0]
layer_shape = [batch_size, self._num_layers, -1]
# [anchor_indices, grid_js, grid_is]
masks = tf.reshape(matched_mask, layer_shape)
indices = tf.reshape(matched_indices, [*layer_shape, 3])
anchors = tf.reshape(matched_anchors, [*layer_shape, 2])
targets = tf.reshape(matched_targets, [*layer_shape, 5])
# Losses
for layer_idx, layer_pred in p.items():
# Always assume the output level starts with 3.
i = int(layer_idx) - 3
obj_targets = tf.zeros_like(layer_pred[..., 0])
# Get layer inputs
layer_masks = masks[:, i]
num_matchings = tf.reduce_sum(tf.cast(layer_masks, tf.int32))
total_num_matchings += tf.cast(num_matchings, tf.float32)
if num_matchings > 0:
layer_indices = indices[:, i]
batch_indices = tf.tile(
tf.range(batch_size)[:, None], [1, tf.shape(layer_indices)[1]]
)[..., None]
layer_indices = tf.concat([batch_indices, layer_indices], axis=-1)
layer_indices = tf.boolean_mask(layer_indices, layer_masks)
layer_anchors = tf.boolean_mask(anchors[:, i], layer_masks)
layer_targets = tf.boolean_mask(targets[:, i], layer_masks)
layer_cls_targets = tf.cast(layer_targets[:, 0], tf.int32)
layer_box_targets = layer_targets[:, 1:]
# In the same shape of layer_target.
matched_pred = tf.gather_nd(layer_pred, layer_indices)
pred_xcyc = tf.sigmoid(matched_pred[..., :2]) * 2 - 0.5
pred_wh = (
tf.square(tf.sigmoid(matched_pred[..., 2:4]) * 2) * layer_anchors)
pred_xcycwh = tf.concat([pred_xcyc, pred_wh], axis=-1)
grid = tf.cast(
tf.stack(
[
layer_indices[:, 3], # gi
layer_indices[:, 2], # gj
tf.zeros_like(layer_indices[:, 0]),
tf.zeros_like(layer_indices[:, 0]),
],
axis=-1,
),
tf.float32,
)
target_xcycwh = layer_box_targets * tf.cast(
pre_gen_gains[i], layer_targets.dtype
)
target_xcycwh -= grid
_, ciou = box_ops.compute_ciou(target_xcycwh, pred_xcycwh)
box_loss += tf.reduce_mean(1.0 - ciou)
iou_metric += tf.reduce_mean(ciou)
# Compute classification loss.
if self._num_classes > 1: # cls loss (only if multiple classes)
t = tf.one_hot(
layer_cls_targets,
self._num_classes,
on_value=self._pos_targets,
off_value=self._neg_targets,
)
cls_loss += tf.reduce_mean(
self._cls_loss_fn(t, matched_pred[..., 5:]))
# Compute objectness loss.
iou_ratio = tf.cast(
(1.0 - self._iou_mix_ratio)
+ (self._iou_mix_ratio * tf.maximum(tf.stop_gradient(ciou), 0)),
obj_targets.dtype,
)
obj_targets = tf.tensor_scatter_nd_max(
obj_targets, layer_indices, iou_ratio
)
layer_obj_loss = tf.reduce_mean(
self._obj_loss_fn(obj_targets, layer_pred[..., 4])
)
obj_loss += layer_obj_loss * self._balance[i]
# Updates the balance factor, which is a moving average of previous
# factor at the same level.
if self._auto_balance:
self._balance[i] = self._balance[
i
] * 0.9999 + 0.0001 / tf.stop_gradient(layer_obj_loss)
# Re-balance the factors so that stride at self._ssi always receives 1.
if self._auto_balance:
self._balance = [x / self._balance[self._ssi] for x in self._balance]
# Keep separate losses for summary purpose.
box_loss *= self._box_weight
obj_loss *= self._obj_weight
cls_loss *= self._cls_weight
self._iou = tf.stop_gradient(iou_metric) / self._num_layers
self._num_matchings = tf.stop_gradient(
total_num_matchings) / tf.cast(batch_size, tf.float32)
self._num_gts = total_num_gts / tf.cast(batch_size, tf.float32)
self._num_duplicates = tf.stop_gradient(
num_duplicates) / tf.cast(batch_size, tf.float32)
self._box_loss = tf.stop_gradient(box_loss)
self._obj_loss = tf.stop_gradient(obj_loss)
self._cls_loss = tf.stop_gradient(cls_loss)
loss = box_loss + obj_loss + cls_loss
# Scale up the loss by batch size.
return loss * tf.cast(batch_size, loss.dtype)
def _build_targets(self, labels, predictions, image_info):
"""Finds the matching targets using Sinkhorn-Knopp."""
# Find the three positives matching first for predictions.
masks, indices, anchors = self._find_three_positives(labels, predictions)
batch_size = tf.shape(masks)[0]
# Collect the predictions.
p_box, p_cls, p_obj = [], [], []
for layer_key, layer_p in predictions.items():
# Always assume level starts from 3.
i = int(layer_key) - 3
layer_indices = tf.reshape(indices[:, i], [batch_size, -1, 3])
anchor = tf.reshape(anchors[:, i], [batch_size, -1, 2])
fg_pred = tf.gather_nd(layer_p, layer_indices, batch_dims=1)
grid = tf.stack([layer_indices[..., 2], layer_indices[..., 1]], axis=-1)
grid = tf.cast(grid, fg_pred.dtype)
pxy = (tf.sigmoid(fg_pred[..., :2]) * 2 - 0.5 + grid) * self._strides[i]
pwh = (
tf.square(tf.sigmoid(fg_pred[..., 2:4]) * 2)
* anchor
* self._strides[i]
)
pxywh = tf.concat([pxy, pwh], axis=-1)
p_box.append(pxywh)
p_obj.append(fg_pred[..., 4:5])
p_cls.append(fg_pred[..., 5:])
p_box = tf.concat(p_box, axis=1)
p_cls = tf.concat(p_cls, axis=1)
p_obj = tf.concat(p_obj, axis=1)
# Compute valid masks for both targets and predictions.
t_mask = labels[..., 0] != -1
p_mask = tf.reshape(masks, [batch_size, -1])
# [anchor_idx, gj, gi]
indices = tf.reshape(indices, [batch_size, -1, 3])
anchors = tf.reshape(anchors, [batch_size, -1, 2])
num_preds = tf.shape(p_box)[1]
num_gts = tf.shape(labels)[1]
# Computes pair-wise IoU.
t_box = labels[..., 1:5] * tf.tile(image_info[0, 1], [2])
pair_wise_iou = box_ops.compute_iou(t_box[:, :, None], p_box[:, None])
pair_wise_iou_loss = -tf.math.log(pair_wise_iou + 1e-8)
# Computes pair-wise class loss.
y = tf.sqrt(tf.sigmoid(p_cls) * tf.sigmoid(p_obj))
# Add 1e-9 to avoid nan.
logits = tf.math.log(y / (1 - y + 1e-9) + 1e-9)
logits = tf.tile(logits[:, None], [1, num_gts, 1, 1])
t_cls = tf.cast(labels[..., 0], tf.int32)
class_labels = tf.one_hot(t_cls, self._num_classes, dtype=tf.float32)
class_labels = tf.tile(class_labels[:, :, None], [1, 1, num_preds, 1])
pair_wise_cls_loss = tf.reduce_sum(
tf.nn.sigmoid_cross_entropy_with_logits(class_labels, logits), axis=-1
)
# Compute the cost matrix and its corresponding valid mask.
cost_mask = tf.logical_and(t_mask[..., None], p_mask[:, None])
cost = tf.stop_gradient(pair_wise_cls_loss + 3 * pair_wise_iou_loss)
largest_cost = tf.reduce_max(cost)
# Set invalid IoU to 0.0 for top_k.
valid_iou = tf.where(cost_mask, pair_wise_iou, tf.zeros_like(pair_wise_iou))
# Compute top-10 IoUs from valid IoUs for each target.
# When the number of matched predictions is smaller than 10, we only want the
# top-k where k is the total size of the matched predictions (k < 10).
top_k_mask = tf.less(
tf.range(10)[None],
tf.minimum(10, tf.reduce_sum(tf.cast(p_mask, tf.int32), axis=-1))[
:, None
],
)
top_k_mask = tf.logical_and(top_k_mask[:, None], t_mask[..., None])
top_k, _ = tf.nn.top_k(valid_iou, k=10)
top_k = tf.where(top_k_mask, top_k, tf.zeros_like(top_k))
# Use top_k to compute the dynamic ks for target matching. Each target_i can
# match to k_i predictions, and k_i is computed based on the pair-wise
# valid IoU.
dynamic_ks = tf.maximum(tf.cast(tf.reduce_sum(top_k, axis=-1), tf.int32), 1)
dynamic_ks = tf.where(t_mask, dynamic_ks, tf.zeros_like(dynamic_ks))
dynamic_ks = tf.stop_gradient(dynamic_ks)
dynamic_mask = tf.range(10)[None, None] < dynamic_ks[..., None]
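# As a concrete illustration (assumed numbers): if the top-10 valid IoUs of a
# target sum to 3.4, then its dynamic_k is 3 and the 3 lowest-cost
# predictions are matched to that target below.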
# Set the invalid field to maximum cost so that they won't be selected
# during matching.
cost = tf.where(cost_mask, cost, tf.ones_like(cost) * (largest_cost + 1))
matching_matrix = tf.zeros_like(cost, dtype=tf.int32)
_, pred_idx = tf.nn.top_k(-cost, k=10)
# Update matching matrix.
# [batch_size, num_gts, 10]
batch_idx = tf.tile(tf.range(batch_size)[:, None, None], [1, num_gts, 10])
gt_idx = tf.tile(tf.range(num_gts)[None, :, None], [batch_size, 1, 10])
matched_indices = tf.stack([batch_idx, gt_idx, pred_idx], axis=-1)
matching_matrix = tf.tensor_scatter_nd_add(
matching_matrix,
matched_indices,
tf.cast(dynamic_mask, matching_matrix.dtype),
)
# Detect if a detection matches multiple targets; if so, we assign it to
# the target with the minimum cost.
duplicate_mask = tf.reduce_sum(matching_matrix, axis=1) > 1
num_duplicates = tf.reduce_sum(tf.cast(duplicate_mask, tf.float32))
cost_argmin = tf.argmin(cost, axis=1, output_type=tf.int32)
remove_mask = tf.tile(duplicate_mask[:, None], [1, num_gts, 1])
matching_matrix = tf.where(
remove_mask, tf.zeros_like(matching_matrix), matching_matrix)
min_mask = tf.equal(
tf.tile(tf.range(num_gts)[None, :, None], [batch_size, 1, num_preds]),
cost_argmin[:, None],
)
update_mask = tf.logical_and(min_mask, duplicate_mask[:, None])
matching_matrix = tf.where(
update_mask, tf.ones_like(matching_matrix), matching_matrix)
# Find the final matching and collect the matched targets.
matched_gt_indices = tf.argmax(
matching_matrix, axis=1, output_type=tf.int32
)
matched_mask = tf.reduce_sum(matching_matrix, axis=1) > 0
matched_targets = tf.gather_nd(
labels, matched_gt_indices[..., None], batch_dims=1
)
return indices, anchors, matched_mask, matched_targets, num_duplicates
def _find_three_positives(self, labels, predictions):
"""Finds three matching anchors for each ground-truth."""
label_shape = tf.shape(labels)
batch_size, max_boxes = label_shape[0], label_shape[1]
masks, indices, anch = [], [], []
anchor_indices = tf.tile(
tf.range(self._num_anchors, dtype=tf.float32)[None, None],
[batch_size, max_boxes, 1],
)
# Append anchor indices to labels.
labels = tf.tile(labels[:, :, None], [1, 1, self._num_anchors, 1])
labels = tf.concat([labels, anchor_indices[..., None]], axis=-1)
# Bias is used to determine the matching. 0.5 means matching anchors that
# fall within a 0.5-cell difference in the feature map. For instance, box
# coordinates of (15.6, 35.4) will match the anchors at [15, 35], [16, 35],
# and [15, 34].
bias = 0.5 # bias
off = (
tf.constant(
[
[0, 0],
[1, 0], [0, 1], [-1, 0], [0, -1], # j, k, l, m
# [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
],
tf.float32,
)
* bias
) # offsets
for i in range(self._num_layers):
anchors = self._anchors[i]
_, _, h, w, _ = predictions[str(i + 3)].get_shape().as_list()
gain = tf.constant([1, w, h, w, h, 1], dtype=tf.float32)
t = labels * gain
# Filter out targets that do not match the current anchors.
wh_ratio = t[..., 3:5] / tf.cast(anchors[None, None], tf.float32)
labels_mask = tf.less(
tf.reduce_max(tf.maximum(wh_ratio, 1.0 / wh_ratio), axis=-1),
self._anchor_threshold,
)[..., None]
# Compute valid mask for ground-truths.
labels_mask = tf.logical_and(t[..., :1] != -1, labels_mask)
labels_mask = tf.reshape(labels_mask, [batch_size, -1])
t = tf.reshape(t, [batch_size, -1, 6])
# Find the matching offsets for valid labels.
gxy = t[..., 1:3] # grid xy
gxi = gain[1:3] - gxy # inverse
j, k = tf.split((gxy % 1.0 < bias) & (gxy >= 1.0), 2, axis=-1)
l, m = tf.split((gxi % 1.0 < bias) & (gxi >= 1.0), 2, axis=-1)
j, k, l, m = j[..., 0], k[..., 0], l[..., 0], m[..., 0]
# Note that j and l, k and m are conjugate to each other, so at most one
# of them will be True during running. Therefore, we can reduce memory
# usage by gathering the selected index.
x_map = tf.cast(tf.stack([j, l], axis=-1), tf.int8)
y_map = tf.cast(tf.stack([k, m], axis=-1), tf.int8)
# Add the indices offsets.
x_indices = tf.argmax(x_map, axis=-1) * 2 + 1
y_indices = tf.argmax(y_map, axis=-1) * 2 + 2
three_targets_indices = tf.stack(
[tf.zeros_like(x_indices), x_indices, y_indices], axis=-1
)[..., None]
# Gather the selected 3 targets from the 5-target map.
j = tf.stack([tf.ones_like(j), j, k, l, m], axis=-1)
three_targets_mask = tf.gather_nd(j, three_targets_indices, batch_dims=2)
labels_mask = tf.tile(labels_mask[:, :, None], [1, 1, 5])
t = tf.tile(t[:, :, None], [1, 1, 5, 1])
labels_mask = tf.gather_nd(
labels_mask, three_targets_indices, batch_dims=2
)
t = tf.gather_nd(t, three_targets_indices, batch_dims=2)
offsets = tf.zeros_like(gxy)[:, :, None] + off[None, None]
offsets = tf.gather_nd(offsets, three_targets_indices, batch_dims=2)
gxy = t[..., 1:3]
# Find the actual grid locations.
gij = tf.cast(gxy - offsets * 2, tf.int32)
gi, gj = tf.split(gij, 2, axis=-1)
gi, gj = gi[..., 0], gj[..., 0]
# Append the result.
anchor_idx = tf.cast(t[..., 5], tf.int32)
gain = tf.cast(gain, tf.int32)
gi = tf.clip_by_value(gi, 0, gain[2] - 1)
gj = tf.clip_by_value(gj, 0, gain[3] - 1)
labels_mask = tf.logical_and(labels_mask, three_targets_mask)
masks.append(labels_mask)
indices.append(tf.stack([anchor_idx, gj, gi], axis=-1))
anch.append(tf.gather(anchors, anchor_idx))
# [batch_size, num_layers, num_anchors * max_boxes, num_targets]
masks = tf.stack(masks, axis=1)
indices = tf.stack(indices, axis=1)
anch = tf.stack(anch, axis=1)
return masks, indices, anch
def report_stats(self):
return {
'num_gts': self._num_gts,
'num_matchings': self._num_matchings,
'num_duplicates': self._num_duplicates,
}
def report_separate_losses(self):
"""Returns separate losses that construct the reported loss."""
return {
'iou': self._iou,
'box_loss': self._box_loss,
'obj_loss': self._obj_loss,
'cls_loss': self._cls_loss,
}
def get_config(self):
"""Configs for the loss constructor."""
config = {
'alpha': self._alpha,
'gamma': self._gamma,
'box_weight': self._box_weight,
'obj_weight': self._obj_weight,
'cls_weight': self._cls_weight,
'iou_weight': self._iou_weight,
'iou_mix_ratio': self._iou_mix_ratio,
'pos_targets': self._pos_targets,
'neg_targets': self._neg_targets,
'num_classes': self._num_classes,
'num_layers': self._num_layers,
'num_anchors': self._num_anchors,
'auto_balance': self._auto_balance,
'balance': self._balance,
'strides': self._strides,
'anchors': self._anchors,
'input_size': self._input_size,
'anchor_threshold': self._anchor_threshold,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| 36,619 | 37.34555 | 80 | py |
models | models-master/official/projects/yolo/losses/yolo_loss.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo Loss function."""
import abc
import collections
import functools
import tensorflow as tf
from official.projects.yolo.ops import box_ops
from official.projects.yolo.ops import loss_utils
from official.projects.yolo.ops import math_ops
class YoloLossBase(object, metaclass=abc.ABCMeta):
"""Parameters for the YOLO loss functions used at each detection generator.
This base class implements the base functionality required to implement a Yolo
Loss function.
"""
def __init__(self,
classes,
anchors,
path_stride=1,
ignore_thresh=0.7,
truth_thresh=1.0,
loss_type='ciou',
iou_normalizer=1.0,
cls_normalizer=1.0,
object_normalizer=1.0,
label_smoothing=0.0,
objectness_smooth=True,
update_on_repeat=False,
box_type='original',
scale_x_y=1.0,
max_delta=10):
"""Loss Function Initialization.
Args:
classes: `int` for the number of classes
anchors: `List[List[int]]` for the anchor boxes that are used in the model
at all levels. For anchor free prediction set the anchor list to be the
same as the image resolution.
path_stride: `int` for how much to scale this level to get the original
input shape.
ignore_thresh: `float` for the IOU value over which the loss is not
propagated, and a detection is assumed to have been made.
truth_thresh: `float` for the IOU value over which the loss is propagated
despite a detection being made.
loss_type: `str` for the type of iou loss to use, within {ciou, diou, giou,
iou}.
iou_normalizer: `float` for how much to scale the loss on the IOU or the
boxes.
cls_normalizer: `float` for how much to scale the loss on the classes.
object_normalizer: `float` for how much to scale loss on the detection
map.
label_smoothing: `float` for how much to smooth the loss on the classes.
objectness_smooth: `float` for how much to smooth the loss on the
detection map.
update_on_repeat: `bool` for whether to replace with the newest or the
best value when an index is consumed by multiple objects.
      box_type: `str` for which scaling type to use.
      scale_x_y: `float` indicating how far each pixel can see outside of its
        containment of 1.0. A value of 1.2 indicates there is a 20% extended
        radius around each pixel within which this specific pixel can predict a
        center, i.e. the center can range from 0 - value/2 to 1 + value/2. This
        value is set in the yolo filter and reused here. There should be one
        value of scale_x_y for each level from min_level to max_level.
max_delta: gradient clipping to apply to the box loss.
"""
self._loss_type = loss_type
self._classes = classes
self._num = tf.cast(len(anchors), dtype=tf.int32)
self._truth_thresh = truth_thresh
self._ignore_thresh = ignore_thresh
self._anchors = anchors
self._iou_normalizer = iou_normalizer
self._cls_normalizer = cls_normalizer
self._object_normalizer = object_normalizer
self._scale_x_y = scale_x_y
self._max_delta = max_delta
self._label_smoothing = tf.cast(label_smoothing, tf.float32)
self._objectness_smooth = float(objectness_smooth)
self._update_on_repeat = update_on_repeat
self._box_type = box_type
self._path_stride = path_stride
box_kwargs = dict(
stride=self._path_stride,
scale_xy=self._scale_x_y,
box_type=self._box_type,
max_delta=self._max_delta)
self._decode_boxes = functools.partial(
loss_utils.get_predicted_box, **box_kwargs)
self._search_pairs = lambda *args: (None, None, None, None)
self._build_per_path_attributes()
def box_loss(self, true_box, pred_box, darknet=False):
"""Call iou function and use it to compute the loss for the box maps."""
if self._loss_type == 'giou':
iou, liou = box_ops.compute_giou(true_box, pred_box)
elif self._loss_type == 'ciou':
iou, liou = box_ops.compute_ciou(true_box, pred_box, darknet=darknet)
else:
liou = iou = box_ops.compute_iou(true_box, pred_box)
loss_box = 1 - liou
return iou, liou, loss_box
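  # Example (illustrative only): for a pair of matched box tensors, `box_loss`
  # returns the plain IOU, the (possibly regularized) IOU used for the loss,
  # and `1 - liou` as the loss term:
  #   iou, liou, loss = self.box_loss(true_box, pred_box)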
def _tiled_global_box_search(self,
pred_boxes,
pred_classes,
boxes,
classes,
true_conf,
smoothed,
scale=None):
"""Search of all groundtruths to associate groundtruths to predictions."""
boxes = box_ops.yxyx_to_xcycwh(boxes)
if scale is not None:
boxes = boxes * tf.cast(tf.stop_gradient(scale), boxes.dtype)
    # Search all predictions against ground truths to find matching boxes for
# each pixel.
_, _, iou_max, _ = self._search_pairs(pred_boxes, pred_classes, boxes,
classes)
if iou_max is None:
return true_conf, tf.ones_like(true_conf)
# Find the exact indexes to ignore and keep.
ignore_mask = tf.cast(iou_max < self._ignore_thresh, pred_boxes.dtype)
iou_mask = iou_max > self._ignore_thresh
if not smoothed:
# Ignore all pixels where a box was not supposed to be predicted but a
# high confidence box was predicted.
obj_mask = true_conf + (1 - true_conf) * ignore_mask
else:
      # Replace pixels in the true confidence map with the max iou predicted
      # within that cell.
obj_mask = tf.ones_like(true_conf)
iou_ = (1 - self._objectness_smooth) + self._objectness_smooth * iou_max
iou_ = tf.where(iou_max > 0, iou_, tf.zeros_like(iou_))
true_conf = tf.where(iou_mask, iou_, true_conf)
# Stop gradient so while loop is not tracked.
obj_mask = tf.stop_gradient(obj_mask)
true_conf = tf.stop_gradient(true_conf)
return true_conf, obj_mask
def __call__(self, true_counts, inds, y_true, boxes, classes, y_pred):
"""Call function to compute the loss and a set of metrics per FPN level.
Args:
true_counts: `Tensor` of shape [batchsize, height, width, num_anchors]
        representing how many boxes are in a given pixel [j, i] in the output
map.
inds: `Tensor` of shape [batchsize, None, 3] indicating the location [j,
        i] that a given box is associated with in the FPN prediction map.
y_true: `Tensor` of shape [batchsize, None, 8] indicating the actual box
associated with each index in the inds tensor list.
boxes: `Tensor` of shape [batchsize, None, 4] indicating the original
ground truth boxes for each image as they came from the decoder used for
bounding box search.
classes: `Tensor` of shape [batchsize, None, 1] indicating the original
ground truth classes for each image as they came from the decoder used
for bounding box search.
y_pred: `Tensor` of shape [batchsize, height, width, output_depth] holding
the models output at a specific FPN level.
Returns:
loss: `float` for the actual loss.
box_loss: `float` loss on the boxes used for metrics.
conf_loss: `float` loss on the confidence used for metrics.
class_loss: `float` loss on the classes used for metrics.
avg_iou: `float` metric for the average iou between predictions and ground
truth.
avg_obj: `float` metric for the average confidence of the model for
predictions.
"""
(loss, box_loss, conf_loss, class_loss, mean_loss, iou, pred_conf, ind_mask,
grid_mask) = self._compute_loss(true_counts, inds, y_true, boxes, classes,
y_pred)
    # Metric computation is done here to save time and resources.
sigmoid_conf = tf.stop_gradient(tf.sigmoid(pred_conf))
iou = tf.stop_gradient(iou)
avg_iou = loss_utils.average_iou(
loss_utils.apply_mask(tf.squeeze(ind_mask, axis=-1), iou))
avg_obj = loss_utils.average_iou(
tf.squeeze(sigmoid_conf, axis=-1) * grid_mask)
return (loss, box_loss, conf_loss, class_loss, mean_loss,
tf.stop_gradient(avg_iou), tf.stop_gradient(avg_obj))
@abc.abstractmethod
def _build_per_path_attributes(self):
"""Additional initialization required for each YOLO loss version."""
...
@abc.abstractmethod
def _compute_loss(self, true_counts, inds, y_true, boxes, classes, y_pred):
"""The actual logic to apply to the raw model for optimization."""
...
def post_path_aggregation(self, loss, box_loss, conf_loss, class_loss,
ground_truths, predictions): # pylint:disable=unused-argument
"""This method allows for post processing of a loss value.
After the loss has been aggregated across all the FPN levels some post
    processing may need to occur to properly scale the loss. The default
behavior is to pass the loss through with no alterations. Passing the
    individual losses for each mask will allow for aggregation of loss across
paths for some losses.
Args:
loss: `tf.float` scalar for the actual loss.
      box_loss: `tf.float` for the loss on the boxes only.
conf_loss: `tf.float` for the loss on the confidences only.
class_loss: `tf.float` for the loss on the classes only.
ground_truths: `Dict` holding all the ground truth tensors.
predictions: `Dict` holding all the predicted values.
Returns:
loss: `tf.float` scalar for the scaled loss.
scale: `tf.float` how much the loss was scaled by.
"""
del box_loss
del conf_loss
del class_loss
del ground_truths
del predictions
return loss, tf.ones_like(loss)
@abc.abstractmethod
def cross_replica_aggregation(self, loss, num_replicas_in_sync):
"""This controls how the loss should be aggregated across replicas."""
...
@tf.custom_gradient
def grad_sigmoid(values):
"""This function scales the gradient as if a signmoid was applied.
This is used in the Darknet Loss when the choosen box type is the scaled
coordinate type. This function is used to match the propagated gradient to
match that of the Darkent Yolov4 model. This is an Identity operation that
allows us to add some extra steps to the back propagation.
Args:
values: A tensor of any shape.
Returns:
values: The unaltered input tensor.
delta: A custom gradient function that adds the sigmoid step to the
backpropagation.
"""
def delta(dy):
t = tf.math.sigmoid(values)
return dy * t * (1 - t)
return values, delta
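# Example (illustrative only, assuming eager execution): `grad_sigmoid` is an
# identity in the forward pass but backpropagates the derivative of a sigmoid.
#
#   x = tf.constant([0.0, 2.0])
#   with tf.GradientTape() as tape:
#     tape.watch(x)
#     y = grad_sigmoid(x)        # forward pass: y == x
#   g = tape.gradient(y, x)      # g == sigmoid(x) * (1 - sigmoid(x))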
class DarknetLoss(YoloLossBase):
"""This class implements the full logic for the standard Yolo models."""
def _build_per_path_attributes(self):
"""Paramterization of pair wise search and grid generators.
Objects created here are used for box decoding and dynamic ground truth
association.
"""
self._anchor_generator = loss_utils.GridGenerator(
anchors=self._anchors,
scale_anchors=self._path_stride)
if self._ignore_thresh > 0.0:
self._search_pairs = loss_utils.PairWiseSearch(
iou_type='iou', any_match=True, min_conf=0.25)
return
def _compute_loss(self, true_counts, inds, y_true, boxes, classes, y_pred):
"""Per FPN path loss logic used for Yolov3, Yolov4, and Yolo-Tiny."""
if self._box_type == 'scaled':
      # The Darknet model propagates a sigmoid once in backprop, so we
      # replicate that behaviour here.
y_pred = grad_sigmoid(y_pred)
# Generate and store constants and format output.
shape = tf.shape(true_counts)
batch_size, width, height, num = shape[0], shape[1], shape[2], shape[3]
fwidth = tf.cast(width, tf.float32)
fheight = tf.cast(height, tf.float32)
grid_points, anchor_grid = self._anchor_generator(
width, height, batch_size, dtype=tf.float32)
    # Cast all input components to float32 and stop gradient to save memory.
boxes = tf.stop_gradient(tf.cast(boxes, tf.float32))
classes = tf.stop_gradient(tf.cast(classes, tf.float32))
y_true = tf.stop_gradient(tf.cast(y_true, tf.float32))
true_counts = tf.stop_gradient(tf.cast(true_counts, tf.float32))
true_conf = tf.stop_gradient(tf.clip_by_value(true_counts, 0.0, 1.0))
grid_points = tf.stop_gradient(grid_points)
anchor_grid = tf.stop_gradient(anchor_grid)
# Split all the ground truths to use as separate items in loss computation.
(true_box, ind_mask, true_class) = tf.split(y_true, [4, 1, 1], axis=-1)
true_conf = tf.squeeze(true_conf, axis=-1)
true_class = tf.squeeze(true_class, axis=-1)
grid_mask = true_conf
# Splits all predictions.
y_pred = tf.cast(
tf.reshape(y_pred, [batch_size, width, height, num, -1]), tf.float32)
pred_box, pred_conf, pred_class = tf.split(y_pred, [4, 1, -1], axis=-1)
# Decode the boxes to be used for loss compute.
_, _, pred_box = self._decode_boxes(
fwidth, fheight, pred_box, anchor_grid, grid_points, darknet=True)
    # If the ignore threshold is enabled, search all boxes and ignore all
    # IOU values larger than the ignore threshold that are not in the
# noted ground truth list.
if self._ignore_thresh != 0.0:
(true_conf, obj_mask) = self._tiled_global_box_search(
pred_box,
tf.stop_gradient(tf.sigmoid(pred_class)),
boxes,
classes,
true_conf,
smoothed=self._objectness_smooth > 0)
# Build the one hot class list that are used for class loss.
true_class = tf.one_hot(
tf.cast(true_class, tf.int32),
depth=tf.shape(pred_class)[-1],
dtype=pred_class.dtype)
true_class = tf.stop_gradient(loss_utils.apply_mask(ind_mask, true_class))
# Reorganize the one hot class list as a grid.
true_class_grid = loss_utils.build_grid(
inds, true_class, pred_class, ind_mask, update=False)
true_class_grid = tf.stop_gradient(true_class_grid)
# Use the class mask to find the number of objects located in
# each predicted grid cell/pixel.
counts = true_class_grid
counts = tf.reduce_sum(counts, axis=-1, keepdims=True)
reps = tf.gather_nd(counts, inds, batch_dims=1)
reps = tf.squeeze(reps, axis=-1)
reps = tf.stop_gradient(tf.where(reps == 0.0, tf.ones_like(reps), reps))
# Compute the loss for only the cells in which the boxes are located.
pred_box = loss_utils.apply_mask(ind_mask,
tf.gather_nd(pred_box, inds, batch_dims=1))
iou, _, box_loss = self.box_loss(true_box, pred_box, darknet=True)
box_loss = loss_utils.apply_mask(tf.squeeze(ind_mask, axis=-1), box_loss)
box_loss = math_ops.divide_no_nan(box_loss, reps)
box_loss = tf.cast(tf.reduce_sum(box_loss, axis=1), dtype=y_pred.dtype)
if self._update_on_repeat:
      # Converts list of ground truths into a grid where repeated values
# are replaced by the most recent value. So some class identities may
# get lost but the loss computation will be more stable. Results are
# more consistent.
# Compute the sigmoid binary cross entropy for the class maps.
class_loss = tf.reduce_mean(
loss_utils.sigmoid_bce(
tf.expand_dims(true_class_grid, axis=-1),
tf.expand_dims(pred_class, axis=-1), self._label_smoothing),
axis=-1)
# Apply normalization to the class losses.
if self._cls_normalizer < 1.0:
# Build a mask based on the true class locations.
cls_norm_mask = true_class_grid
        # Apply the class weight to class indexes where one_hot is one.
class_loss *= ((1 - cls_norm_mask) +
cls_norm_mask * self._cls_normalizer)
# Mask to the class loss and compute the sum over all the objects.
class_loss = tf.reduce_sum(class_loss, axis=-1)
class_loss = loss_utils.apply_mask(grid_mask, class_loss)
class_loss = math_ops.rm_nan_inf(class_loss, val=0.0)
class_loss = tf.cast(
tf.reduce_sum(class_loss, axis=(1, 2, 3)), dtype=y_pred.dtype)
else:
# Computes the loss while keeping the structure as a list in
      # order to ensure all objects are considered. In some cases this can
# make training more unstable but may also return higher APs.
pred_class = loss_utils.apply_mask(
ind_mask, tf.gather_nd(pred_class, inds, batch_dims=1))
class_loss = tf.keras.losses.binary_crossentropy(
tf.expand_dims(true_class, axis=-1),
tf.expand_dims(pred_class, axis=-1),
label_smoothing=self._label_smoothing,
from_logits=True)
class_loss = loss_utils.apply_mask(ind_mask, class_loss)
class_loss = math_ops.divide_no_nan(class_loss,
tf.expand_dims(reps, axis=-1))
class_loss = tf.cast(
tf.reduce_sum(class_loss, axis=(1, 2)), dtype=y_pred.dtype)
class_loss *= self._cls_normalizer
# Compute the sigmoid binary cross entropy for the confidence maps.
bce = tf.reduce_mean(
loss_utils.sigmoid_bce(
tf.expand_dims(true_conf, axis=-1), pred_conf, 0.0),
axis=-1)
# Mask the confidence loss and take the sum across all the grid cells.
if self._ignore_thresh != 0.0:
bce = loss_utils.apply_mask(obj_mask, bce)
conf_loss = tf.cast(tf.reduce_sum(bce, axis=(1, 2, 3)), dtype=y_pred.dtype)
# Apply the weights to each loss.
box_loss *= self._iou_normalizer
conf_loss *= self._object_normalizer
# Add all the losses together then take the mean over the batches.
loss = box_loss + class_loss + conf_loss
loss = tf.reduce_mean(loss)
# Reduce the mean of the losses to use as a metric.
box_loss = tf.reduce_mean(box_loss)
conf_loss = tf.reduce_mean(conf_loss)
class_loss = tf.reduce_mean(class_loss)
return (loss, box_loss, conf_loss, class_loss, loss, iou, pred_conf,
ind_mask, grid_mask)
def cross_replica_aggregation(self, loss, num_replicas_in_sync):
"""This method is not specific to each loss path, but each loss type."""
return loss / num_replicas_in_sync
class ScaledLoss(YoloLossBase):
"""This class implements the full logic for the scaled Yolo models."""
def _build_per_path_attributes(self):
"""Paramterization of pair wise search and grid generators.
Objects created here are used for box decoding and dynamic ground truth
association.
"""
self._anchor_generator = loss_utils.GridGenerator(
anchors=self._anchors,
scale_anchors=self._path_stride)
if self._ignore_thresh > 0.0:
self._search_pairs = loss_utils.PairWiseSearch(
iou_type=self._loss_type, any_match=False, min_conf=0.25)
self._cls_normalizer = self._cls_normalizer * self._classes / 80
return
def _compute_loss(self, true_counts, inds, y_true, boxes, classes, y_pred):
"""Per FPN path loss logic for Yolov4-csp, Yolov4-Large, and Yolov5."""
# Generate shape constants.
shape = tf.shape(true_counts)
batch_size, width, height, num = shape[0], shape[1], shape[2], shape[3]
fwidth = tf.cast(width, tf.float32)
fheight = tf.cast(height, tf.float32)
    # Cast all input components to float32 and stop gradient to save memory.
y_true = tf.cast(y_true, tf.float32)
true_counts = tf.cast(true_counts, tf.float32)
true_conf = tf.clip_by_value(true_counts, 0.0, 1.0)
grid_points, anchor_grid = self._anchor_generator(
width, height, batch_size, dtype=tf.float32)
# Split the y_true list.
(true_box, ind_mask, true_class) = tf.split(y_true, [4, 1, 1], axis=-1)
grid_mask = true_conf = tf.squeeze(true_conf, axis=-1)
true_class = tf.squeeze(true_class, axis=-1)
num_objs = tf.cast(tf.reduce_sum(ind_mask), dtype=y_pred.dtype)
    # Split up the predictions.
y_pred = tf.cast(
tf.reshape(y_pred, [batch_size, width, height, num, -1]), tf.float32)
pred_box, pred_conf, pred_class = tf.split(y_pred, [4, 1, -1], axis=-1)
# Decode the boxes for loss compute.
scale, pred_box, pbg = self._decode_boxes(
fwidth, fheight, pred_box, anchor_grid, grid_points, darknet=False)
    # If the ignore threshold is enabled, search all boxes and ignore all
    # IOU values larger than the ignore threshold that are not in the
# noted ground truth list.
if self._ignore_thresh != 0.0:
(_, obj_mask) = self._tiled_global_box_search(
pbg,
tf.stop_gradient(tf.sigmoid(pred_class)),
boxes,
classes,
true_conf,
smoothed=False,
scale=None)
    # Scale, shift, and select the ground truth boxes
    # and predictions into the prediction domain.
if self._box_type == 'anchor_free':
true_box = loss_utils.apply_mask(ind_mask,
(scale * self._path_stride * true_box))
else:
offset = tf.cast(
tf.gather_nd(grid_points, inds, batch_dims=1), true_box.dtype)
offset = tf.concat([offset, tf.zeros_like(offset)], axis=-1)
true_box = loss_utils.apply_mask(ind_mask, (scale * true_box) - offset)
pred_box = loss_utils.apply_mask(ind_mask,
tf.gather_nd(pred_box, inds, batch_dims=1))
# Select the correct/used prediction classes.
true_class = tf.one_hot(
tf.cast(true_class, tf.int32),
depth=tf.shape(pred_class)[-1],
dtype=pred_class.dtype)
true_class = loss_utils.apply_mask(ind_mask, true_class)
pred_class = loss_utils.apply_mask(
ind_mask, tf.gather_nd(pred_class, inds, batch_dims=1))
# Compute the box loss.
_, iou, box_loss = self.box_loss(true_box, pred_box, darknet=False)
box_loss = loss_utils.apply_mask(tf.squeeze(ind_mask, axis=-1), box_loss)
box_loss = math_ops.divide_no_nan(tf.reduce_sum(box_loss), num_objs)
# Use the box IOU to build the map for confidence loss computation.
iou = tf.maximum(tf.stop_gradient(iou), 0.0)
smoothed_iou = ((
(1 - self._objectness_smooth) * tf.cast(ind_mask, iou.dtype)) +
self._objectness_smooth * tf.expand_dims(iou, axis=-1))
smoothed_iou = loss_utils.apply_mask(ind_mask, smoothed_iou)
true_conf = loss_utils.build_grid(
inds, smoothed_iou, pred_conf, ind_mask, update=self._update_on_repeat)
true_conf = tf.squeeze(true_conf, axis=-1)
# Compute the cross entropy loss for the confidence map.
bce = tf.keras.losses.binary_crossentropy(
tf.expand_dims(true_conf, axis=-1), pred_conf, from_logits=True)
if self._ignore_thresh != 0.0:
bce = loss_utils.apply_mask(obj_mask, bce)
conf_loss = tf.reduce_sum(bce) / tf.reduce_sum(obj_mask)
else:
conf_loss = tf.reduce_mean(bce)
# Compute the cross entropy loss for the class maps.
class_loss = tf.keras.losses.binary_crossentropy(
true_class,
pred_class,
label_smoothing=self._label_smoothing,
from_logits=True)
class_loss = loss_utils.apply_mask(
tf.squeeze(ind_mask, axis=-1), class_loss)
class_loss = math_ops.divide_no_nan(tf.reduce_sum(class_loss), num_objs)
# Apply the weights to each loss.
box_loss *= self._iou_normalizer
class_loss *= self._cls_normalizer
conf_loss *= self._object_normalizer
# Add all the losses together then take the sum over the batches.
mean_loss = box_loss + class_loss + conf_loss
loss = mean_loss * tf.cast(batch_size, mean_loss.dtype)
return (loss, box_loss, conf_loss, class_loss, mean_loss, iou, pred_conf,
ind_mask, grid_mask)
def post_path_aggregation(self, loss, box_loss, conf_loss, class_loss,
ground_truths, predictions):
"""This method allows for post processing of a loss value.
    By default the model will have 3 FPN levels {3, 4, 5}. On larger
    models that have more FPN levels, e.g. 4 or 5, the loss needs to
    be scaled such that the total update has the same effective
    magnitude as the model with 3 FPN levels. This helps to prevent gradient
    explosions.
Args:
loss: `tf.float` scalar for the actual loss.
      box_loss: `tf.float` for the loss on the boxes only.
conf_loss: `tf.float` for the loss on the confidences only.
class_loss: `tf.float` for the loss on the classes only.
ground_truths: `Dict` holding all the ground truth tensors.
predictions: `Dict` holding all the predicted values.
Returns:
loss: `tf.float` scalar for the scaled loss.
scale: `tf.float` how much the loss was scaled by.
"""
scale = tf.stop_gradient(3 / len(list(predictions.keys())))
return loss * scale, 1 / scale
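  # For example, a model with 5 FPN paths uses scale = 3 / 5 = 0.6, so the loss
  # returned for optimization is multiplied by 0.6 and the second return value
  # is 1 / 0.6.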
def cross_replica_aggregation(self, loss, num_replicas_in_sync):
"""This method is not specific to each loss path, but each loss type."""
return loss
class YoloLoss:
"""This class implements the aggregated loss across YOLO model FPN levels."""
def __init__(self,
keys,
classes,
anchors,
path_strides=None,
truth_thresholds=None,
ignore_thresholds=None,
loss_types=None,
iou_normalizers=None,
cls_normalizers=None,
object_normalizers=None,
objectness_smooths=None,
box_types=None,
scale_xys=None,
max_deltas=None,
label_smoothing=0.0,
use_scaled_loss=False,
update_on_repeat=True):
"""Loss Function Initialization.
Args:
keys: `List[str]` indicating the name of the FPN paths that need to be
optimized.
classes: `int` for the number of classes
anchors: `List[List[int]]` for the anchor boxes that are used in the model
at all levels. For anchor free prediction set the anchor list to be the
same as the image resolution.
path_strides: `Dict[int]` for how much to scale this level to get the
        original input shape for each FPN path.
truth_thresholds: `Dict[float]` for the IOU value over which the loss is
propagated despite a detection being made for each FPN path.
ignore_thresholds: `Dict[float]` for the IOU value over which the loss is
not propagated, and a detection is assumed to have been made for each
FPN path.
      loss_types: `Dict[str]` for the type of iou loss to use from {ciou,
diou, giou, iou} for each FPN path.
iou_normalizers: `Dict[float]` for how much to scale the loss on the IOU
or the boxes for each FPN path.
cls_normalizers: `Dict[float]` for how much to scale the loss on the
classes for each FPN path.
object_normalizers: `Dict[float]` for how much to scale loss on the
detection map for each FPN path.
objectness_smooths: `Dict[float]` for how much to smooth the loss on the
detection map for each FPN path.
      box_types: `Dict[str]` for which scaling type to use for each FPN path.
      scale_xys: `Dict[float]` values indicating how far each pixel can see
        outside of its containment of 1.0. A value of 1.2 indicates there is a
        20% extended radius around each pixel within which this specific pixel
        can predict a center, i.e. the center can range from 0 - value/2 to
        1 + value/2. This value is set in the yolo filter and reused here.
        There should be one value of scale_xy for each level from min_level to
        max_level, one for each FPN path.
max_deltas: `Dict[float]` for gradient clipping to apply to the box loss
for each FPN path.
      label_smoothing: `float` for how much to smooth the loss on the classes;
        the same value is shared across all FPN paths.
use_scaled_loss: `bool` for whether to use the scaled loss or the
traditional loss.
update_on_repeat: `bool` for whether to replace with the newest or the
best value when an index is consumed by multiple objects.
"""
losses = {'darknet': DarknetLoss, 'scaled': ScaledLoss}
if use_scaled_loss:
loss_type = 'scaled'
else:
loss_type = 'darknet'
self._loss_dict = {}
for key in keys:
self._loss_dict[key] = losses[loss_type](
classes=classes,
anchors=anchors[key],
truth_thresh=truth_thresholds[key],
ignore_thresh=ignore_thresholds[key],
loss_type=loss_types[key],
iou_normalizer=iou_normalizers[key],
cls_normalizer=cls_normalizers[key],
object_normalizer=object_normalizers[key],
box_type=box_types[key],
objectness_smooth=objectness_smooths[key],
max_delta=max_deltas[key],
path_stride=path_strides[key],
scale_x_y=scale_xys[key],
update_on_repeat=update_on_repeat,
label_smoothing=label_smoothing)
def __call__(self, ground_truth, predictions):
metric_dict = collections.defaultdict(dict)
metric_dict['net']['box'] = 0
metric_dict['net']['class'] = 0
metric_dict['net']['conf'] = 0
loss_val, metric_loss = 0, 0
num_replicas_in_sync = tf.distribute.get_strategy().num_replicas_in_sync
for key in predictions.keys():
(loss, loss_box, loss_conf, loss_class, mean_loss, avg_iou,
avg_obj) = self._loss_dict[key](ground_truth['true_conf'][key],
ground_truth['inds'][key],
ground_truth['upds'][key],
ground_truth['bbox'],
ground_truth['classes'],
predictions[key])
# after computing the loss, scale loss as needed for aggregation
# across FPN levels
loss, scale = self._loss_dict[key].post_path_aggregation(
loss, loss_box, loss_conf, loss_class, ground_truth, predictions)
# after completing the scaling of the loss on each replica, handle
      # scaling the loss for merging the loss across replicas
loss = self._loss_dict[key].cross_replica_aggregation(
loss, num_replicas_in_sync)
loss_val += loss
# detach all the below gradients: none of them should make a
      # contribution to the gradient from this point forwards
metric_loss += tf.stop_gradient(mean_loss / scale)
metric_dict[key]['loss'] = tf.stop_gradient(mean_loss / scale)
metric_dict[key]['avg_iou'] = tf.stop_gradient(avg_iou)
metric_dict[key]['avg_obj'] = tf.stop_gradient(avg_obj)
metric_dict['net']['box'] += tf.stop_gradient(loss_box / scale)
metric_dict['net']['class'] += tf.stop_gradient(loss_class / scale)
metric_dict['net']['conf'] += tf.stop_gradient(loss_conf / scale)
return loss_val, metric_loss, metric_dict
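# Example (illustrative only; all values below are placeholders, not defaults
# from this file): the aggregated loss is built with per-FPN-path dicts and
# called with the ground truth and prediction dicts produced by the YOLO input
# pipeline and model head.
#
#   keys = ['4', '5']
#   loss_fn = YoloLoss(
#       keys=keys,
#       classes=80,
#       anchors={k: [[12, 16], [19, 36], [40, 28]] for k in keys},
#       path_strides={'4': 16, '5': 32},
#       truth_thresholds={k: 1.0 for k in keys},
#       ignore_thresholds={k: 0.7 for k in keys},
#       loss_types={k: 'ciou' for k in keys},
#       iou_normalizers={k: 0.05 for k in keys},
#       cls_normalizers={k: 0.3 for k in keys},
#       object_normalizers={k: 0.7 for k in keys},
#       objectness_smooths={k: 1.0 for k in keys},
#       box_types={k: 'scaled' for k in keys},
#       scale_xys={k: 2.0 for k in keys},
#       max_deltas={k: 10.0 for k in keys},
#       use_scaled_loss=True)
#   # `ground_truth` must hold 'true_conf', 'inds', 'upds', 'bbox' and
#   # 'classes'; `predictions` maps each key to that level's raw output.
#   total_loss, metric_loss, metrics = loss_fn(ground_truth, predictions)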
| 31,724 | 41.3 | 90 | py |
models | models-master/official/projects/maxvit/modeling/maxvit_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for MaxViT."""
import collections
from typing import Optional, Sequence
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.maxvit.configs import backbones
from official.projects.maxvit.modeling import maxvit
from official.vision.configs import common
class MaxViTBlockTest(tf.test.TestCase):
"""Test the layers of MaxViT."""
def testMaxViTBlockCreation(self) -> None:
"""Ensures that layers can be constructed and forward-props can run."""
inputs_shape = [2, 64, 64, 3]
inp = tf.random.uniform(
shape=inputs_shape, minval=-1.0, maxval=1.0, dtype=tf.float32
)
model = maxvit.MaxViTBlock(
hidden_size=8, head_size=4, window_size=4, grid_size=4
)
out = model(inp, training=False)
self.assertAllEqual([2, 64, 64, 8], out.get_shape().as_list())
self.assertDTypeEqual(tf.reduce_mean(out).numpy(), np.float32)
class MaxViTTest(tf.test.TestCase, parameterized.TestCase):
"""Test the layers of MaxViT."""
@parameterized.named_parameters(
collections.OrderedDict(
testcase_name='MaxViTTest',
input_shape=[2, 64, 64, 3],
input_dtype=tf.float32,
training=False,
stem_hsize=[12, 12],
num_blocks=[2, 2, 2, 2],
window_size=2,
grid_size=2,
block_type=['maxvit', 'maxvit', 'maxvit'],
hidden_size=[16, 32, 64],
expected_shape=[2, 4, 4, 64],
name='maxvit_test',
),
collections.OrderedDict(
testcase_name='MaxViTTiny',
input_shape=[2, 64, 64, 3],
input_dtype=tf.float32,
training=False,
block_type=['maxvit', 'maxvit', 'maxvit', 'maxvit'],
stem_hsize=[64, 64],
num_blocks=[2, 3, 5, 2],
window_size=2,
grid_size=2,
hidden_size=[96, 192, 384, 768],
expected_shape=[2, 2, 2, 768],
name='maxvit_tiny',
),
collections.OrderedDict(
testcase_name='MaxViTTinyWithPrelogits',
input_shape=[2, 64, 64, 3],
input_dtype=tf.float32,
training=False,
representation_size=16,
add_gap_layer_norm=True,
block_type=['maxvit', 'maxvit', 'maxvit', 'maxvit'],
stem_hsize=[64, 64],
num_blocks=[2, 3, 5, 2],
window_size=2,
grid_size=2,
hidden_size=[96, 192, 384, 768],
expected_shape=[2, 2, 2, 768],
name='maxvit_tiny',
),
)
def testForward(
self,
input_shape: Sequence[int],
input_dtype: Optional[tf.DType] = tf.float32,
**kwargs
) -> None:
"""Ensures that layers can be constructed and forward-props can run."""
inp = tf.random.uniform(
input_shape,
minval=-1.0,
maxval=1.0,
dtype=input_dtype,
)
model = maxvit.MaxViT(**kwargs)
out = model(inp, training=kwargs.get('training', None))
add_gap_layer_norm = kwargs.get('add_gap_layer_norm', False)
if add_gap_layer_norm:
self.assertAllEqual([input_shape[0], kwargs['representation_size']],
out['pre_logits'].get_shape().as_list())
# Remove `pre_logits` if exists.
out.pop('pre_logits', None)
out = out[max(out.keys())]
self.assertAllEqual(kwargs['expected_shape'], out.get_shape().as_list())
self.assertDTypeEqual(tf.reduce_mean(out).numpy(), np.float32)
def testBuildMaxViTWithConfig(self):
backbone_config = backbones.Backbone(
type='maxvit',
maxvit=backbones.MaxViT(
stem_hsize=[32, 32],
num_blocks=[2, 3, 5, 2],
window_size=2,
grid_size=2,
hidden_size=[32, 32, 32, 32],
),
)
backbone = maxvit.build_maxvit(
input_specs=tf.keras.layers.InputSpec(shape=[None] + [64, 64, 3]),
backbone_config=backbone_config,
norm_activation_config=common.NormActivation(),
)
self.assertSetEqual(
set(['2', '3', '4', '5']), set(backbone.output_specs.keys())
)
if __name__ == '__main__':
tf.test.main()
| 4,743 | 30.838926 | 76 | py |
models | models-master/official/projects/maxvit/modeling/layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers and Model class for MaxViT."""
import functools
import string
from typing import Any, Callable, Optional, Tuple, Union
from absl import logging
import tensorflow as tf
from official.projects.maxvit.modeling import common_ops
class TrailDense(tf.keras.layers.Layer):
"""Dense module that projects multiple trailing dimensions."""
def __init__(
self,
output_trailing_dims: Union[int, Tuple[int, ...]],
begin_axis: int = -1,
use_bias: bool = True,
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'dense',
):
super().__init__(name=name)
if isinstance(output_trailing_dims, int):
self._output_trailing_dims = [output_trailing_dims]
else:
assert isinstance(output_trailing_dims, (list, tuple)) and all(
isinstance(i, int) for i in output_trailing_dims
), f'Invalid output shape: {output_trailing_dims}.'
self._output_trailing_dims = list(output_trailing_dims)
self.begin_axis = begin_axis
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
def build(self, input_shape: tf.TensorShape) -> None:
"""Create variables and einsum expression based on input shape."""
# Create variables
weight_shape = input_shape[self.begin_axis :] + self._output_trailing_dims
self.weight = self.add_weight(
name='weight',
shape=weight_shape,
initializer=self.kernel_initializer,
trainable=True,
)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=self._output_trailing_dims,
initializer=self.bias_initializer,
trainable=True,
)
# Create einsum expression
input_rank = input_shape.rank
shared_size = self.begin_axis % input_rank
i_only_size = input_rank - shared_size
o_only_size = len(self._output_trailing_dims)
assert input_rank + o_only_size < len(
string.ascii_uppercase
), 'Cannot use einsum as input rank + output rank > 26.'
einsum_str = string.ascii_uppercase[: input_rank + o_only_size]
offset = 0
shared_str = einsum_str[offset : offset + shared_size]
offset += shared_size
i_only_str = einsum_str[offset : offset + i_only_size]
offset += i_only_size
o_only_str = einsum_str[offset : offset + o_only_size]
input_str = f'{shared_str}{i_only_str}'
output_str = f'{shared_str}{o_only_str}'
weight_str = f'{i_only_str}{o_only_str}'
# Examples
# - For 4D tensors in conv, a common expr would be 'ABCD,DE->ABCE'.
# - For `q/k/v` head projection in multi-head attention with two output
# trailing dims, the expr is 'ABC,CDE->ABDE'
# - For `o` output projection in multi-head attention with begin_axis = -2,
# the expr is 'ABCD,CDE->ABE'
self.einsum_expr = f'{input_str},{weight_str}->{output_str}'
def call(self, inputs: tf.Tensor) -> tf.Tensor:
output = tf.einsum(self.einsum_expr, inputs, self.weight)
if self.use_bias:
output += self.bias
return output
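# Example (illustrative only; shapes are placeholders): TrailDense projects one
# or more trailing dimensions with a single einsum, which is how the q/k/v and
# output projections in `Attention` below are implemented.
#
#   split = TrailDense(output_trailing_dims=(4, 8))   # 4 heads of size 8
#   x = tf.random.uniform([2, 16, 32])                # [batch, len, channels]
#   y = split(x)                                      # -> [2, 16, 4, 8]
#                                                     #    'ABC,CDE->ABDE'
#   merge = TrailDense(output_trailing_dims=32, begin_axis=-2)
#   z = merge(y)                                      # -> [2, 16, 32]
#                                                     #    'ABCD,CDE->ABE'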
class Attention(tf.keras.layers.Layer):
"""Multi-headed attention module."""
def __init__(
self,
hidden_size: int,
head_size: int,
input_origin_height: int = 1,
input_origin_width: int = 1,
num_heads: Optional[int] = None,
dropatt: float = 0.0,
attn_axis: int = 0,
rel_attn_type: Optional[str] = None,
scale_ratio: Optional[float] = None,
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'attention',
):
super().__init__(name=name)
self.hidden_size = hidden_size
self.head_size = head_size
self.input_origin_height = input_origin_height
self.input_origin_width = input_origin_width
self.num_heads = num_heads or hidden_size // head_size
self.dropatt = dropatt
self.attn_axis = attn_axis
self.rel_attn_type = rel_attn_type
self.scale_ratio = scale_ratio
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self._q_proj = TrailDense(
output_trailing_dims=(self.num_heads, self.head_size),
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='q',
)
self._k_proj = TrailDense(
output_trailing_dims=(self.num_heads, self.head_size),
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='k',
)
self._v_proj = TrailDense(
output_trailing_dims=(self.num_heads, self.head_size),
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='v',
)
self._o_proj = TrailDense(
output_trailing_dims=self.hidden_size,
begin_axis=-2,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='o',
)
self.q_scale = self.head_size**-0.5
self.relative_bias = None
def build(self, query_shape: Any) -> None:
##### Content attention
# Einsum expression:
# B = batch_size
# N = num_heads
# K = head_size
# S = query_len (of the given attn_axis)
# T = key/value_len (of the given attn_axis)
    # [U-Z] = length of other attention axes
# Example for 5D query_heads, (e.g. images [B x H x W x N x K])
# - when attn_axis = 0 (H axis):
# symbols = 'U' => num_attn_dims = 2
# q_expr = 'BSUNK' => 'S' is inserted, prefix = 'B', suffix = 'NK'
# k_expr = 'BTUNK' => 'T' is inserted, prefix = 'B', suffix = 'NK'
# v_expr = 'BTUNK' => 'T' is inserted, prefix = 'B', suffix = 'NK'
# a_expr = 'BUNST' => 'N x S x T' attention map
num_attn_dims = query_shape.rank - 2 # -2 to account for bsz, hidden size
assert num_attn_dims < 6, 'Only support at most 6 attention dims.'
symbols = ''.join([chr(ord('U') + i) for i in range(num_attn_dims - 1)])
insert = lambda s, i, c: s[:i] + c + s[i:]
create_expr = lambda s, prefix='B', suffix='NK': prefix + s + suffix
self.q_expr = create_expr(insert(symbols, self.attn_axis, 'S'))
self.k_expr = create_expr(insert(symbols, self.attn_axis, 'T'))
self.v_expr = create_expr(insert(symbols, self.attn_axis, 'T'))
self.a_expr = create_expr(symbols, suffix='NST')
##### Relative attention
if self.rel_attn_type in ['2d_multi_head', '2d_single_head']:
query_shape_list = query_shape.as_list()
if query_shape.rank == 4:
height, width = query_shape_list[1:3]
elif query_shape.rank == 3:
seq_len = query_shape_list[1]
height, width = common_ops.get_shape_from_length(
seq_len, self.input_origin_height, self.input_origin_width
)
if height * width != seq_len:
raise ValueError(
'Sequence length: %s violates input size: (%s, %s).'
% (seq_len, height, width)
)
else:
raise ValueError(
'Does not support relative attention for query shape: %s.'
% query_shape_list
)
if self.scale_ratio is not None:
scale_ratio = eval(self.scale_ratio) # pylint:disable=eval-used
vocab_height = 2 * int(height / scale_ratio) - 1
vocab_width = 2 * int(width / scale_ratio) - 1
else:
vocab_height = 2 * height - 1
vocab_width = 2 * width - 1
if self.rel_attn_type == '2d_multi_head':
rel_bias_shape = [self.num_heads, vocab_height, vocab_width]
elif self.rel_attn_type == '2d_single_head':
rel_bias_shape = [vocab_height, vocab_width]
else:
raise NotImplementedError(
f'rel_attn_type {self.rel_attn_type} not implemented yet.'
)
self._feat_height = height
self._feat_width = width
self.relative_bias = self.add_weight(
'relative_bias',
rel_bias_shape,
initializer=self.kernel_initializer,
trainable=True,
)
def call(
self,
query: tf.Tensor,
training: bool,
context: Optional[tf.Tensor] = None,
attn_mask: Optional[tf.Tensor] = None,
) -> tf.Tensor:
if context is None:
context = query
q_heads = self._q_proj(query)
k_heads = self._k_proj(context)
v_heads = self._v_proj(context)
q_heads *= self.q_scale
# attention
attn_logits = tf.einsum(
f'{self.q_expr},{self.k_expr}->{self.a_expr}', q_heads, k_heads
)
if self.relative_bias is not None:
if self.rel_attn_type == '2d_multi_head':
h_axis = 1
else:
h_axis = 0
if self.scale_ratio is not None:
src_shape = self.relative_bias.shape.as_list()
relative_bias = tf.expand_dims(self.relative_bias, axis=-1)
relative_bias = tf.image.resize(
relative_bias, [2 * self._feat_height - 1, 2 * self._feat_width - 1]
)
relative_bias = tf.cast(
tf.squeeze(relative_bias, axis=-1), self.compute_dtype
)
tgt_shape = relative_bias.shape.as_list()
logging.info(
'Bilinear resize relative position bias %s -> %s.',
src_shape,
tgt_shape,
)
else:
relative_bias = tf.cast(self.relative_bias, self.compute_dtype)
reindexed_bias = common_ops.reindex_2d_einsum_lookup(
relative_position_tensor=relative_bias,
height=self._feat_height,
width=self._feat_width,
max_relative_height=self._feat_height - 1,
max_relative_width=self._feat_width - 1,
h_axis=h_axis,
)
attn_logits += reindexed_bias
if attn_mask is not None:
# attn_mask: 1.0 means CAN attend, 0.0 means CANNOT attend
attn_logits += (1.0 - attn_mask) * attn_logits.dtype.min
attn_probs = common_ops.float32_softmax(attn_logits, axis=-1)
if self.dropatt:
attn_probs = tf.keras.layers.Dropout(self.dropatt, 'attn_prob_drop')(
attn_probs, training=training
)
attn_out = tf.einsum(
f'{self.a_expr},{self.v_expr}->{self.q_expr}', attn_probs, v_heads
)
output = self._o_proj(attn_out)
return output
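# Example (illustrative only; shapes are placeholders): attention applied along
# a single spatial axis of a 4D feature map. With `attn_axis=0`, attention runs
# over the height dimension independently for each width position, and the
# output keeps the input shape.
#
#   attn = Attention(hidden_size=32, head_size=8, attn_axis=0)
#   feats = tf.random.uniform([2, 8, 8, 32])          # [B, H, W, C]
#   out = attn(feats, training=False)                 # -> [2, 8, 8, 32]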
class FFN(tf.keras.layers.Layer):
"""Positionwise feed-forward network."""
def __init__(
self,
hidden_size: int,
dropout: float = 0.0,
expansion_rate: int = 4,
activation: str = 'gelu',
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'ffn',
):
super().__init__(name=name)
self.hidden_size = hidden_size
self.expansion_rate = expansion_rate
self.expanded_size = self.hidden_size * self.expansion_rate
self.dropout = dropout
self.activation = activation
self._expand_dense = TrailDense(
output_trailing_dims=self.expanded_size,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='expand_dense',
)
self._shrink_dense = TrailDense(
output_trailing_dims=self.hidden_size,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='shrink_dense',
)
self._activation_fn = common_ops.get_act_fn(self.activation)
def call(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
output = inputs
output = self._expand_dense(output)
output = self._activation_fn(output)
if self.dropout:
output = tf.keras.layers.Dropout(self.dropout, name='nonlinearity_drop')(
output, training=training
)
output = self._shrink_dense(output)
return output
class TransformerBlock(tf.keras.layers.Layer):
"""Transformer block = Attention + FFN."""
def __init__(
self,
hidden_size: int,
head_size: int,
input_origin_height: int = 1,
input_origin_width: int = 1,
num_heads: Optional[int] = None,
expansion_rate: int = 4,
activation: str = 'gelu',
pool_type: str = '2d:avg',
pool_stride: int = 1,
pool_query_only: bool = False,
dropatt: Optional[Union[float, tf.Tensor]] = None,
dropout: Optional[Union[float, tf.Tensor]] = None,
rel_attn_type: Optional[str] = None,
scale_ratio: Optional[str] = None,
survival_prob: Optional[Union[float, tf.Tensor]] = None,
ln_epsilon: float = 1e-5,
ln_dtype: Optional[tf.DType] = None,
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'transformer',
) -> None:
super().__init__(name=name)
self._hidden_size = hidden_size
self._head_size = head_size
self._input_origin_height = input_origin_height
self._input_origin_width = input_origin_width
self._num_heads = num_heads
self._expansion_rate = expansion_rate
self._activation = activation
self._pool_type = pool_type
self._pool_stride = pool_stride
self._pool_query_only = pool_query_only
self._dropatt = dropatt
self._dropout = dropout
self._rel_attn_type = rel_attn_type
self._scale_ratio = scale_ratio
self._survival_prob = survival_prob
self._ln_epsilon = ln_epsilon
self._ln_dtype = ln_dtype
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
def build(self, input_shape: tf.TensorShape) -> None:
if len(input_shape.as_list()) == 4:
_, height, width, _ = input_shape.as_list()
elif len(input_shape.as_list()) == 3:
_, seq_len, _ = input_shape.as_list()
height, width = common_ops.get_shape_from_length(
seq_len, self._input_origin_height, self._input_origin_width
)
else:
raise ValueError(f'Unsupported input shape: {input_shape.as_list()}.')
self.height, self.width = height, width
input_size = input_shape.as_list()[-1]
if input_size != self._hidden_size:
self._shortcut_proj = TrailDense(
self._hidden_size,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='shortcut_proj',
)
else:
self._shortcut_proj = None
self._attn_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
epsilon=self._ln_epsilon,
dtype=self._ln_dtype,
name='attn_layer_norm',
)
self._attention = Attention(
self._hidden_size,
self._head_size,
height // self._pool_stride,
width // self._pool_stride,
num_heads=self._num_heads,
dropatt=self._dropatt,
rel_attn_type=self._rel_attn_type,
scale_ratio=self._scale_ratio,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
)
self._ffn_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
epsilon=self._ln_epsilon,
dtype=self._ln_dtype,
name='ffn_layer_norm',
)
self._ffn = FFN(
self._hidden_size,
dropout=self._dropout,
expansion_rate=self._expansion_rate,
activation=self._activation,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
)
def downsample(self, inputs: tf.Tensor, name: str) -> tf.Tensor:
output = inputs
if self._pool_stride > 1:
assert self._pool_type in [
'2d:avg',
'2d:max',
'1d:avg',
'1d:max',
], f'Invalid pool_type {self._pool_type}'
if self._pool_type.startswith('2d'):
output = common_ops.maybe_reshape_to_2d(output, height=self.height)
output = common_ops.pooling_2d(
output,
self._pool_type.split(':')[-1],
self._pool_stride,
padding='same',
data_format='channels_last',
name=name,
)
else:
output = common_ops.pooling_1d(
output,
self._pool_type.split(':')[-1],
self._pool_stride,
padding='same',
data_format='channels_last',
name=name,
)
return output
def shortcut_branch(self, shortcut: tf.Tensor) -> tf.Tensor:
shortcut = self.downsample(shortcut, 'shortcut_pool')
shortcut = common_ops.maybe_reshape_to_1d(shortcut)
if self._shortcut_proj:
shortcut = self._shortcut_proj(shortcut)
return shortcut
def attn_branch(
self,
inputs: tf.Tensor,
training: bool,
attn_mask: Optional[tf.Tensor] = None,
) -> tf.Tensor:
output = self._attn_layer_norm(inputs)
if self._pool_query_only:
query = self.downsample(output, 'query_pool')
query = common_ops.maybe_reshape_to_1d(query)
output = common_ops.maybe_reshape_to_1d(output)
output = self._attention(
query, training, context=output, attn_mask=attn_mask
)
else:
output = self.downsample(output, 'residual_pool')
output = common_ops.maybe_reshape_to_1d(output)
output = self._attention(output, training, attn_mask=attn_mask)
return output
def ffn_branch(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
output = self._ffn_layer_norm(inputs)
output = self._ffn(output, training)
return output
def call(
self,
inputs: tf.Tensor,
training: bool,
attn_mask: Optional[tf.Tensor] = None,
) -> tf.Tensor:
logging.info(
'Block %s input shape: %s, (%s).', self.name, inputs.shape, inputs.dtype
)
shortcut = self.shortcut_branch(inputs)
output = self.attn_branch(inputs, training, attn_mask)
if self._dropout:
output = tf.keras.layers.Dropout(self._dropout, name='after_attn_drop')(
output, training=training
)
output = common_ops.residual_add(
output, shortcut, self._survival_prob, training
)
shortcut = output
output = self.ffn_branch(output, training)
if self._dropout:
output = tf.keras.layers.Dropout(self._dropout, name='after_ffn_drop')(
output, training=training
)
output = common_ops.residual_add(
output, shortcut, self._survival_prob, training
)
return output
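# Example (illustrative only; shapes are placeholders): a TransformerBlock with
# a pooling stride of 2 downsamples the map, runs self-attention over the
# flattened sequence, and returns a [batch, seq_len, hidden_size] tensor.
#
#   block = TransformerBlock(hidden_size=48, head_size=16, pool_stride=2)
#   x = tf.random.uniform([2, 16, 16, 32])            # [B, H, W, C]
#   y = block(x, training=False)                      # -> [2, 64, 48]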
class SqueezeAndExcitation(tf.keras.layers.Layer):
"""Squeeze-and-excitation layer."""
def __init__(
self,
se_filters: int,
output_filters: int,
local_pooling: bool = False,
data_format: str = 'channels_last',
activation: str = 'swish',
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'se',
):
super().__init__(name=name)
self._local_pooling = local_pooling
self._data_format = data_format
self._activation_fn = common_ops.get_act_fn(activation)
# Squeeze and Excitation layer.
self._se_reduce = tf.keras.layers.Conv2D(
se_filters,
kernel_size=[1, 1],
strides=[1, 1],
padding='same',
data_format=self._data_format,
use_bias=True,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='reduce_conv2d',
)
self._se_expand = tf.keras.layers.Conv2D(
output_filters,
kernel_size=[1, 1],
strides=[1, 1],
padding='same',
data_format=self._data_format,
use_bias=True,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='expand_conv2d',
)
def call(self, inputs: tf.Tensor) -> tf.Tensor:
h_axis, w_axis = [2, 3] if self._data_format == 'channels_first' else [1, 2]
if self._local_pooling:
se_tensor = tf.nn.avg_pool(
inputs,
ksize=[1, inputs.shape[h_axis], inputs.shape[w_axis], 1],
strides=[1, 1, 1, 1],
padding='VALID',
)
else:
se_tensor = tf.reduce_mean(inputs, [h_axis, w_axis], keepdims=True)
se_tensor = self._se_expand(self._activation_fn(self._se_reduce(se_tensor)))
return tf.sigmoid(se_tensor) * inputs
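# Example (illustrative only; shapes are placeholders): the SE layer squeezes
# the spatial dimensions, gates the channels, and returns a tensor of the same
# shape as its input. `output_filters` must match the input channel count.
#
#   se = SqueezeAndExcitation(se_filters=8, output_filters=32)
#   x = tf.random.uniform([2, 16, 16, 32])
#   y = se(x)                                         # -> [2, 16, 16, 32]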
def _config_batch_norm(
norm_type: str,
ln_epsilon: float = 1e-6,
bn_momentum: float = 0.99,
bn_epsilon: float = 1e-6,
) -> Callable[..., Any]:
"""Defines the normalization class for MbConv based on `norm_type`."""
if norm_type == 'layer_norm':
return functools.partial(
tf.keras.layers.LayerNormalization, epsilon=ln_epsilon
)
elif norm_type == 'batch_norm':
return functools.partial(
tf.keras.layers.BatchNormalization,
momentum=bn_momentum,
epsilon=bn_epsilon,
)
elif norm_type == 'sync_batch_norm':
return functools.partial(
tf.keras.layers.BatchNormalization,
momentum=bn_momentum,
epsilon=bn_epsilon,
synchronized=True,
)
else:
raise ValueError(f'Unsupported norm_type {norm_type}.')
class MBConvBlock(tf.keras.layers.Layer):
"""Mobile Inverted Residual Bottleneck (https://arxiv.org/abs/1905.02244)."""
def __init__(
self,
hidden_size: int,
downsample_loc: str = 'depth_conv',
data_format: str = 'channels_last',
kernel_size: int = 3,
expansion_rate: int = 4,
se_ratio: float = 0.25,
activation: str = 'gelu',
pool_type: str = 'avg',
pool_stride: int = 1,
dropcnn: Optional[float] = None,
survival_prob: Optional[float] = None,
norm_type: str = 'sync_batch_norm',
bn_epsilon: float = 1e-3,
bn_momentum: float = 0.99,
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'mbconv',
):
super().__init__(name=name)
self._hidden_size = hidden_size
self._downsample_loc = downsample_loc
self._data_format = data_format
self._kernel_size = kernel_size
self._expansion_rate = expansion_rate
self._se_ratio = se_ratio
self._activation = activation
self._pool_type = pool_type
self._pool_stride = pool_stride
self._dropcnn = dropcnn
self._survival_prob = survival_prob
self._norm_type = norm_type
self._bn_epsilon = bn_epsilon
self._bn_momentum = bn_momentum
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._activation_fn = common_ops.get_act_fn(self._activation)
def build(self, input_shape: tf.TensorShape) -> None:
"""Builds block according to the arguments."""
channel_axis = 3 if self._data_format == 'channels_last' else 1
input_size = input_shape[channel_axis]
inner_size = self._hidden_size * self._expansion_rate
norm_cls = _config_batch_norm(
self._norm_type,
bn_momentum=self._bn_momentum,
bn_epsilon=self._bn_epsilon,
)
# Shortcut projection.
if input_size != self._hidden_size:
self._shortcut_conv = tf.keras.layers.Conv2D(
filters=self._hidden_size,
kernel_size=1,
strides=1,
padding='same',
data_format=self._data_format,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
use_bias=True,
name='shortcut_conv',
)
else:
self._shortcut_conv = None
# Pre-Activation norm
self._pre_norm = norm_cls(name='pre_norm')
# Expansion phase. Called if not using fused convolutions and expansion
# phase is necessary.
if self._expansion_rate != 1:
self._expand_conv = tf.keras.layers.Conv2D(
filters=inner_size,
kernel_size=1,
strides=(
self._pool_stride if self._downsample_loc == 'expand_conv' else 1
),
kernel_initializer=self._kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=False,
name='expand_conv',
)
self._expand_norm = norm_cls(name='expand_norm')
# Depth-wise convolution phase. Called if not using fused convolutions.
self._depthwise_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=self._kernel_size,
strides=(
self._pool_stride if self._downsample_loc == 'depth_conv' else 1
),
depthwise_initializer=self._kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=False,
name='depthwise_conv',
)
self._depthwise_norm = norm_cls(name='depthwise_norm')
if self._se_ratio is not None and 0 < self._se_ratio <= 1:
se_filters = max(1, int(self._hidden_size * self._se_ratio))
self._se = SqueezeAndExcitation(
se_filters=se_filters,
output_filters=inner_size,
data_format=self._data_format,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='se',
)
else:
self._se = None
# Output phase.
self._shrink_conv = tf.keras.layers.Conv2D(
filters=self._hidden_size,
kernel_size=1,
strides=1,
padding='same',
data_format=self._data_format,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
use_bias=True,
name='shrink_conv',
)
def downsample(self, inputs: tf.Tensor, name: str) -> tf.Tensor:
output = inputs
if self._pool_stride > 1:
output = common_ops.pooling_2d(
output,
self._pool_type,
self._pool_stride,
padding='same',
data_format=self._data_format,
name=name,
)
return output
def shortcut_branch(self, shortcut: tf.Tensor) -> tf.Tensor:
shortcut = self.downsample(shortcut, name='shortcut_pool')
if self._shortcut_conv:
shortcut = self._shortcut_conv(shortcut)
return shortcut
def residual_branch(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
output = self._pre_norm(inputs, training=training)
if self._downsample_loc == 'inputs':
output = self.downsample(output, name='residual_pool')
if self._expansion_rate != 1:
output = self._expand_conv(output)
output = self._expand_norm(output, training=training)
output = self._activation_fn(output)
logging.debug('Expand shape: %s', output.shape)
output = self._depthwise_conv(output)
output = self._depthwise_norm(output, training=training)
output = self._activation_fn(output)
logging.debug('DConv shape: %s', output.shape)
if self._dropcnn:
output = tf.keras.layers.Dropout(self._dropcnn, 'after_dconv_drop')(
output, training=training
)
if self._se:
output = self._se(output)
self.endpoints = {'expansion_output': output}
output = self._shrink_conv(output)
logging.debug('Shrink shape: %s', output.shape)
return output
def call(
self,
inputs: tf.Tensor,
training: bool,
survival_prob: Optional[Union[float, tf.Tensor]] = None,
) -> tf.Tensor:
"""Implementation of call().
Args:
inputs: the inputs tensor.
training: boolean, whether the model is constructed for training.
      survival_prob: float, between 0 and 1, drop connect rate.
    Returns:
      An output tensor.
"""
logging.debug(
'Block %s input shape: %s (%s)', self.name, inputs.shape, inputs.dtype
)
residual = self.residual_branch(inputs, training)
shortcut = self.shortcut_branch(inputs)
survival_prob = survival_prob or self._survival_prob
output = common_ops.residual_add(
residual, shortcut, survival_prob, training
)
return output
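# Example (illustrative only; values are placeholders): an MBConv block with a
# pooling stride of 2 halves the spatial resolution and projects to
# `hidden_size` channels.
#
#   block = MBConvBlock(hidden_size=64, pool_stride=2, norm_type='batch_norm')
#   x = tf.random.uniform([2, 32, 32, 32])
#   y = block(x, training=False)                      # -> [2, 16, 16, 64]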
| 28,290 | 31.706358 | 80 | py |
models | models-master/official/projects/maxvit/modeling/maxvit.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=logging-fstring-interpolation
r"""MaxViT layers and model class."""
import functools
from typing import Any, Mapping, Optional, Tuple, Union
from absl import logging
import tensorflow as tf
from official.projects.maxvit.modeling import common_ops as ops
from official.projects.maxvit.modeling import layers
from official.vision.modeling.backbones import factory
MAXVIT_SPECS = {
'maxvit-tiny-for-test': dict(
survival_prob=None,
stem_hsize=(8, 8),
block_type=('maxvit', 'maxvit', 'maxvit', 'maxvit'),
num_blocks=(2, 3, 3, 2),
hidden_size=(32, 32, 32, 768),
),
'maxvit-tiny': dict(
survival_prob=0.8,
stem_hsize=(64, 64),
block_type=('maxvit', 'maxvit', 'maxvit', 'maxvit'),
num_blocks=(2, 2, 5, 2),
hidden_size=(64, 128, 256, 512),
),
'maxvit-small': dict(
survival_prob=0.7,
stem_hsize=(64, 64),
block_type=('maxvit', 'maxvit', 'maxvit', 'maxvit'),
num_blocks=(2, 2, 5, 2),
hidden_size=(96, 192, 384, 768),
),
'maxvit-base': dict(
survival_prob=0.6,
stem_hsize=(64, 64),
block_type=('maxvit', 'maxvit', 'maxvit', 'maxvit'),
num_blocks=(2, 6, 14, 2),
hidden_size=(96, 192, 384, 768),
),
'maxvit-large': dict(
survival_prob=0.4,
stem_hsize=(128, 128),
block_type=('maxvit', 'maxvit', 'maxvit', 'maxvit'),
num_blocks=(2, 6, 14, 2),
hidden_size=(128, 256, 512, 1024),
),
'maxvit-xlarge': dict(
survival_prob=0.3,
stem_hsize=(192, 192),
block_type=('maxvit', 'maxvit', 'maxvit', 'maxvit'),
num_blocks=(2, 6, 14, 2),
hidden_size=(192, 384, 768, 1536),
),
}
class MaxViTBlock(tf.keras.layers.Layer):
"""MaxViT block = MBConv + Block-Attention + FFN + Grid-Attention + FFN."""
def __init__(
self,
hidden_size: int,
head_size: int,
window_size: int,
grid_size: int,
num_heads: Optional[int] = None,
downsample_loc: str = 'depth_conv',
data_format: str = 'channels_last',
kernel_size: int = 3,
expansion_rate: int = 4,
se_ratio: float = 0.25,
activation: str = 'gelu',
pool_type: str = '2d:avg',
pool_stride: int = 1,
dropcnn: Optional[float] = None,
dropatt: Optional[Union[float, tf.Tensor]] = None,
dropout: Optional[Union[float, tf.Tensor]] = None,
rel_attn_type: Optional[str] = None,
scale_ratio: Optional[str] = None,
survival_prob: Optional[Union[float, tf.Tensor]] = None,
ln_epsilon: float = 1e-5,
ln_dtype: Optional[tf.DType] = None,
norm_type: str = 'sync_batch_norm',
bn_epsilon: float = 1e-3,
bn_momentum: float = 0.99,
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'maxvit_block',
) -> None:
super().__init__(name=name)
self._hidden_size = hidden_size
self._head_size = head_size
self._window_size = window_size
self._grid_size = grid_size
self._num_heads = num_heads
self._downsample_loc = downsample_loc
self._data_format = data_format
self._kernel_size = kernel_size
self._expansion_rate = expansion_rate
self._se_ratio = se_ratio
self._dropcnn = dropcnn
self._activation = activation
self._norm_type = norm_type
self._bn_epsilon = bn_epsilon
self._bn_momentum = bn_momentum
self._pool_type = pool_type
self._pool_stride = pool_stride
self._dropatt = dropatt
self._dropout = dropout
self._rel_attn_type = rel_attn_type
self._scale_ratio = scale_ratio
self._survival_prob = survival_prob
self._ln_epsilon = ln_epsilon
self._ln_dtype = ln_dtype
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
def build(self, input_shape: tf.TensorShape) -> None:
input_size = input_shape.as_list()[-1]
if input_size != self._hidden_size:
self._shortcut_proj = layers.TrailDense(
self._hidden_size,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='shortcut_proj',
)
else:
self._shortcut_proj = None
self._block_attn_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
epsilon=self._ln_epsilon,
dtype=self._ln_dtype,
name='attn_layer_norm',
)
self._grid_attn_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
epsilon=self._ln_epsilon,
dtype=self._ln_dtype,
name='attn_layer_norm_1',
)
self._block_attention = layers.Attention(
self._hidden_size,
self._head_size,
num_heads=self._num_heads,
dropatt=self._dropatt,
rel_attn_type=self._rel_attn_type,
scale_ratio=self._scale_ratio,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='attention',
)
self._grid_attention = layers.Attention(
self._hidden_size,
self._head_size,
num_heads=self._num_heads,
dropatt=self._dropatt,
rel_attn_type=self._rel_attn_type,
scale_ratio=self._scale_ratio,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='attention_1',
)
self._block_ffn_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
epsilon=self._ln_epsilon,
dtype=self._ln_dtype,
name='ffn_layer_norm',
)
self._grid_ffn_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
epsilon=self._ln_epsilon,
dtype=self._ln_dtype,
name='ffn_layer_norm_1',
)
self._block_ffn = layers.FFN(
self._hidden_size,
dropout=self._dropout,
expansion_rate=self._expansion_rate,
activation=self._activation,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='ffn',
)
self._grid_ffn = layers.FFN(
self._hidden_size,
dropout=self._dropout,
expansion_rate=self._expansion_rate,
activation=self._activation,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='ffn_1',
)
self._mbconv = layers.MBConvBlock(
self._hidden_size,
downsample_loc=self._downsample_loc,
data_format=self._data_format,
kernel_size=self._kernel_size,
expansion_rate=self._expansion_rate,
se_ratio=self._se_ratio,
activation=self._activation,
pool_type='avg' if self._pool_type == '2d:avg' else 'max',
pool_stride=self._pool_stride,
dropcnn=self._dropcnn,
survival_prob=self._survival_prob,
norm_type=self._norm_type,
bn_epsilon=self._bn_epsilon,
bn_momentum=self._bn_momentum,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='mbconv',
)
def downsample(self, inputs, name):
output = inputs
if self._pool_stride > 1:
output = ops.maybe_reshape_to_2d(output)
output = ops.pooling_2d(
output,
self._pool_type,
self._pool_stride,
padding='same',
data_format='channels_last',
name=name,
)
return output
def window_partition(self, features: tf.Tensor) -> tf.Tensor:
"""Partition the input feature maps into non-overlapping windows.
Note that unsuitable feature or window sizes may be costly on TPU due to
padding sizes:
https://docs.google.com/document/d/1GojE1Q7hR2qyi0mIfnTHgERfl7Dmsj6xPQ31MQo3xUk/edit#
Args:
features: [B, H, W, C] feature maps.
Returns:
      Partitioned features: [B * nH * nW, wSize, wSize, c].
Raises:
ValueError: If the feature map sizes are not divisible by window sizes.
"""
_, h, w, c = features.shape
window_size = self._window_size
if h % window_size != 0 or w % window_size != 0:
raise ValueError(
f'Feature map sizes {(h, w)} '
f'not divisible by window size ({window_size}).'
)
features = tf.reshape(
features,
(-1, h // window_size, window_size, w // window_size, window_size, c),
)
features = tf.transpose(features, (0, 1, 3, 2, 4, 5))
features = tf.reshape(features, (-1, window_size, window_size, c))
return features
def window_stitch_back(
self, features: tf.Tensor, window_size: int, h: int, w: int
) -> tf.Tensor:
"""Reverse window_partition."""
features = tf.reshape(
features,
[
-1,
h // window_size,
w // window_size,
window_size,
window_size,
features.shape[-1],
],
)
return tf.reshape(
tf.transpose(features, (0, 1, 3, 2, 4, 5)),
[-1, h, w, features.shape[-1]],
)
def grid_partition(self, features: tf.Tensor) -> tf.Tensor:
"""Partition the input feature maps into non-overlapping windows.
Note that unsuitable feature or window sizes may be costly on TPU due to
padding sizes:
https://docs.google.com/document/d/1GojE1Q7hR2qyi0mIfnTHgERfl7Dmsj6xPQ31MQo3xUk/edit#
Args:
features: [B, H, W, C] feature maps.
Returns:
      Partitioned features: [B * nH * nW, gSize, gSize, c].
Raises:
ValueError: If the feature map sizes are not divisible by window sizes.
"""
_, h, w, c = features.shape
grid_size = self._grid_size
if h % grid_size != 0 or w % grid_size != 0:
raise ValueError(
f'Feature map sizes {(h, w)} '
          f'not divisible by grid size ({grid_size}).'
)
features = tf.reshape(
features, (-1, grid_size, h // grid_size, grid_size, w // grid_size, c)
)
features = tf.transpose(features, (0, 2, 4, 1, 3, 5))
features = tf.reshape(features, (-1, grid_size, grid_size, c))
return features
def grid_stitch_back(
self, features: tf.Tensor, grid_size: int, h: int, w: int
) -> tf.Tensor:
"""Reverse window_partition."""
features = tf.reshape(
features,
[
-1,
h // grid_size,
w // grid_size,
grid_size,
grid_size,
features.shape[-1],
],
)
return tf.reshape(
tf.transpose(features, (0, 3, 1, 4, 2, 5)),
[-1, h, w, features.shape[-1]],
)
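  # A hedged contrast of the two partition schemes above (illustrative only):
  # window_partition groups contiguous window_size x window_size patches, so
  # block attention is local, while grid_partition picks positions strided by
  # H // grid_size and W // grid_size, so each grid window spans the whole
  # feature map and grid attention is sparse-global. For example, with a
  # [B, 14, 14, C] map and window_size = grid_size = 7, both return
  # [B * 4, 7, 7, C], but the windows tile the map whereas the grids sample
  # every other row and column.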
def block_attn_branch(
self, inputs: tf.Tensor, training: bool, attn_mask: tf.Tensor
) -> tf.Tensor:
output = self._block_attn_layer_norm(inputs)
    # If grid-attention is put in front, we don't need to downsample.
# Apply local block-attention
_, h, w, _ = output.shape
output = self.window_partition(output)
output = ops.maybe_reshape_to_1d(output)
output = self._block_attention(output, training, attn_mask=attn_mask)
output = self.window_stitch_back(output, self._window_size, h, w)
return output
def grid_attn_branch(
self, inputs: tf.Tensor, training: bool, attn_mask: tf.Tensor
) -> tf.Tensor:
output = self._grid_attn_layer_norm(inputs)
# output = self.downsample(output, 'residual_pool')
    # Apply global grid attention
_, h, w, _ = output.shape
output = self.grid_partition(output)
output = ops.maybe_reshape_to_1d(output)
output = self._grid_attention(output, training, attn_mask=attn_mask)
output = self.grid_stitch_back(output, self._grid_size, h, w)
return output
def block_ffn_branch(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
output = self._block_ffn_layer_norm(inputs)
output = self._block_ffn(output, training)
return output
def grid_ffn_branch(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
output = self._grid_ffn_layer_norm(inputs)
output = self._grid_ffn(output, training)
return output
def mbconv_branch(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
output = self._mbconv(inputs, training=training)
return output
def call(
self,
inputs: tf.Tensor,
training: bool,
attn_mask: Optional[tf.Tensor] = None,
) -> tf.Tensor:
logging.debug(
'Block %s input shape: %s (%s)', self.name, inputs.shape, inputs.dtype
)
# MBConv
output = self.mbconv_branch(inputs, training)
# block self-attention
shortcut = output
output = self.block_attn_branch(output, training, attn_mask)
if self._dropout:
output = tf.keras.layers.Dropout(
self._dropout, name='after_block_attn_drop'
)(output, training=training)
output = ops.residual_add(output, shortcut, self._survival_prob, training)
shortcut = output
output = self.block_ffn_branch(output, training)
if self._dropout:
output = tf.keras.layers.Dropout(
self._dropout, name='after_block_ffn_drop_1'
)(output, training=training)
output = ops.residual_add(output, shortcut, self._survival_prob, training)
# grid self-attention
shortcut = output
output = self.grid_attn_branch(output, training, attn_mask)
if self._dropout:
output = tf.keras.layers.Dropout(
self._dropout, name='after_grid_attn_drop'
)(output, training=training)
output = ops.residual_add(output, shortcut, self._survival_prob, training)
shortcut = output
output = self.grid_ffn_branch(output, training)
if self._dropout:
output = tf.keras.layers.Dropout(
self._dropout, name='after_grid_ffn_drop'
)(output, training=training)
output = ops.residual_add(output, shortcut, self._survival_prob, training)
return output
class MaxViT(tf.keras.Model):
"""MaxViT's backbone that outputs the pre-global-pooled features."""
def __init__(
self,
block_type: Tuple[str, ...],
num_blocks: Tuple[int, ...],
hidden_size: Tuple[int, ...],
stem_hsize: Tuple[int, ...],
head_size: int = 32,
num_heads: Optional[int] = None,
dropatt: Optional[float] = None,
dropout: Optional[float] = None,
rel_attn_type: str = '2d_multi_head',
window_size: int = 7,
grid_size: int = 7,
scale_ratio: Optional[str] = None,
ln_epsilon: float = 1e-5,
ln_dtype: Optional[tf.DType] = None,
downsample_loc: str = 'depth_conv',
kernel_size: int = 3,
se_ratio: float = 0.25,
dropcnn: Optional[float] = None,
data_format: str = 'channels_last',
norm_type: str = 'sync_batch_norm',
bn_epsilon: float = 1e-3,
bn_momentum: float = 0.99,
add_pos_enc: bool = False,
pool_type: str = '2d:avg',
pool_stride: int = 2,
expansion_rate: int = 4,
activation: str = 'gelu',
survival_prob: Optional[float] = None,
survival_prob_anneal: bool = True,
representation_size: Optional[int] = None,
add_gap_layer_norm: bool = False,
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'maxvit',
**kwargs,
):
"""Initializes MaxViT backbone.
Args:
block_type: a tuple of `str`, specify each block type.
num_blocks: a tuple of `int`, specify the number of blocks in each stage.
hidden_size: a tuple of `int`, specify hidden size of block in each stage.
stem_hsize: a tuple of `int`, specify the hidden size of stem network.
head_size: embedding size of each attention head.
num_heads: number of attention head.
dropatt: an optional float of attention dropout rate.
dropout: an optional float of dropping rate for dropout regularization.
      rel_attn_type: a `str` specifying the type of relative attention head,
        possible values are ['2d_multi_head', '2d_single_head'].
window_size: window size for conducting block attention module.
grid_size: grid size for conducting sparse global grid attention.
      scale_ratio: an optional string for fine-tuning at a different window
        size, e.g. '14/7'.
ln_epsilon: layer normalization epsilon.
ln_dtype: layer normalization data type.
      downsample_loc: location to conduct downsampling of the feature maps.
      kernel_size: stem convolution kernel size.
      se_ratio: se ratio for `mbconv` block.
      dropcnn: an optional float of CNN dropout rate.
      data_format: image data format, usually 'channels_last'.
norm_type: normalization type, one of ['batch_norm', 'sync_batch_norm',
'layer_norm'].
bn_epsilon: batch normalization epsilon.
bn_momentum: batch normalization momentum.
      add_pos_enc: whether to add position embedding.
pool_type: pooling operation type, one of ['2d:avg', '2d:max', '1d:avg',
'1d:max'].
pool_stride: pooling stride size.
expansion_rate: expansion rate value.
      activation: activation function.
      survival_prob: survival probability.
      survival_prob_anneal: whether to anneal the survival probability.
      representation_size: an optional `int` of representation size.
      add_gap_layer_norm: whether to add layer norm to the GAP of the
        backbone's final output.
kernel_initializer: kernel initializer.
bias_initializer: bias initializer.
name: specify module name.
**kwargs: extra keyword arguments to be passed.
"""
super().__init__(name=name)
self._block_type = block_type
self._num_blocks = num_blocks
self._hidden_size = hidden_size
self._stem_hsize = stem_hsize
self._head_size = head_size
self._num_heads = num_heads
self._dropatt = dropatt
self._dropout = dropout
self._rel_attn_type = rel_attn_type
self._window_size = window_size
self._grid_size = grid_size
self._scale_ratio = scale_ratio
self._ln_epsilon = ln_epsilon
self._ln_dtype = ln_dtype
self._downsample_loc = downsample_loc
self._kernel_size = kernel_size
self._se_ratio = se_ratio
self._dropcnn = dropcnn
self._data_format = data_format
self._norm_type = norm_type
self._bn_epsilon = bn_epsilon
self._bn_momentum = bn_momentum
self._add_pos_enc = add_pos_enc
self._pool_type = pool_type
self._pool_stride = pool_stride
self._expansion_rate = expansion_rate
self._activation = activation
self._survival_prob = survival_prob
self._survival_prob_anneal = survival_prob_anneal
self._representation_size = representation_size
self._add_gap_layer_norm = add_gap_layer_norm
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._output_specs = {}
def build(self, input_shape: tf.TensorShape) -> None:
if self._norm_type == 'layer_norm':
bn_class = functools.partial(
tf.keras.layers.LayerNormalization, epsilon=self._ln_epsilon
)
elif self._norm_type == 'batch_norm':
bn_class = functools.partial(
tf.keras.layers.BatchNormalization,
momentum=self._bn_momentum,
epsilon=self._bn_epsilon,
)
elif self._norm_type == 'sync_batch_norm':
bn_class = functools.partial(
tf.keras.layers.BatchNormalization,
momentum=self._bn_momentum,
epsilon=self._bn_epsilon,
synchronized=True,
)
else:
raise ValueError(f'Unsupported norm_type {self._norm_type}.')
_, self.height, self.width, _ = input_shape.as_list()
logging.info(
f'Build backbone with input size: ({self.height}, {self.width}).'
)
# Stem
stem_layers = []
for i, _ in enumerate(self._stem_hsize):
conv_layer = tf.keras.layers.Conv2D(
filters=self._stem_hsize[i],
kernel_size=self._kernel_size,
strides=2 if i == 0 else 1,
padding='same',
data_format=self._data_format,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
use_bias=True,
name='conv_{}'.format(i),
)
stem_layers.append(conv_layer)
if i < len(self._stem_hsize) - 1:
stem_layers.append(bn_class(name='norm_{}'.format(i)))
stem_layers.append(
tf.keras.layers.Activation(
ops.get_act_fn(self._activation), name=f'act_{i}'
)
)
self._stem = tf.keras.Sequential(layers=stem_layers, name='stem')
# Backbone
self._blocks = []
total_num_blocks = sum(self._num_blocks)
bid = 0
for i, _ in enumerate(self._block_type):
self._blocks.append([])
for j in range(self._num_blocks[i]):
# block name
block_name = f'block_{i:0>2d}_{j:0>2d}'
##### Update per-block config
# No pooling if not the first block in the stage
if j == 0:
pool_stride = self._pool_stride
else:
pool_stride = 1
# anneal the survival prob
survival_prob = self._survival_prob
if survival_prob and self._survival_prob_anneal:
drop_rate = 1.0 - survival_prob
survival_prob = 1.0 - drop_rate * bid / total_num_blocks
logging.info(
'[%02d/%02d] %s survival_prob: %.4f',
bid,
total_num_blocks,
block_name,
survival_prob,
)
##### Init block
if self._block_type[i] == 'tfm':
block = layers.TransformerBlock(
hidden_size=self._hidden_size[i],
head_size=self._head_size,
input_origin_height=self.height,
input_origin_width=self.width,
num_heads=self._num_heads,
expansion_rate=self._expansion_rate,
activation=self._activation,
pool_type=self._pool_type,
pool_stride=pool_stride,
dropatt=self._dropatt,
dropout=self._dropout,
rel_attn_type=self._rel_attn_type,
scale_ratio=self._scale_ratio,
survival_prob=survival_prob,
ln_epsilon=self._ln_epsilon,
ln_dtype=self._ln_dtype,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name=block_name,
)
elif self._block_type[i] == 'mbconv':
assert self._pool_type in ['2d:max', '2d:avg'], (
'Invalid pool_type %s for MBConv block' % self._pool_type
)
pool_type = self._pool_type.split(':')[-1]
block = layers.MBConvBlock(
hidden_size=self._hidden_size[i],
downsample_loc=self._downsample_loc,
data_format=self._data_format,
kernel_size=self._kernel_size,
expansion_rate=self._expansion_rate,
se_ratio=self._se_ratio,
activation=self._activation,
pool_type=pool_type,
pool_stride=pool_stride,
dropcnn=self._dropcnn,
survival_prob=survival_prob,
norm_type=self._norm_type,
bn_epsilon=self._bn_epsilon,
bn_momentum=self._bn_momentum,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name=block_name,
)
elif self._block_type[i] == 'maxvit':
block = MaxViTBlock(
hidden_size=self._hidden_size[i],
head_size=self._head_size,
window_size=self._window_size,
grid_size=self._grid_size,
num_heads=self._num_heads,
downsample_loc=self._downsample_loc,
data_format=self._data_format,
kernel_size=self._kernel_size,
expansion_rate=self._expansion_rate,
se_ratio=self._se_ratio,
activation=self._activation,
pool_type=self._pool_type,
pool_stride=pool_stride,
dropcnn=self._dropcnn,
dropatt=self._dropatt,
dropout=self._dropout,
rel_attn_type=self._rel_attn_type,
scale_ratio=self._scale_ratio,
survival_prob=survival_prob,
ln_epsilon=self._ln_epsilon,
ln_dtype=self._ln_dtype,
norm_type=self._norm_type,
bn_epsilon=self._bn_epsilon,
bn_momentum=self._bn_momentum,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name=block_name,
)
else:
raise ValueError(f'Unsupported block_type {self._block_type[i]}')
self._blocks[-1].append(block)
bid += 1
if self._representation_size and self._representation_size > 0:
self._dense = tf.keras.layers.Dense(
self._representation_size, name='pre_logits')
if self._add_gap_layer_norm:
self._final_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=self._ln_epsilon, name='final_layer_norm')
def _add_absolute_position_encoding(self, inputs: tf.Tensor) -> tf.Tensor:
"""Add absolute sinusoid position encoding, which is computed on the fly."""
output = ops.maybe_reshape_to_2d(inputs)
h, w = tf.shape(output)[1], tf.shape(output)[2]
enc_size = output.shape.as_list()[-1] // 2
# sinusoid positional encoding that can be generated online
h_seq = tf.range(-h / 2, h / 2)
w_seq = tf.range(-w / 2, w / 2)
pos_enc_h = ops.absolute_position_encoding(
h_seq, enc_size, dtype=output.dtype
)
pos_enc_w = ops.absolute_position_encoding(
w_seq, enc_size, dtype=output.dtype
)
abs_pos_enc = tf.concat(
[
tf.tile(pos_enc_h[:, None, :], [1, w, 1]),
tf.tile(pos_enc_w[None, :, :], [h, 1, 1]),
],
axis=-1,
)
output += abs_pos_enc
if inputs.shape.rank == 3:
output = ops.maybe_reshape_to_1d(output)
return output
def call(
self, inputs: tf.Tensor, mask: Optional[Any] = None, training: bool = None
) -> Mapping[str, tf.Tensor]:
logging.info(
'MaxViT inputs: shape %s, dtype %s.', inputs.shape, inputs.dtype
)
output = self._stem(inputs, training=training)
logging.info(
'Stage 0 (stem) output: shape %s, dtype %s.', output.shape, output.dtype
)
endpoints = {}
add_pos_enc = self._add_pos_enc
for idx, stage_blocks in enumerate(self._blocks):
# Add position encoding
# Note: the position encoding is usually added to the input of the first
# transformer block. For MaxViT, it is the first block of stage 3.
if (isinstance(add_pos_enc, (tuple, list)) and add_pos_enc[idx]) or (
isinstance(add_pos_enc, bool) and add_pos_enc
):
logging.info('Add position encoding at stage %d.', idx + 1)
output = self._add_absolute_position_encoding(output)
# Blocks forward
for block in stage_blocks:
output = block(output, training=training)
if self._block_type[idx] == 'tfm':
height, width = ops.get_shape_from_length(
output.shape[1], self.height, self.width
)
output = tf.reshape(output, [-1, height, width, output.shape[-1]])
endpoints[str(idx + 2)] = output
logging.info(
'Stage %d output: feature level %s shape %s, dtype %s.',
idx + 1,
idx + 2,
output.shape,
output.dtype,
)
self._output_specs = {
idx: endpoint.get_shape() for idx, endpoint in endpoints.items()
}
if self._representation_size and self._representation_size > 0:
      # Backbone's output is [batch_size, height, width, channel_size].
output = tf.keras.layers.GlobalAveragePooling2D()(output)
# Maybe add a layer_norm after global average pooling.
if self._add_gap_layer_norm:
output = self._final_layer_norm(output)
endpoints['pre_logits'] = tf.nn.tanh(self._dense(output))
return endpoints
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
def override_predefined_spec_and_build_maxvit(
predefined_maxvit_spec, backbone_cfg, norm_activation_config
):
"""Builds a MaxViT backbone.
Args:
    predefined_maxvit_spec: a dict of predefined MaxViT specifications.
backbone_cfg: the MaxViT backbone config.
norm_activation_config: normalization and activation config.
Returns:
The built MaxViT backbone.
"""
survival_prob = (
predefined_maxvit_spec['survival_prob']
if backbone_cfg.survival_prob is None
else backbone_cfg.survival_prob
)
stem_hsize = (
predefined_maxvit_spec['stem_hsize']
if backbone_cfg.stem_hsize is None
else backbone_cfg.stem_hsize
)
block_type = (
predefined_maxvit_spec['block_type']
if backbone_cfg.block_type is None
else backbone_cfg.block_type
)
num_blocks = (
predefined_maxvit_spec['num_blocks']
if backbone_cfg.num_blocks is None
else backbone_cfg.num_blocks
)
hidden_size = (
predefined_maxvit_spec['hidden_size']
if backbone_cfg.hidden_size is None
else backbone_cfg.hidden_size
)
logging.info(
(
          'Final MaxViT specs: survival_prob=%s, stem_hsize=%s, '
          'hidden_size=%s, block_type=%s, num_blocks=%s.'
),
survival_prob,
stem_hsize,
hidden_size,
block_type,
num_blocks,
)
return MaxViT(
block_type=block_type,
num_blocks=num_blocks,
hidden_size=hidden_size,
stem_hsize=stem_hsize,
head_size=backbone_cfg.head_size,
dropatt=backbone_cfg.dropatt,
dropout=backbone_cfg.dropout,
rel_attn_type=backbone_cfg.rel_attn_type,
window_size=backbone_cfg.window_size,
grid_size=backbone_cfg.grid_size,
scale_ratio=backbone_cfg.scale_ratio,
ln_epsilon=backbone_cfg.ln_epsilon,
ln_dtype=backbone_cfg.ln_dtype,
downsample_loc=backbone_cfg.downsample_loc,
kernel_size=backbone_cfg.kernel_size,
se_ratio=backbone_cfg.se_ratio,
dropcnn=backbone_cfg.dropcnn,
data_format=backbone_cfg.data_format,
norm_type=backbone_cfg.norm_type,
bn_epsilon=norm_activation_config.norm_epsilon,
bn_momentum=norm_activation_config.norm_momentum,
add_pos_enc=backbone_cfg.add_pos_enc,
pool_type=backbone_cfg.pool_type,
pool_stride=backbone_cfg.pool_stride,
expansion_rate=backbone_cfg.expansion_rate,
activation=norm_activation_config.activation,
survival_prob=survival_prob,
survival_prob_anneal=backbone_cfg.survival_prob_anneal,
representation_size=backbone_cfg.representation_size,
add_gap_layer_norm=backbone_cfg.add_gap_layer_norm,
kernel_initializer=backbone_cfg.kernel_initializer,
bias_initializer=backbone_cfg.bias_initializer,
)
@factory.register_backbone_builder('maxvit')
def build_maxvit(
input_specs,
backbone_config,
norm_activation_config,
l2_regularizer=None,
):
"""Builds a MaxViT backbone."""
del l2_regularizer
backbone_cfg = backbone_config.get()
maxvit = override_predefined_spec_and_build_maxvit(
predefined_maxvit_spec=MAXVIT_SPECS[backbone_cfg.model_name],
backbone_cfg=backbone_cfg,
norm_activation_config=norm_activation_config,
)
# Build the backbone to get a proper `output_specs`.
dummy_inputs = tf.keras.Input(input_specs.shape[1:])
_ = maxvit(dummy_inputs, training=False)
return maxvit
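# A hedged forward-pass sketch (shapes are illustrative): for a MaxViT
# backbone built from MAXVIT_SPECS['maxvit-tiny'] as in the sketch near the
# spec table and a 224x224x3 input, the stem halves the resolution and each
# of the four stages halves it again, so the returned endpoints dict holds
# features of strides 4 to 32:
#
#   endpoints = backbone(tf.ones([1, 224, 224, 3]), training=False)
#   # endpoints['2']: [1, 56, 56, 64]    endpoints['3']: [1, 28, 28, 128]
#   # endpoints['4']: [1, 14, 14, 256]   endpoints['5']: [1, 7, 7, 512]
#   # plus endpoints['pre_logits'] when representation_size > 0.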
| 32,299 | 33.582441 | 89 | py |
models | models-master/official/projects/maxvit/modeling/common_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common operations."""
import functools
import math
from typing import Optional
from absl import logging
import numpy as np
import tensorflow as tf
def activation_fn(features: tf.Tensor, act_fn: str):
"""Customized non-linear activation type."""
if act_fn in ('silu', 'swish'):
return tf.nn.swish(features)
elif act_fn == 'silu_native':
return features * tf.sigmoid(features)
elif act_fn == 'hswish':
return features * tf.nn.relu6(features + 3) / 6
elif act_fn == 'relu':
return tf.nn.relu(features)
elif act_fn == 'relu6':
return tf.nn.relu6(features)
elif act_fn == 'elu':
return tf.nn.elu(features)
elif act_fn == 'leaky_relu':
return tf.nn.leaky_relu(features)
elif act_fn == 'selu':
return tf.nn.selu(features)
elif act_fn == 'mish':
return features * tf.math.tanh(tf.math.softplus(features))
elif act_fn == 'gelu':
return (
0.5
* features
* (
1
+ tf.tanh(
np.sqrt(2 / np.pi) * (features + 0.044715 * tf.pow(features, 3))
)
)
)
else:
raise ValueError('Unsupported act_fn {}'.format(act_fn))
def get_act_fn(act_fn):
if act_fn is None:
act_fn = 'gelu'
if isinstance(act_fn, str):
return functools.partial(activation_fn, act_fn=act_fn)
elif callable(act_fn):
return act_fn
else:
raise ValueError('Unsupported act_fn %s.' % act_fn)
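# Illustrative usage (a sketch, not part of the library surface): get_act_fn
# accepts either a string name or a callable and returns a callable.
#
#   act = get_act_fn('gelu')          # functools.partial(activation_fn, ...)
#   y = act(tf.ones([2, 4]))          # same as activation_fn(x, act_fn='gelu')
#   relu = get_act_fn(tf.nn.relu)     # callables are passed through unchanged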
def pooling_2d(inputs, pool_type, stride, **kwargs):
"""Perform 2D pooling."""
if stride > 1:
if pool_type == 'max':
pool_op = tf.keras.layers.MaxPool2D
elif pool_type == 'avg':
pool_op = tf.keras.layers.AveragePooling2D
else:
      raise ValueError('Unsupported pool_type %s' % pool_type)
output = pool_op(
pool_size=(stride, stride), strides=(stride, stride), **kwargs
)(inputs)
else:
output = inputs
return output
def drop_connect(inputs, training, survival_prob):
"""Drop the entire conv with given survival probability."""
# "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
if not training:
return inputs
# Compute tensor.
batch_size = tf.shape(inputs)[0]
random_tensor = survival_prob
random_tensor += tf.random.uniform([batch_size], dtype=inputs.dtype)
for _ in range(inputs.shape.rank - 1):
random_tensor = tf.expand_dims(random_tensor, axis=-1)
binary_tensor = tf.floor(random_tensor)
  # Unlike the conventional approach that multiplies by survival_prob at test
  # time, here we divide by survival_prob at training time, so that no
  # additional compute is needed at test time.
output = inputs / survival_prob * binary_tensor
return output
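# A hedged numeric sketch of the stochastic-depth behaviour above: with
# survival_prob=0.8, each example in the training batch keeps its residual
# branch with probability 0.8 and kept outputs are scaled by 1 / 0.8, so the
# expected output matches inference time, where inputs are returned unchanged.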
def residual_add(residual, shortcut, survival_prob, training):
"""Combine residual and shortcut."""
if survival_prob is not None and 0 < survival_prob < 1:
residual = drop_connect(residual, training, survival_prob)
return shortcut + residual
def maybe_reshape_to_2d(x, height=None):
"""Reshape tensor to 2d if not already 2d."""
if x.shape.rank == 3:
_, length, num_channel = x.shape.as_list()
if height is None:
height = int(np.sqrt(length))
else:
assert length % height == 0
width = length // height
logging.debug(
'Reshape %s -> %s', [length, num_channel], [height, width, num_channel]
)
return tf.reshape(x, [-1, height, width, num_channel])
elif x.shape.rank == 4:
return x
else:
    raise ValueError('Unsupported shape {}'.format(x.shape))
def maybe_reshape_to_1d(x):
"""Reshape tensor to 1d if not already 1d."""
if x.shape.rank == 4:
_, h, w, num_channel = x.shape.as_list()
logging.debug('Reshape %s -> %s', [h, w, num_channel], [h * w, num_channel])
return tf.reshape(x, [-1, h * w, num_channel])
elif x.shape.rank == 3:
return x
else:
    raise ValueError('Unsupported shape {}'.format(x.shape))
def generate_lookup_tensor(
length: int,
max_relative_position: Optional[int] = None,
clamp_out_of_range: bool = False,
dtype: tf.DType = tf.float32) -> tf.Tensor:
"""Generate a one_hot lookup tensor to reindex embeddings along one dimension.
Args:
length: the length to reindex to.
max_relative_position: the maximum relative position to consider.
Relative position embeddings for distances above this threshold
are zeroed out.
clamp_out_of_range: bool. Whether to clamp out of range locations to the
maximum relative distance. If False, the out of range locations will be
filled with all-zero vectors.
dtype: dtype for the returned lookup tensor.
Returns:
ret: [length, length, vocab_size] lookup tensor that satisfies
ret[n,m,v] = 1{m - n + max_relative_position = v}.
"""
if max_relative_position is None:
max_relative_position = length - 1
vocab_size = 2 * max_relative_position + 1
ret = np.zeros((length, length, vocab_size))
for i in range(length):
for x in range(length):
v = x - i + max_relative_position
if abs(x - i) > max_relative_position:
if clamp_out_of_range:
v = np.clip(v, 0, vocab_size - 1)
else:
continue
ret[i, x, v] = 1
return tf.constant(ret, dtype)
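# Illustrative example (values assumed, not from the original file): with
# length=3 and the default max_relative_position=2, the lookup tensor has
# shape [3, 3, 5]:
#
#   lut = generate_lookup_tensor(3)
#   # lut[0, 2] is one-hot at index 2 - 0 + 2 = 4 (relative offset +2).
#   # lut[2, 0] is one-hot at index 0 - 2 + 2 = 0 (relative offset -2).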
def reindex_2d_einsum_lookup(
relative_position_tensor: tf.Tensor,
height: int,
width: int,
max_relative_height: Optional[int] = None,
max_relative_width: Optional[int] = None,
h_axis=None) -> tf.Tensor:
"""Reindex 2d relative position bias with 2 independent einsum lookups.
Args:
relative_position_tensor: tensor of shape
[..., vocab_height, vocab_width, ...].
height: height to reindex to.
width: width to reindex to.
max_relative_height: maximum relative height.
Position embeddings corresponding to vertical distances larger
than max_relative_height are zeroed out. None to disable.
max_relative_width: maximum relative width.
Position embeddings corresponding to horizontal distances larger
than max_relative_width are zeroed out. None to disable.
h_axis: Axis corresponding to vocab_height. Default to 0 if None.
Returns:
reindexed_bias: a Tensor of shape
[..., height * width, height * width, ...]
"""
height_lookup = generate_lookup_tensor(
height, max_relative_position=max_relative_height,
dtype=relative_position_tensor.dtype)
width_lookup = generate_lookup_tensor(
width, max_relative_position=max_relative_width,
dtype=relative_position_tensor.dtype)
if h_axis is None:
h_axis = 0
non_spatial_rank = relative_position_tensor.shape.rank - 2
non_spatial_expr = ''.join(chr(ord('n') + i) for i in range(non_spatial_rank))
prefix = non_spatial_expr[:h_axis]
suffix = non_spatial_expr[h_axis:]
reindexed_tensor = tf.einsum(
'{0}hw{1},ixh->{0}ixw{1}'.format(prefix, suffix),
relative_position_tensor, height_lookup, name='height_lookup')
reindexed_tensor = tf.einsum(
'{0}ixw{1},jyw->{0}ijxy{1}'.format(prefix, suffix),
reindexed_tensor, width_lookup, name='width_lookup')
ret_shape = relative_position_tensor.shape.as_list()
ret_shape[h_axis] = height * width
ret_shape[h_axis + 1] = height * width
reindexed_tensor = tf.reshape(reindexed_tensor, ret_shape)
return reindexed_tensor
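# A hedged shape sketch: for a relative-bias tensor of shape
# [num_heads, 2H - 1, 2W - 1] with h_axis=1,
# reindex_2d_einsum_lookup(bias, H, W, h_axis=1) returns a
# [num_heads, H * W, H * W] tensor whose entry [:, q, k] is the bias for the
# 2D offset between flattened query position q and key position k.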
def float32_softmax(x: tf.Tensor, *args, **kwargs) -> tf.Tensor:
y = tf.cast(tf.nn.softmax(tf.cast(x, tf.float32), *args, **kwargs), x.dtype)
return y
def get_shape_from_length(length: int, height: int = 1, width: int = 1):
"""Gets input 2D shape from 1D sequence length."""
input_height = int(math.sqrt(length * height // width))
input_width = input_height * width // height
if input_height * input_width != length:
raise ValueError(
f'Invalid sequence length: {length} or shape: ({height, width}).'
)
return (input_height, input_width)
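# Hedged examples of the length -> 2D shape recovery above:
#
#   get_shape_from_length(49)        # -> (7, 7), assumes a square feature map
#   get_shape_from_length(50, 2, 1)  # -> (10, 5), 2:1 aspect ratio
#   get_shape_from_length(50)        # raises ValueError: 50 is not a square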
| 8,454 | 32.418972 | 80 | py |
models | models-master/official/projects/video_ssl/modeling/video_ssl_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build video classification models."""
from typing import Mapping, Optional
# Import libraries
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.video_ssl.configs import video_ssl as video_ssl_cfg
from official.vision.modeling import backbones
from official.vision.modeling import factory_3d as model_factory
layers = tf.keras.layers
class VideoSSLModel(tf.keras.Model):
"""A video ssl model class builder."""
def __init__(self,
backbone,
normalize_feature,
hidden_dim,
hidden_layer_num,
hidden_norm_args,
projection_dim,
input_specs: Optional[Mapping[str,
tf.keras.layers.InputSpec]] = None,
dropout_rate: float = 0.0,
aggregate_endpoints: bool = False,
kernel_initializer='random_uniform',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""Video Classification initialization function.
Args:
backbone: a 3d backbone network.
      normalize_feature: whether to normalize the backbone feature.
hidden_dim: `int` number of hidden units in MLP.
hidden_layer_num: `int` number of hidden layers in MLP.
hidden_norm_args: `dict` for batchnorm arguments in MLP.
projection_dim: `int` number of output dimension for MLP.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
dropout_rate: `float` rate for dropout regularization.
      aggregate_endpoints: `bool` whether to aggregate all endpoints or only
        use the final endpoint.
kernel_initializer: kernel initializer for the dense layer.
kernel_regularizer: tf.keras.regularizers.Regularizer object. Default to
None.
bias_regularizer: tf.keras.regularizers.Regularizer object. Default to
None.
**kwargs: keyword arguments to be passed.
"""
if not input_specs:
input_specs = {
'image': layers.InputSpec(shape=[None, None, None, None, 3])
}
self._self_setattr_tracking = False
self._config_dict = {
'backbone': backbone,
'normalize_feature': normalize_feature,
'hidden_dim': hidden_dim,
'hidden_layer_num': hidden_layer_num,
'use_sync_bn': hidden_norm_args.use_sync_bn,
'norm_momentum': hidden_norm_args.norm_momentum,
'norm_epsilon': hidden_norm_args.norm_epsilon,
'activation': hidden_norm_args.activation,
'projection_dim': projection_dim,
'input_specs': input_specs,
'dropout_rate': dropout_rate,
'aggregate_endpoints': aggregate_endpoints,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
self._input_specs = input_specs
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._backbone = backbone
inputs = {
k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items()
}
endpoints = backbone(inputs['image'])
if aggregate_endpoints:
pooled_feats = []
for endpoint in endpoints.values():
x_pool = tf.keras.layers.GlobalAveragePooling3D()(endpoint)
pooled_feats.append(x_pool)
x = tf.concat(pooled_feats, axis=1)
else:
x = endpoints[max(endpoints.keys())]
x = tf.keras.layers.GlobalAveragePooling3D()(x)
# L2 Normalize feature after backbone
if normalize_feature:
x = tf.nn.l2_normalize(x, axis=-1)
# MLP hidden layers
for _ in range(hidden_layer_num):
x = tf.keras.layers.Dense(hidden_dim)(x)
if self._config_dict['use_sync_bn']:
x = tf.keras.layers.experimental.SyncBatchNormalization(
momentum=self._config_dict['norm_momentum'],
epsilon=self._config_dict['norm_epsilon'])(x)
else:
x = tf.keras.layers.BatchNormalization(
momentum=self._config_dict['norm_momentum'],
epsilon=self._config_dict['norm_epsilon'])(x)
x = tf_utils.get_activation(self._config_dict['activation'])(x)
# Projection head
x = tf.keras.layers.Dense(projection_dim)(x)
super().__init__(inputs=inputs, outputs=x, **kwargs)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone)
@property
def backbone(self):
return self._backbone
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@model_factory.register_model_builder('video_ssl_model')
def build_video_ssl_pretrain_model(
input_specs: tf.keras.layers.InputSpec,
model_config: video_ssl_cfg.VideoSSLModel,
num_classes: int,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
"""Builds the video classification model."""
del num_classes
input_specs_dict = {'image': input_specs}
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=model_config.norm_activation,
l2_regularizer=l2_regularizer)
  # The norm layer type in the MLP head should be the same as in the backbone.
assert model_config.norm_activation.use_sync_bn == model_config.hidden_norm_activation.use_sync_bn
model = VideoSSLModel(
backbone=backbone,
normalize_feature=model_config.normalize_feature,
hidden_dim=model_config.hidden_dim,
hidden_layer_num=model_config.hidden_layer_num,
hidden_norm_args=model_config.hidden_norm_activation,
projection_dim=model_config.projection_dim,
input_specs=input_specs_dict,
dropout_rate=model_config.dropout_rate,
aggregate_endpoints=model_config.aggregate_endpoints,
kernel_regularizer=l2_regularizer)
return model
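# A hedged shape sketch of the model assembled above: the backbone's pooled
# feature (e.g. [batch, 2048] for a 3D ResNet-50) passes through
# `hidden_layer_num` Dense + (Sync)BatchNorm + activation blocks of width
# `hidden_dim`, followed by a final Dense projection, yielding a
# [batch, projection_dim] embedding that feeds the contrastive loss.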
| 6,601 | 35.882682 | 100 | py |
models | models-master/official/projects/video_ssl/tasks/pretrain.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video ssl pretrain task definition."""
from absl import logging
import tensorflow as tf
# pylint: disable=unused-import
from official.core import input_reader
from official.core import task_factory
from official.projects.video_ssl.configs import video_ssl as exp_cfg
from official.projects.video_ssl.dataloaders import video_ssl_input
from official.projects.video_ssl.losses import losses
from official.projects.video_ssl.modeling import video_ssl_model
from official.vision.modeling import factory_3d
from official.vision.tasks import video_classification
# pylint: enable=unused-import
@task_factory.register_task_cls(exp_cfg.VideoSSLPretrainTask)
class VideoSSLPretrainTask(video_classification.VideoClassificationTask):
"""A task for video ssl pretraining."""
def build_model(self):
"""Builds video ssl pretraining model."""
common_input_shape = [
d1 if d1 == d2 else None
for d1, d2 in zip(self.task_config.train_data.feature_shape,
self.task_config.validation_data.feature_shape)
]
input_specs = tf.keras.layers.InputSpec(shape=[None] + common_input_shape)
logging.info('Build model input %r', common_input_shape)
model = factory_3d.build_model(
self.task_config.model.model_type,
input_specs=input_specs,
model_config=self.task_config.model,
num_classes=self.task_config.train_data.num_classes)
return model
def _get_decoder_fn(self, params):
decoder = video_ssl_input.Decoder()
return decoder.decode
def build_inputs(self, params: exp_cfg.DataConfig, input_context=None):
"""Builds classification input."""
parser = video_ssl_input.Parser(input_params=params)
postprocess_fn = video_ssl_input.PostBatchProcessor(params)
reader = input_reader.InputReader(
params,
dataset_fn=self._get_dataset_fn(params),
decoder_fn=self._get_decoder_fn(params),
parser_fn=parser.parse_fn(params.is_training),
postprocess_fn=postprocess_fn)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self, model_outputs, num_replicas, model):
"""Sparse categorical cross entropy loss.
Args:
model_outputs: Output logits of the model.
num_replicas: distributed replica number.
model: keras model for calculating weight decay.
Returns:
      A dict of loss tensors and a dict of contrastive metrics.
"""
all_losses = {}
contrastive_metrics = {}
losses_config = self.task_config.losses
total_loss = None
contrastive_loss_dict = losses.contrastive_loss(
model_outputs, num_replicas, losses_config.normalize_hidden,
losses_config.temperature, model,
self.task_config.losses.l2_weight_decay)
total_loss = contrastive_loss_dict['total_loss']
all_losses.update({
'total_loss': total_loss
})
all_losses[self.loss] = total_loss
contrastive_metrics.update({
'contrast_acc': contrastive_loss_dict['contrast_acc'],
'contrast_entropy': contrastive_loss_dict['contrast_entropy'],
'reg_loss': contrastive_loss_dict['reg_loss']
})
return all_losses, contrastive_metrics
def build_metrics(self, training=True):
"""Gets streaming metrics for training/validation."""
metrics = [
tf.keras.metrics.Mean(name='contrast_acc'),
tf.keras.metrics.Mean(name='contrast_entropy'),
tf.keras.metrics.Mean(name='reg_loss')
]
return metrics
def process_metrics(self, metrics, contrastive_metrics):
"""Process and update metrics."""
contrastive_metric_values = contrastive_metrics.values()
for metric, contrastive_metric_value in zip(metrics,
contrastive_metric_values):
metric.update_state(contrastive_metric_value)
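    # Note (added observation): the zip above relies on `contrastive_metrics`
    # preserving its insertion order ('contrast_acc', 'contrast_entropy',
    # 'reg_loss'), which matches the order of the tf.keras.metrics.Mean
    # objects created in build_metrics.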
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, _ = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
if self.task_config.train_data.output_audio:
outputs = model(features, training=True)
else:
outputs = model(features['image'], training=True)
      # Casting the output layer as float32 is necessary when mixed_precision
      # is mixed_float16 or mixed_bfloat16 to ensure the output is cast as
      # float32.
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
all_losses, contrastive_metrics = self.build_losses(
model_outputs=outputs, num_replicas=num_replicas,
model=model)
loss = all_losses[self.loss]
scaled_loss = loss
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(
optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = all_losses
if metrics:
self.process_metrics(metrics, contrastive_metrics)
logs.update({m.name: m.result() for m in metrics})
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
raise NotImplementedError
def inference_step(self, features, model):
"""Performs the forward step."""
raise NotImplementedError
| 6,748 | 35.090909 | 78 | py |
models | models-master/official/projects/video_ssl/tasks/linear_eval.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video ssl linear evaluation task definition."""
from typing import Any, Optional, List, Tuple
from absl import logging
import tensorflow as tf
# pylint: disable=unused-import
from official.core import task_factory
from official.projects.video_ssl.configs import video_ssl as exp_cfg
from official.projects.video_ssl.modeling import video_ssl_model
from official.vision.tasks import video_classification
@task_factory.register_task_cls(exp_cfg.VideoSSLEvalTask)
class VideoSSLEvalTask(video_classification.VideoClassificationTask):
"""A task for video ssl linear evaluation."""
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
ckpt.read(ckpt_dir_or_file)
else:
raise NotImplementedError
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
model.backbone.trainable = False
logging.info('Setting the backbone to non-trainable.')
return super().train_step(inputs, model, optimizer, metrics)
| 2,521 | 35.028571 | 74 | py |
models | models-master/official/projects/video_ssl/losses/losses.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define losses."""
# Import libraries
import tensorflow as tf
from tensorflow.compiler.tf2xla.python import xla
def contrastive_loss(hidden,
num_replicas,
normalize_hidden,
temperature,
model,
weight_decay):
"""Computes contrastive loss.
Args:
hidden: embedding of video clips after projection head.
num_replicas: number of distributed replicas.
normalize_hidden: whether or not to l2 normalize the hidden vector.
temperature: temperature in the InfoNCE contrastive loss.
model: keras model for calculating weight decay.
weight_decay: weight decay parameter.
Returns:
    A dict with the total loss, contrastive loss, regularization loss,
    contrastive accuracy and contrastive entropy.
"""
large_num = 1e9
hidden1, hidden2 = tf.split(hidden, num_or_size_splits=2, axis=0)
if normalize_hidden:
hidden1 = tf.math.l2_normalize(hidden1, -1)
hidden2 = tf.math.l2_normalize(hidden2, -1)
batch_size = tf.shape(hidden1)[0]
if num_replicas == 1:
# This is the local version
hidden1_large = hidden1
hidden2_large = hidden2
labels = tf.one_hot(tf.range(batch_size), batch_size * 2)
masks = tf.one_hot(tf.range(batch_size), batch_size)
else:
# This is the cross-tpu version.
hidden1_large = tpu_cross_replica_concat(hidden1, num_replicas)
hidden2_large = tpu_cross_replica_concat(hidden2, num_replicas)
enlarged_batch_size = tf.shape(hidden1_large)[0]
replica_id = tf.cast(tf.cast(xla.replica_id(), tf.uint32), tf.int32)
labels_idx = tf.range(batch_size) + replica_id * batch_size
labels = tf.one_hot(labels_idx, enlarged_batch_size * 2)
masks = tf.one_hot(labels_idx, enlarged_batch_size)
logits_aa = tf.matmul(hidden1, hidden1_large, transpose_b=True) / temperature
logits_aa = logits_aa - tf.cast(masks, logits_aa.dtype) * large_num
logits_bb = tf.matmul(hidden2, hidden2_large, transpose_b=True) / temperature
logits_bb = logits_bb - tf.cast(masks, logits_bb.dtype) * large_num
logits_ab = tf.matmul(hidden1, hidden2_large, transpose_b=True) / temperature
logits_ba = tf.matmul(hidden2, hidden1_large, transpose_b=True) / temperature
loss_a = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels, tf.concat([logits_ab, logits_aa], 1)))
loss_b = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels, tf.concat([logits_ba, logits_bb], 1)))
loss = loss_a + loss_b
l2_loss = weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in model.trainable_variables
if 'kernel' in v.name
])
total_loss = loss + tf.cast(l2_loss, loss.dtype)
contrast_prob = tf.nn.softmax(logits_ab)
contrast_entropy = - tf.reduce_mean(
tf.reduce_sum(contrast_prob * tf.math.log(contrast_prob + 1e-8), -1))
contrast_acc = tf.equal(tf.argmax(labels, 1), tf.argmax(logits_ab, axis=1))
contrast_acc = tf.reduce_mean(tf.cast(contrast_acc, tf.float32))
return {
'total_loss': total_loss,
'contrastive_loss': loss,
'reg_loss': l2_loss,
'contrast_acc': contrast_acc,
'contrast_entropy': contrast_entropy,
}
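# A hedged shape sketch of the loss above: with a per-replica batch of B clips
# (so `hidden` holds 2B projected views) and R replicas, logits_ab has shape
# [B, B * R] with each row's positive at that clip's global index, and the
# concatenation [logits_ab, logits_aa] of shape [B, 2 * B * R] feeds the
# softmax cross-entropy against the one-hot `labels`.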
def tpu_cross_replica_concat(tensor, num_replicas):
"""Reduce a concatenation of the `tensor` across TPU cores.
Args:
tensor: tensor to concatenate.
num_replicas: number of TPU device replicas.
Returns:
Tensor of the same rank as `tensor` with first dimension `num_replicas`
times larger.
"""
with tf.name_scope('tpu_cross_replica_concat'):
# This creates a tensor that is like the input tensor but has an added
# replica dimension as the outermost dimension. On each replica it will
# contain the local values and zeros for all other values that need to be
# fetched from other replicas.
ext_tensor = tf.scatter_nd(
indices=[[xla.replica_id()]],
updates=[tensor],
shape=[num_replicas] + tensor.shape.as_list())
# As every value is only present on one replica and 0 in all others, adding
# them all together will result in the full tensor on all replicas.
replica_context = tf.distribute.get_replica_context()
ext_tensor = replica_context.all_reduce(tf.distribute.ReduceOp.SUM,
ext_tensor)
# Flatten the replica dimension.
# The first dimension size will be: tensor.shape[0] * num_replicas
# Using [-1] trick to support also scalar input.
return tf.reshape(ext_tensor, [-1] + ext_tensor.shape.as_list()[2:])
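# Illustrative example (a sketch): with num_replicas=2 and a [4, 128] tensor
# on each replica, the scatter_nd produces a [2, 4, 128] tensor holding only
# the local slice, the all-reduce fills in the other replica's slice, and the
# final reshape yields an [8, 128] tensor that is identical on both replicas.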
| 5,173 | 37.044118 | 79 | py |
models | models-master/official/projects/panoptic/serving/panoptic_deeplab.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic Segmentation input and model functions for serving/inference."""
from typing import List
import tensorflow as tf
from official.core import config_definitions as cfg
from official.projects.panoptic.modeling import factory
from official.projects.panoptic.modeling import panoptic_deeplab_model
from official.vision.serving import semantic_segmentation
class PanopticSegmentationModule(
semantic_segmentation.SegmentationModule):
"""Panoptic Deeplab Segmentation Module."""
def __init__(self,
params: cfg.ExperimentConfig,
*,
model: tf.keras.Model,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3):
"""Initializes panoptic segmentation module for export."""
if batch_size is None:
raise ValueError('batch_size cannot be None for panoptic segmentation '
'model.')
if not isinstance(model, panoptic_deeplab_model.PanopticDeeplabModel):
raise ValueError('PanopticSegmentationModule module not '
'implemented for {} model.'.format(type(model)))
params.task.train_data.preserve_aspect_ratio = True
super(PanopticSegmentationModule, self).__init__(
params=params,
model=model,
batch_size=batch_size,
input_image_size=input_image_size,
num_channels=num_channels)
def _build_model(self):
input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
self._input_image_size + [3])
return factory.build_panoptic_deeplab(
input_specs=input_specs,
model_config=self.params.task.model,
l2_regularizer=None)
def serve(self, images: tf.Tensor):
"""Cast image to float and run inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
Tensor holding detection output logits.
"""
if self._input_type != 'tflite':
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
images_spec = tf.TensorSpec(
shape=self._input_image_size + [3], dtype=tf.float32)
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs,
elems=images,
fn_output_signature=(images_spec, image_info_spec),
parallel_iterations=32))
outputs = self.model.call(
inputs=images, image_info=image_info, training=False)
masks = outputs['segmentation_outputs']
masks = tf.image.resize(masks, self._input_image_size, method='bilinear')
classes = tf.math.argmax(masks, axis=-1)
scores = tf.nn.softmax(masks, axis=-1)
final_outputs = {
'semantic_logits': masks,
'semantic_scores': scores,
'semantic_classes': classes,
'image_info': image_info,
'panoptic_category_mask': outputs['category_mask'],
'panoptic_instance_mask': outputs['instance_mask'],
}
return final_outputs
| 3,757 | 35.134615 | 77 | py |
models | models-master/official/projects/panoptic/serving/export_saved_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Panoptic MaskRCNN model export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
CHECKPOINT_PATH = XX
EXPORT_DIR_PATH = XX
CONFIG_FILE_PATH = XX
export_saved_model --export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--config_file=${CONFIG_FILE_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from absl import app
from absl import flags
import tensorflow as tf
from official.core import exp_factory
from official.modeling import hyperparams
# pylint: disable=unused-import
from official.projects.panoptic.configs import panoptic_deeplab as panoptic_deeplab_cfg
from official.projects.panoptic.configs import panoptic_maskrcnn as panoptic_maskrcnn_cfg
# pylint: enable=unused-import
from official.projects.panoptic.modeling import factory
from official.projects.panoptic.serving import panoptic_deeplab
from official.projects.panoptic.serving import panoptic_maskrcnn
# pylint: disable=unused-import
from official.projects.panoptic.tasks import panoptic_deeplab as panoptic_deeplab_task
from official.projects.panoptic.tasks import panoptic_maskrcnn as panoptic_maskrcnn_task
# pylint: enable=unused-import
from official.vision.serving import export_saved_model_lib
FLAGS = flags.FLAGS
flags.DEFINE_string('model', 'panoptic_maskrcnn',
'model type, one of panoptic_maskrcnn and panoptic_deeplab')
flags.DEFINE_string('experiment', 'panoptic_fpn_coco',
'experiment type, e.g. panoptic_fpn_coco')
flags.DEFINE_string('export_dir', None, 'The export directory.')
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path.')
flags.DEFINE_multi_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
flags.DEFINE_string(
'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be '
    'overridden on top of `config_file` template.')
flags.DEFINE_integer('batch_size', None, 'The batch size.')
flags.DEFINE_string('input_type', 'image_tensor',
'One of `image_tensor`, `image_bytes`, `tf_example`.')
flags.DEFINE_string(
'input_image_size', '224,224',
'The comma-separated string of two integers representing the height,width '
'of the input to the model.')
def main(_):
params = exp_factory.get_exp_config(FLAGS.experiment)
for config_file in FLAGS.config_file or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
if FLAGS.params_override:
params = hyperparams.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.validate()
params.lock()
input_image_size = [int(x) for x in FLAGS.input_image_size.split(',')]
input_specs = tf.keras.layers.InputSpec(
shape=[FLAGS.batch_size, *input_image_size, 3])
if FLAGS.model == 'panoptic_deeplab':
build_model = factory.build_panoptic_deeplab
panoptic_module = panoptic_deeplab.PanopticSegmentationModule
elif FLAGS.model == 'panoptic_maskrcnn':
build_model = factory.build_panoptic_maskrcnn
panoptic_module = panoptic_maskrcnn.PanopticSegmentationModule
else:
raise ValueError('Unsupported model type: %s' % FLAGS.model)
model = build_model(input_specs=input_specs, model_config=params.task.model)
export_module = panoptic_module(
params=params,
model=model,
batch_size=FLAGS.batch_size,
input_image_size=[int(x) for x in FLAGS.input_image_size.split(',')],
num_channels=3)
export_saved_model_lib.export_inference_graph(
input_type=FLAGS.input_type,
batch_size=FLAGS.batch_size,
input_image_size=input_image_size,
params=params,
checkpoint_path=FLAGS.checkpoint_path,
export_dir=FLAGS.export_dir,
export_module=export_module,
export_checkpoint_subdir='checkpoint',
export_saved_model_subdir='saved_model')
if __name__ == '__main__':
app.run(main)
| 5,183 | 38.572519 | 89 | py |
models | models-master/official/projects/panoptic/serving/panoptic_maskrcnn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic Segmentation input and model functions for serving/inference."""
from typing import List
import tensorflow as tf
from official.core import config_definitions as cfg
from official.projects.panoptic.modeling import panoptic_maskrcnn_model
from official.vision.serving import detection
class PanopticSegmentationModule(detection.DetectionModule):
"""Panoptic Segmentation Module."""
def __init__(self,
params: cfg.ExperimentConfig,
*,
model: tf.keras.Model,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3):
"""Initializes panoptic segmentation module for export."""
if batch_size is None:
raise ValueError('batch_size cannot be None for panoptic segmentation '
'model.')
if not isinstance(model, panoptic_maskrcnn_model.PanopticMaskRCNNModel):
raise ValueError('PanopticSegmentationModule module not implemented for '
'{} model.'.format(type(model)))
super().__init__(
params=params,
model=model,
batch_size=batch_size,
input_image_size=input_image_size,
num_channels=num_channels)
def serve(self, images: tf.Tensor):
"""Casts image to float and run inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
      A dictionary of detection, semantic segmentation and (optionally)
      panoptic output tensors.
"""
model_params = self.params.task.model
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
# Tensor Specs for map_fn outputs (images, anchor_boxes, and image_info).
images_spec = tf.TensorSpec(shape=self._input_image_size + [3],
dtype=tf.float32)
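    # Each feature-map location stores `num_scales * len(aspect_ratios)`
    # anchors, and each anchor contributes 4 box coordinates, all packed into
    # the last (channel) dimension of the per-level anchor tensors below.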
num_anchors = model_params.anchor.num_scales * len(
model_params.anchor.aspect_ratios) * 4
anchor_shapes = []
for level in range(model_params.min_level, model_params.max_level + 1):
anchor_level_spec = tf.TensorSpec(
shape=[
self._input_image_size[0] // 2**level,
self._input_image_size[1] // 2**level, num_anchors
],
dtype=tf.float32)
anchor_shapes.append((str(level), anchor_level_spec))
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, anchor_boxes, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs,
elems=images,
fn_output_signature=(images_spec, dict(anchor_shapes),
image_info_spec),
parallel_iterations=32))
    # To work around a keras.Model limitation when saving a model whose layers
    # take multiple inputs, we use `model.call` here to trigger the forward
    # pass. Note that this bypasses some of the Keras magic in `__call__`.
detections = self.model.call(
images=images,
image_info=image_info,
anchor_boxes=anchor_boxes,
training=False)
detections.pop('rpn_boxes')
detections.pop('rpn_scores')
detections.pop('cls_outputs')
detections.pop('box_outputs')
detections.pop('backbone_features')
detections.pop('decoder_features')
if model_params.detection_generator.apply_nms:
# Normalize detection boxes to [0, 1]. Here we first map them to the
# original image size, then normalize them to [0, 1].
detections['detection_boxes'] = (
detections['detection_boxes'] /
tf.tile(image_info[:, 2:3, :], [1, 1, 2]) /
tf.tile(image_info[:, 0:1, :], [1, 1, 2]))
final_outputs = {
'detection_boxes': detections['detection_boxes'],
'detection_scores': detections['detection_scores'],
'detection_classes': detections['detection_classes'],
'num_detections': detections['num_detections']
}
if 'detection_outer_boxes' in detections:
detections['detection_outer_boxes'] = (
detections['detection_outer_boxes'] /
tf.tile(image_info[:, 2:3, :], [1, 1, 2]) /
tf.tile(image_info[:, 0:1, :], [1, 1, 2]))
final_outputs['detection_outer_boxes'] = (
detections['detection_outer_boxes'])
else:
final_outputs = {
'decoded_boxes': detections['decoded_boxes'],
'decoded_box_scores': detections['decoded_box_scores']
}
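    # Post-process the semantic segmentation branch: resize the logits to the
    # model input resolution, take the per-pixel argmax for classes, and turn
    # logits into scores with sigmoid or softmax depending on whether the
    # semantic segmentation loss was binary cross-entropy.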
masks = detections['segmentation_outputs']
masks = tf.image.resize(masks, self._input_image_size, method='bilinear')
classes = tf.math.argmax(masks, axis=-1)
if self.params.task.losses.semantic_segmentation_use_binary_cross_entropy:
scores = tf.nn.sigmoid(masks)
else:
scores = tf.nn.softmax(masks, axis=-1)
final_outputs.update({
'detection_masks': detections['detection_masks'],
'semantic_logits': masks,
'semantic_scores': scores,
'semantic_classes': classes,
'image_info': image_info
})
if model_params.generate_panoptic_masks:
final_outputs.update({
'panoptic_category_mask':
detections['panoptic_outputs']['category_mask'],
'panoptic_instance_mask':
detections['panoptic_outputs']['instance_mask'],
})
return final_outputs
| 5,945 | 37.115385 | 79 | py |
models | models-master/official/projects/panoptic/modeling/panoptic_deeplab_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build Panoptic Deeplab model."""
from typing import Any, Mapping, Optional, Union
import tensorflow as tf
from official.projects.panoptic.modeling.layers import panoptic_deeplab_merge
@tf.keras.utils.register_keras_serializable(package='Vision')
class PanopticDeeplabModel(tf.keras.Model):
"""Panoptic Deeplab model."""
def __init__(
self,
backbone: tf.keras.Model,
semantic_decoder: tf.keras.Model,
semantic_head: tf.keras.layers.Layer,
instance_head: tf.keras.layers.Layer,
instance_decoder: Optional[tf.keras.Model] = None,
post_processor: Optional[panoptic_deeplab_merge.PostProcessor] = None,
**kwargs):
"""Panoptic deeplab model initializer.
Args:
backbone: a backbone network.
semantic_decoder: a decoder network. E.g. FPN.
semantic_head: segmentation head.
instance_head: instance center head.
instance_decoder: Optional decoder network for instance predictions.
post_processor: Optional post processor layer.
**kwargs: keyword arguments to be passed.
"""
super(PanopticDeeplabModel, self).__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'semantic_decoder': semantic_decoder,
'instance_decoder': instance_decoder,
'semantic_head': semantic_head,
'instance_head': instance_head,
'post_processor': post_processor
}
self.backbone = backbone
self.semantic_decoder = semantic_decoder
self.instance_decoder = instance_decoder
self.semantic_head = semantic_head
self.instance_head = instance_head
self.post_processor = post_processor
def call( # pytype: disable=signature-mismatch # overriding-parameter-count-checks
self, inputs: tf.Tensor,
image_info: tf.Tensor,
training: bool = None):
if training is None:
training = tf.keras.backend.learning_phase()
backbone_features = self.backbone(inputs, training=training)
semantic_features = self.semantic_decoder(
backbone_features, training=training)
if self.instance_decoder is None:
instance_features = semantic_features
else:
instance_features = self.instance_decoder(
backbone_features, training=training)
segmentation_outputs = self.semantic_head(
(backbone_features, semantic_features),
training=training)
instance_outputs = self.instance_head(
(backbone_features, instance_features),
training=training)
outputs = {
'segmentation_outputs': segmentation_outputs,
'instance_centers_heatmap':
instance_outputs['instance_centers_heatmap'],
'instance_centers_offset':
instance_outputs['instance_centers_offset'],
}
if training:
return outputs
if self.post_processor is not None:
panoptic_masks = self.post_processor(outputs, image_info)
outputs.update(panoptic_masks)
return outputs
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(
backbone=self.backbone,
semantic_decoder=self.semantic_decoder,
semantic_head=self.semantic_head,
instance_head=self.instance_head)
if self.instance_decoder is not None:
items.update(instance_decoder=self.instance_decoder)
return items
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
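# Example of partially restoring pretrained weights through `checkpoint_items`
# (a minimal sketch; the checkpoint path is an assumption):
#   model = PanopticDeeplabModel(backbone, semantic_decoder, semantic_head,
#                                instance_head)
#   ckpt = tf.train.Checkpoint(**model.checkpoint_items)
#   ckpt.restore('/path/to/pretrained_ckpt').expect_partial()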
| 4,220 | 33.317073 | 86 | py |
models | models-master/official/projects/panoptic/modeling/panoptic_maskrcnn_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic Segmentation model."""
from typing import List, Mapping, Optional, Union
import tensorflow as tf
from official.projects.deepmac_maskrcnn.modeling import maskrcnn_model
class PanopticMaskRCNNModel(maskrcnn_model.DeepMaskRCNNModel):
"""The Panoptic Segmentation model."""
def __init__(
self,
backbone: tf.keras.Model,
decoder: tf.keras.Model,
rpn_head: tf.keras.layers.Layer,
detection_head: Union[tf.keras.layers.Layer,
List[tf.keras.layers.Layer]],
roi_generator: tf.keras.layers.Layer,
roi_sampler: Union[tf.keras.layers.Layer,
List[tf.keras.layers.Layer]],
roi_aligner: tf.keras.layers.Layer,
detection_generator: tf.keras.layers.Layer,
panoptic_segmentation_generator: Optional[tf.keras.layers.Layer] = None,
mask_head: Optional[tf.keras.layers.Layer] = None,
mask_sampler: Optional[tf.keras.layers.Layer] = None,
mask_roi_aligner: Optional[tf.keras.layers.Layer] = None,
segmentation_backbone: Optional[tf.keras.Model] = None,
segmentation_decoder: Optional[tf.keras.Model] = None,
segmentation_head: tf.keras.layers.Layer = None,
class_agnostic_bbox_pred: bool = False,
cascade_class_ensemble: bool = False,
min_level: Optional[int] = None,
max_level: Optional[int] = None,
num_scales: Optional[int] = None,
aspect_ratios: Optional[List[float]] = None,
anchor_size: Optional[float] = None,
outer_boxes_scale: float = 1.0,
use_gt_boxes_for_masks: bool = False, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Initializes the Panoptic Mask R-CNN model.
Args:
backbone: `tf.keras.Model`, the backbone network.
decoder: `tf.keras.Model`, the decoder network.
rpn_head: the RPN head.
detection_head: the detection head or a list of heads.
roi_generator: the ROI generator.
roi_sampler: a single ROI sampler or a list of ROI samplers for cascade
detection heads.
roi_aligner: the ROI aligner.
detection_generator: the detection generator.
panoptic_segmentation_generator: the panoptic segmentation generator that
is used to merge instance and semantic segmentation masks.
mask_head: the mask head.
mask_sampler: the mask sampler.
      mask_roi_aligner: the ROI aligner for mask prediction.
segmentation_backbone: `tf.keras.Model`, the backbone network for the
segmentation head for panoptic task. Providing `segmentation_backbone`
        will allow the segmentation head to use a standalone backbone. Setting
`segmentation_backbone=None` would enable backbone sharing between the
MaskRCNN model and segmentation head.
segmentation_decoder: `tf.keras.Model`, the decoder network for the
segmentation head for panoptic task. Providing `segmentation_decoder`
        will allow the segmentation head to use a standalone decoder. Setting
`segmentation_decoder=None` would enable decoder sharing between the
MaskRCNN model and segmentation head. Decoders can only be shared when
`segmentation_backbone` is shared as well.
      segmentation_head: segmentation head for panoptic task.
class_agnostic_bbox_pred: if True, perform class agnostic bounding box
prediction. Needs to be `True` for Cascade RCNN models.
cascade_class_ensemble: if True, ensemble classification scores over all
detection heads.
min_level: Minimum level in output feature maps.
max_level: Maximum level in output feature maps.
      num_scales: A number representing intermediate scales added on each level.
        For instance, num_scales=2 adds one additional intermediate anchor
        scale, yielding scales [2^0, 2^0.5] on each level.
      aspect_ratios: A list representing the aspect ratio anchors added on each
        level. Each number indicates the ratio of width to height. For
        instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
        scale level.
anchor_size: A number representing the scale of size of the base anchor to
the feature stride 2^level.
outer_boxes_scale: a float to scale up the bounding boxes to generate
more inclusive masks. The scale is expected to be >=1.0.
use_gt_boxes_for_masks: `bool`, whether to use only gt boxes for masks.
**kwargs: keyword arguments to be passed.
"""
super().__init__(
backbone=backbone,
decoder=decoder,
rpn_head=rpn_head,
detection_head=detection_head,
roi_generator=roi_generator,
roi_sampler=roi_sampler,
roi_aligner=roi_aligner,
detection_generator=detection_generator,
mask_head=mask_head,
mask_sampler=mask_sampler,
mask_roi_aligner=mask_roi_aligner,
class_agnostic_bbox_pred=class_agnostic_bbox_pred,
cascade_class_ensemble=cascade_class_ensemble,
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=anchor_size,
outer_boxes_scale=outer_boxes_scale,
use_gt_boxes_for_masks=use_gt_boxes_for_masks,
**kwargs)
self._config_dict.update({
'segmentation_backbone': segmentation_backbone,
'segmentation_decoder': segmentation_decoder,
'segmentation_head': segmentation_head
})
if panoptic_segmentation_generator is not None:
self._config_dict.update(
{'panoptic_segmentation_generator': panoptic_segmentation_generator})
if not self._include_mask:
raise ValueError(
'`mask_head` needs to be provided for Panoptic Mask R-CNN.')
if segmentation_backbone is not None and segmentation_decoder is None:
raise ValueError(
          '`segmentation_decoder` needs to be provided for Panoptic Mask '
          'R-CNN if `backbone` is not shared.')
self.segmentation_backbone = segmentation_backbone
self.segmentation_decoder = segmentation_decoder
self.segmentation_head = segmentation_head
self.panoptic_segmentation_generator = panoptic_segmentation_generator
def call(self,
images: tf.Tensor,
image_info: tf.Tensor,
anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,
gt_boxes: Optional[tf.Tensor] = None,
gt_classes: Optional[tf.Tensor] = None,
gt_masks: Optional[tf.Tensor] = None,
gt_outer_boxes: Optional[tf.Tensor] = None,
training: Optional[bool] = None) -> Mapping[str, tf.Tensor]:
image_shape = image_info[:, 1, :]
model_kwargs = {
'images': images,
'image_shape': image_shape,
'anchor_boxes': anchor_boxes,
'gt_boxes': gt_boxes,
'gt_classes': gt_classes,
'gt_masks': gt_masks,
'training': training,
}
if self.outer_boxes_scale > 1.0:
model_kwargs['gt_outer_boxes'] = gt_outer_boxes
model_outputs = super().call(**model_kwargs)
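    # Run the semantic segmentation branch. Unless standalone modules were
    # provided, it reuses the backbone and decoder features computed by the
    # Mask R-CNN forward pass above.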
if self.segmentation_backbone is not None:
backbone_features = self.segmentation_backbone(images, training=training)
else:
backbone_features = model_outputs['backbone_features']
if self.segmentation_decoder is not None:
decoder_features = self.segmentation_decoder(
backbone_features, training=training)
else:
decoder_features = model_outputs['decoder_features']
segmentation_outputs = self.segmentation_head(
(backbone_features, decoder_features), training=training)
model_outputs.update({
'segmentation_outputs': segmentation_outputs,
})
if not training and self.panoptic_segmentation_generator is not None:
panoptic_outputs = self.panoptic_segmentation_generator(
model_outputs, image_info=image_info)
model_outputs.update({'panoptic_outputs': panoptic_outputs})
return model_outputs
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = super().checkpoint_items
if self.segmentation_backbone is not None:
items.update(segmentation_backbone=self.segmentation_backbone)
if self.segmentation_decoder is not None:
items.update(segmentation_decoder=self.segmentation_decoder)
items.update(segmentation_head=self.segmentation_head)
return items
| 9,102 | 42.555024 | 102 | py |
models | models-master/official/projects/panoptic/modeling/factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory method to build panoptic segmentation model."""
from typing import Optional
import tensorflow as tf
from official.projects.deepmac_maskrcnn.tasks import deep_mask_head_rcnn
from official.projects.panoptic.configs import panoptic_deeplab as panoptic_deeplab_cfg
from official.projects.panoptic.configs import panoptic_maskrcnn as panoptic_maskrcnn_cfg
from official.projects.panoptic.modeling import panoptic_deeplab_model
from official.projects.panoptic.modeling import panoptic_maskrcnn_model
from official.projects.panoptic.modeling.heads import panoptic_deeplab_heads
from official.projects.panoptic.modeling.layers import panoptic_deeplab_merge
from official.projects.panoptic.modeling.layers import panoptic_segmentation_generator
from official.vision.modeling import backbones
from official.vision.modeling.decoders import factory as decoder_factory
from official.vision.modeling.heads import segmentation_heads
def build_panoptic_maskrcnn(
input_specs: tf.keras.layers.InputSpec,
model_config: panoptic_maskrcnn_cfg.PanopticMaskRCNN,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds Panoptic Mask R-CNN model.
  This factory function builds the Mask R-CNN model first, then builds the
  non-shared semantic segmentation layers, and finally combines the two models
  to form the panoptic segmentation model.
Args:
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
model_config: Config instance for the panoptic maskrcnn model.
l2_regularizer: Optional `tf.keras.regularizers.Regularizer`, if specified,
the model is built with the provided regularization layer.
Returns:
tf.keras.Model for the panoptic segmentation model.
"""
norm_activation_config = model_config.norm_activation
segmentation_config = model_config.segmentation_model
# Builds the maskrcnn model.
maskrcnn_model = deep_mask_head_rcnn.build_maskrcnn(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
# Builds the semantic segmentation branch.
if not model_config.shared_backbone:
segmentation_backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=segmentation_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
segmentation_decoder_input_specs = segmentation_backbone.output_specs
else:
segmentation_backbone = None
segmentation_decoder_input_specs = maskrcnn_model.backbone.output_specs
if not model_config.shared_decoder:
segmentation_decoder = decoder_factory.build_decoder(
input_specs=segmentation_decoder_input_specs,
model_config=segmentation_config,
l2_regularizer=l2_regularizer)
decoder_config = segmentation_decoder.get_config()
else:
segmentation_decoder = None
decoder_config = maskrcnn_model.decoder.get_config()
segmentation_head_config = segmentation_config.head
detection_head_config = model_config.detection_head
postprocessing_config = model_config.panoptic_segmentation_generator
segmentation_head = segmentation_heads.SegmentationHead(
num_classes=segmentation_config.num_classes,
level=segmentation_head_config.level,
num_convs=segmentation_head_config.num_convs,
prediction_kernel_size=segmentation_head_config.prediction_kernel_size,
num_filters=segmentation_head_config.num_filters,
upsample_factor=segmentation_head_config.upsample_factor,
feature_fusion=segmentation_head_config.feature_fusion,
decoder_min_level=segmentation_head_config.decoder_min_level,
decoder_max_level=segmentation_head_config.decoder_max_level,
low_level=segmentation_head_config.low_level,
low_level_num_filters=segmentation_head_config.low_level_num_filters,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
num_decoder_filters=decoder_config['num_filters'],
kernel_regularizer=l2_regularizer)
if model_config.generate_panoptic_masks:
max_num_detections = model_config.detection_generator.max_num_detections
mask_binarize_threshold = postprocessing_config.mask_binarize_threshold
panoptic_segmentation_generator_obj = (
panoptic_segmentation_generator.PanopticSegmentationGeneratorV2(
output_size=postprocessing_config.output_size,
max_num_detections=max_num_detections,
stuff_classes_offset=model_config.stuff_classes_offset,
mask_binarize_threshold=mask_binarize_threshold,
score_threshold=postprocessing_config.score_threshold,
things_overlap_threshold=postprocessing_config
.things_overlap_threshold,
things_class_label=postprocessing_config.things_class_label,
stuff_area_threshold=postprocessing_config.stuff_area_threshold,
void_class_label=postprocessing_config.void_class_label,
void_instance_id=postprocessing_config.void_instance_id,
rescale_predictions=postprocessing_config.rescale_predictions))
else:
panoptic_segmentation_generator_obj = None
  # Combines the maskrcnn and segmentation models to build the panoptic
  # segmentation model.
model = panoptic_maskrcnn_model.PanopticMaskRCNNModel(
backbone=maskrcnn_model.backbone,
decoder=maskrcnn_model.decoder,
rpn_head=maskrcnn_model.rpn_head,
detection_head=maskrcnn_model.detection_head,
roi_generator=maskrcnn_model.roi_generator,
roi_sampler=maskrcnn_model.roi_sampler,
roi_aligner=maskrcnn_model.roi_aligner,
detection_generator=maskrcnn_model.detection_generator,
panoptic_segmentation_generator=panoptic_segmentation_generator_obj,
mask_head=maskrcnn_model.mask_head,
mask_sampler=maskrcnn_model.mask_sampler,
mask_roi_aligner=maskrcnn_model.mask_roi_aligner,
segmentation_backbone=segmentation_backbone,
segmentation_decoder=segmentation_decoder,
segmentation_head=segmentation_head,
class_agnostic_bbox_pred=detection_head_config.class_agnostic_bbox_pred,
cascade_class_ensemble=detection_head_config.cascade_class_ensemble,
min_level=model_config.min_level,
max_level=model_config.max_level,
num_scales=model_config.anchor.num_scales,
aspect_ratios=model_config.anchor.aspect_ratios,
anchor_size=model_config.anchor.anchor_size,
outer_boxes_scale=maskrcnn_model.outer_boxes_scale)
return model
def build_panoptic_deeplab(
input_specs: tf.keras.layers.InputSpec,
model_config: panoptic_deeplab_cfg.PanopticDeeplab,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds Panoptic Deeplab model.
Args:
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
model_config: Config instance for the panoptic deeplab model.
l2_regularizer: Optional `tf.keras.regularizers.Regularizer`, if specified,
the model is built with the provided regularization layer.
Returns:
tf.keras.Model for the panoptic segmentation model.
"""
norm_activation_config = model_config.norm_activation
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
semantic_decoder = decoder_factory.build_decoder(
input_specs=backbone.output_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
if model_config.shared_decoder:
instance_decoder = None
else:
    # The semantic and instance branches share the same decoder type.
instance_decoder = decoder_factory.build_decoder(
input_specs=backbone.output_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
semantic_head_config = model_config.semantic_head
instance_head_config = model_config.instance_head
semantic_head = panoptic_deeplab_heads.SemanticHead(
num_classes=model_config.num_classes,
level=semantic_head_config.level,
num_convs=semantic_head_config.num_convs,
kernel_size=semantic_head_config.kernel_size,
prediction_kernel_size=semantic_head_config.prediction_kernel_size,
num_filters=semantic_head_config.num_filters,
use_depthwise_convolution=semantic_head_config.use_depthwise_convolution,
upsample_factor=semantic_head_config.upsample_factor,
low_level=semantic_head_config.low_level,
low_level_num_filters=semantic_head_config.low_level_num_filters,
fusion_num_output_filters=semantic_head_config.fusion_num_output_filters,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
instance_head = panoptic_deeplab_heads.InstanceHead(
level=instance_head_config.level,
num_convs=instance_head_config.num_convs,
kernel_size=instance_head_config.kernel_size,
prediction_kernel_size=instance_head_config.prediction_kernel_size,
num_filters=instance_head_config.num_filters,
use_depthwise_convolution=instance_head_config.use_depthwise_convolution,
upsample_factor=instance_head_config.upsample_factor,
low_level=instance_head_config.low_level,
low_level_num_filters=instance_head_config.low_level_num_filters,
fusion_num_output_filters=instance_head_config.fusion_num_output_filters,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
if model_config.generate_panoptic_masks:
post_processing_config = model_config.post_processor
post_processor = panoptic_deeplab_merge.PostProcessor(
output_size=post_processing_config.output_size,
center_score_threshold=post_processing_config.center_score_threshold,
thing_class_ids=post_processing_config.thing_class_ids,
label_divisor=post_processing_config.label_divisor,
stuff_area_limit=post_processing_config.stuff_area_limit,
ignore_label=post_processing_config.ignore_label,
nms_kernel=post_processing_config.nms_kernel,
keep_k_centers=post_processing_config.keep_k_centers,
rescale_predictions=post_processing_config.rescale_predictions)
else:
post_processor = None
model = panoptic_deeplab_model.PanopticDeeplabModel(
backbone=backbone,
semantic_decoder=semantic_decoder,
instance_decoder=instance_decoder,
semantic_head=semantic_head,
instance_head=instance_head,
post_processor=post_processor)
return model
| 11,721 | 45.149606 | 139 | py |
models | models-master/official/projects/panoptic/modeling/layers/panoptic_deeplab_merge.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains functions to post-process Panoptic-DeepLab results.
Note that the postprocessing class and the supporting functions are branched
from:
https://github.com/google-research/deeplab2/blob/main/model/post_processor/panoptic_deeplab.py
with minor changes.
"""
import functools
from typing import Dict, List, Text, Tuple
import tensorflow as tf
from official.projects.panoptic.ops import mask_ops
def _add_zero_padding(input_tensor: tf.Tensor, kernel_size: int,
rank: int) -> tf.Tensor:
"""Adds zero-padding to the input_tensor."""
pad_total = kernel_size - 1
pad_begin = pad_total // 2
pad_end = pad_total - pad_begin
if rank == 3:
return tf.pad(
input_tensor,
paddings=[[pad_begin, pad_end], [pad_begin, pad_end], [0, 0]])
else:
return tf.pad(
input_tensor,
paddings=[[0, 0], [pad_begin, pad_end], [pad_begin, pad_end], [0, 0]])
def _get_semantic_predictions(semantic_logits: tf.Tensor) -> tf.Tensor:
"""Computes the semantic classes from the predictions.
Args:
semantic_logits: A tf.tensor of shape [batch, height, width, classes].
Returns:
A tf.Tensor containing the semantic class prediction of shape
[batch, height, width].
"""
return tf.argmax(semantic_logits, axis=-1, output_type=tf.int32)
def _get_instance_centers_from_heatmap(
center_heatmap: tf.Tensor,
center_threshold: float,
nms_kernel_size: int,
keep_k_centers: int) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes a list of instance centers.
Args:
center_heatmap: A tf.Tensor of shape [height, width, 1].
center_threshold: A float setting the threshold for the center heatmap.
nms_kernel_size: An integer specifying the nms kernel size.
keep_k_centers: An integer specifying the number of centers to keep (K).
Non-positive values will keep all centers.
Returns:
A tuple of
- tf.Tensor of shape [N, 2] containing N center coordinates (after
non-maximum suppression) in (y, x) order.
- tf.Tensor of shape [height, width] containing the center heatmap after
non-maximum suppression.
"""
# Threshold center map.
center_heatmap = tf.where(
tf.greater(center_heatmap, center_threshold), center_heatmap, 0.0)
# Non-maximum suppression.
padded_map = _add_zero_padding(center_heatmap, nms_kernel_size, rank=3)
pooled_center_heatmap = tf.keras.backend.pool2d(
tf.expand_dims(padded_map, 0),
pool_size=(nms_kernel_size, nms_kernel_size),
strides=(1, 1),
padding='valid',
pool_mode='max')
center_heatmap = tf.where(
tf.equal(pooled_center_heatmap, center_heatmap), center_heatmap, 0.0)
center_heatmap = tf.squeeze(center_heatmap, axis=[0, 3])
# `centers` is of shape (N, 2) with (y, x) order of the second dimension.
centers = tf.where(tf.greater(center_heatmap, 0.0))
if keep_k_centers > 0 and tf.shape(centers)[0] > keep_k_centers:
topk_scores, _ = tf.math.top_k(
tf.reshape(center_heatmap, [-1]), keep_k_centers, sorted=False)
centers = tf.where(tf.greater(center_heatmap, topk_scores[-1]))
return centers, center_heatmap
def _find_closest_center_per_pixel(centers: tf.Tensor,
center_offsets: tf.Tensor) -> tf.Tensor:
"""Assigns all pixels to their closest center.
Args:
centers: A tf.Tensor of shape [N, 2] containing N centers with coordinate
order (y, x).
center_offsets: A tf.Tensor of shape [height, width, 2].
Returns:
A tf.Tensor of shape [height, width] containing the index of the closest
center, per pixel.
"""
height = tf.shape(center_offsets)[0]
width = tf.shape(center_offsets)[1]
x_coord, y_coord = tf.meshgrid(tf.range(width), tf.range(height))
coord = tf.stack([y_coord, x_coord], axis=-1)
center_per_pixel = tf.cast(coord, tf.float32) + center_offsets
# centers: [N, 2] -> [N, 1, 2].
# center_per_pixel: [H, W, 2] -> [1, H*W, 2].
centers = tf.cast(tf.expand_dims(centers, 1), tf.float32)
center_per_pixel = tf.reshape(center_per_pixel, [height*width, 2])
center_per_pixel = tf.expand_dims(center_per_pixel, 0)
# distances: [N, H*W].
distances = tf.norm(centers - center_per_pixel, axis=-1)
return tf.reshape(tf.argmin(distances, axis=0), [height, width])
def _get_instances_from_heatmap_and_offset(
semantic_segmentation: tf.Tensor, center_heatmap: tf.Tensor,
center_offsets: tf.Tensor, center_threshold: float,
thing_class_ids: tf.Tensor, nms_kernel_size: int,
keep_k_centers: int) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Computes the instance assignment per pixel.
Args:
semantic_segmentation: A tf.Tensor containing the semantic labels of shape
[height, width].
center_heatmap: A tf.Tensor of shape [height, width, 1].
center_offsets: A tf.Tensor of shape [height, width, 2].
center_threshold: A float setting the threshold for the center heatmap.
thing_class_ids: A tf.Tensor of shape [N] containing N thing indices.
nms_kernel_size: An integer specifying the nms kernel size.
keep_k_centers: An integer specifying the number of centers to keep.
Negative values will keep all centers.
Returns:
A tuple of:
- tf.Tensor containing the instance segmentation (filtered with the `thing`
segmentation from the semantic segmentation output) with shape
[height, width].
- tf.Tensor containing the processed centermap with shape [height, width].
    - tf.Tensor containing instance scores (where a higher "score" is a
      reasonable signal of a higher confidence detection). Will be of shape
      [height, width]
with the score for a pixel being the score of the instance it belongs to.
The scores will be zero for pixels in background/"stuff" regions.
"""
thing_segmentation = tf.zeros_like(semantic_segmentation)
for thing_id in thing_class_ids:
thing_segmentation = tf.where(tf.equal(semantic_segmentation, thing_id),
1,
thing_segmentation)
centers, processed_center_heatmap = _get_instance_centers_from_heatmap(
center_heatmap, center_threshold, nms_kernel_size, keep_k_centers)
if tf.shape(centers)[0] == 0:
return (tf.zeros_like(semantic_segmentation), processed_center_heatmap,
tf.zeros_like(processed_center_heatmap))
instance_center_index = _find_closest_center_per_pixel(
centers, center_offsets)
# Instance IDs should start with 1. So we use the index into the centers, but
# shifted by 1.
instance_segmentation = tf.cast(instance_center_index, tf.int32) + 1
# The value of the heatmap at an instance's center is used as the score
# for that instance.
instance_scores = tf.gather_nd(processed_center_heatmap, centers)
# This will map the instance scores back to the image space: where each pixel
# has a value equal to the score of its instance.
flat_center_index = tf.reshape(instance_center_index, [-1])
instance_score_map = tf.gather(instance_scores, flat_center_index)
instance_score_map = tf.reshape(instance_score_map,
tf.shape(instance_segmentation))
instance_score_map *= tf.cast(thing_segmentation, tf.float32)
return (thing_segmentation * instance_segmentation, processed_center_heatmap,
instance_score_map)
@tf.function
def _get_panoptic_predictions(
semantic_logits: tf.Tensor, center_heatmap: tf.Tensor,
center_offsets: tf.Tensor, center_threshold: float,
thing_class_ids: tf.Tensor, label_divisor: int, stuff_area_limit: int,
void_label: int, nms_kernel_size: int, keep_k_centers: int
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""Computes the semantic class and instance ID per pixel.
Args:
semantic_logits: A tf.Tensor of shape [batch, height, width, classes].
center_heatmap: A tf.Tensor of shape [batch, height, width, 1].
center_offsets: A tf.Tensor of shape [batch, height, width, 2].
center_threshold: A float setting the threshold for the center heatmap.
thing_class_ids: A tf.Tensor of shape [N] containing N thing indices.
label_divisor: An integer specifying the label divisor of the dataset.
stuff_area_limit: An integer specifying the number of pixels that stuff
regions need to have at least. The stuff region will be included in the
panoptic prediction, only if its area is larger than the limit; otherwise,
it will be re-assigned as void_label.
void_label: An integer specifying the void label.
nms_kernel_size: An integer specifying the nms kernel size.
keep_k_centers: An integer specifying the number of centers to keep.
Negative values will keep all centers.
Returns:
A tuple of:
- the panoptic prediction as tf.Tensor with shape [batch, height, width].
- the centermap prediction as tf.Tensor with shape [batch, height, width].
- the instance score maps as tf.Tensor with shape [batch, height, width].
- the instance prediction as tf.Tensor with shape [batch, height, width].
"""
semantic_prediction = _get_semantic_predictions(semantic_logits)
batch_size = tf.shape(semantic_logits)[0]
instance_map_lists = tf.TensorArray(
tf.int32, size=batch_size, dynamic_size=False)
center_map_lists = tf.TensorArray(
tf.float32, size=batch_size, dynamic_size=False)
instance_score_map_lists = tf.TensorArray(
tf.float32, size=batch_size, dynamic_size=False)
for i in tf.range(batch_size):
(instance_map, center_map,
instance_score_map) = _get_instances_from_heatmap_and_offset(
semantic_prediction[i, ...], center_heatmap[i, ...],
center_offsets[i, ...], center_threshold, thing_class_ids,
nms_kernel_size, keep_k_centers)
instance_map_lists = instance_map_lists.write(i, instance_map)
center_map_lists = center_map_lists.write(i, center_map)
instance_score_map_lists = instance_score_map_lists.write(
i, instance_score_map)
# This does not work with unknown shapes.
instance_maps = instance_map_lists.stack()
center_maps = center_map_lists.stack()
instance_score_maps = instance_score_map_lists.stack()
panoptic_prediction = _merge_semantic_and_instance_maps(
semantic_prediction, instance_maps, thing_class_ids, label_divisor,
stuff_area_limit, void_label)
return (panoptic_prediction, center_maps, instance_score_maps, instance_maps)
@tf.function
def _merge_semantic_and_instance_maps(
semantic_prediction: tf.Tensor,
instance_maps: tf.Tensor,
thing_class_ids: tf.Tensor,
label_divisor: int,
stuff_area_limit: int,
void_label: int) -> tf.Tensor:
"""Merges semantic and instance maps to obtain panoptic segmentation.
This function merges the semantic segmentation and class-agnostic
instance segmentation to form the panoptic segmentation. In particular,
the class label of each instance mask is inferred from the majority
votes from the corresponding pixels in the semantic segmentation. This
operation is first proposed in the DeeperLab paper and adopted by the
Panoptic-DeepLab.
- DeeperLab: Single-Shot Image Parser, T-J Yang, et al. arXiv:1902.05093.
- Panoptic-DeepLab, B. Cheng, et al. In CVPR, 2020.
Note that this function only supports batch = 1 for simplicity. Additionally,
this function has a slightly different implementation from the provided
TensorFlow implementation `merge_ops` but with a similar performance. This
function is mainly used as a backup solution when you could not successfully
compile the provided TensorFlow implementation. To reproduce our results,
please use the provided TensorFlow implementation (i.e., not use this
function, but the `merge_ops.merge_semantic_and_instance_maps`).
Args:
semantic_prediction: A tf.Tensor of shape [batch, height, width].
instance_maps: A tf.Tensor of shape [batch, height, width].
thing_class_ids: A tf.Tensor of shape [N] containing N thing indices.
label_divisor: An integer specifying the label divisor of the dataset.
stuff_area_limit: An integer specifying the number of pixels that stuff
regions need to have at least. The stuff region will be included in the
panoptic prediction, only if its area is larger than the limit; otherwise,
it will be re-assigned as void_label.
void_label: An integer specifying the void label.
Returns:
panoptic_prediction: A tf.Tensor with shape [batch, height, width].
"""
prediction_shape = semantic_prediction.get_shape().as_list()
  # This implementation only supports a batch size of 1. Since model
  # construction might lose the batch size information (and leave it as None),
  # override it here.
prediction_shape[0] = 1
semantic_prediction = tf.ensure_shape(semantic_prediction, prediction_shape)
instance_maps = tf.ensure_shape(instance_maps, prediction_shape)
# Default panoptic_prediction to have semantic label = void_label.
panoptic_prediction = tf.ones_like(
semantic_prediction) * void_label * label_divisor
# Start to paste predicted `thing` regions to panoptic_prediction.
# Infer `thing` segmentation regions from semantic prediction.
semantic_thing_segmentation = tf.zeros_like(semantic_prediction,
dtype=tf.bool)
for thing_class in thing_class_ids:
semantic_thing_segmentation = tf.math.logical_or(
semantic_thing_segmentation,
semantic_prediction == thing_class)
  # Keep track of how many instances each semantic label has.
num_instance_per_semantic_label = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, clear_after_read=False)
instance_ids, _ = tf.unique(tf.reshape(instance_maps, [-1]))
for instance_id in instance_ids:
# Instance ID 0 is reserved for crowd region.
if instance_id == 0:
continue
thing_mask = tf.math.logical_and(instance_maps == instance_id,
semantic_thing_segmentation)
if tf.reduce_sum(tf.cast(thing_mask, tf.int32)) == 0:
continue
semantic_bin_counts = tf.math.bincount(
tf.boolean_mask(semantic_prediction, thing_mask))
semantic_majority = tf.cast(
tf.math.argmax(semantic_bin_counts), tf.int32)
while num_instance_per_semantic_label.size() <= semantic_majority:
num_instance_per_semantic_label = num_instance_per_semantic_label.write(
num_instance_per_semantic_label.size(), 0)
new_instance_id = (
num_instance_per_semantic_label.read(semantic_majority) + 1)
num_instance_per_semantic_label = num_instance_per_semantic_label.write(
semantic_majority, new_instance_id)
panoptic_prediction = tf.where(
thing_mask,
tf.ones_like(panoptic_prediction) * semantic_majority * label_divisor
+ new_instance_id,
panoptic_prediction)
# Done with `num_instance_per_semantic_label` tensor array.
num_instance_per_semantic_label.close()
# Start to paste predicted `stuff` regions to panoptic prediction.
instance_stuff_regions = instance_maps == 0
semantic_ids, _ = tf.unique(tf.reshape(semantic_prediction, [-1]))
for semantic_id in semantic_ids:
if tf.reduce_sum(tf.cast(thing_class_ids == semantic_id, tf.int32)) > 0:
continue
# Check stuff area.
stuff_mask = tf.math.logical_and(semantic_prediction == semantic_id,
instance_stuff_regions)
stuff_area = tf.reduce_sum(tf.cast(stuff_mask, tf.int32))
if stuff_area >= stuff_area_limit:
panoptic_prediction = tf.where(
stuff_mask,
tf.ones_like(panoptic_prediction) * semantic_id * label_divisor,
panoptic_prediction)
return panoptic_prediction
class PostProcessor(tf.keras.layers.Layer):
"""This class contains code of a Panoptic-Deeplab post-processor."""
def __init__(
self,
output_size: List[int],
center_score_threshold: float,
thing_class_ids: List[int],
label_divisor: int,
stuff_area_limit: int,
ignore_label: int,
nms_kernel: int,
keep_k_centers: int,
rescale_predictions: bool,
**kwargs):
"""Initializes a Panoptic-Deeplab post-processor.
Args:
output_size: A `List` of integers that represent the height and width of
the output mask.
center_score_threshold: A float setting the threshold for the center
heatmap.
      thing_class_ids: An integer list of shape [N] containing N thing indices.
label_divisor: An integer specifying the label divisor of the dataset.
stuff_area_limit: An integer specifying the number of pixels that stuff
regions need to have at least. The stuff region will be included in the
panoptic prediction, only if its area is larger than the limit;
otherwise, it will be re-assigned as void_label.
ignore_label: An integer specifying the void label.
nms_kernel: An integer specifying the nms kernel size.
keep_k_centers: An integer specifying the number of centers to keep.
Negative values will keep all centers.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, image_info is used to rescale predictions.
**kwargs: additional kwargs arguments.
"""
super(PostProcessor, self).__init__(**kwargs)
self._config_dict = {
'output_size': output_size,
'center_score_threshold': center_score_threshold,
'thing_class_ids': thing_class_ids,
'label_divisor': label_divisor,
'stuff_area_limit': stuff_area_limit,
'ignore_label': ignore_label,
'nms_kernel': nms_kernel,
'keep_k_centers': keep_k_centers,
'rescale_predictions': rescale_predictions
}
self._post_processor = functools.partial(
_get_panoptic_predictions,
center_threshold=center_score_threshold,
thing_class_ids=tf.convert_to_tensor(thing_class_ids),
label_divisor=label_divisor,
stuff_area_limit=stuff_area_limit,
void_label=ignore_label,
nms_kernel_size=nms_kernel,
keep_k_centers=keep_k_centers)
def _resize_and_pad_masks(self, mask, image_info):
"""Resizes masks to match the original image shape and pads to`output_size`.
Args:
mask: a padded mask tensor.
image_info: a tensor that holds information about original and
preprocessed images.
Returns:
resized and padded masks: tf.Tensor.
"""
rescale_size = tf.cast(
tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
image_shape = tf.cast(image_info[0, :], tf.int32)
offsets = tf.cast(image_info[3, :], tf.int32)
mask = tf.image.resize(
mask,
rescale_size,
method='bilinear')
mask = tf.image.crop_to_bounding_box(
mask,
offsets[0], offsets[1],
image_shape[0],
image_shape[1])
mask = tf.image.pad_to_bounding_box(
mask, 0, 0,
self._config_dict['output_size'][0],
self._config_dict['output_size'][1])
return mask
def _resize_and_pad_offset_mask(self, mask, image_info):
"""Rescales and resizes offset masks and pads to`output_size`.
Args:
mask: a padded offset mask tensor.
image_info: a tensor that holds information about original and
preprocessed images.
Returns:
rescaled, resized and padded masks: tf.Tensor.
"""
rescale_size = tf.cast(
tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
image_shape = tf.cast(image_info[0, :], tf.int32)
offsets = tf.cast(image_info[3, :], tf.int32)
mask = mask_ops.resize_and_rescale_offsets(
tf.expand_dims(mask, axis=0),
rescale_size)[0]
mask = tf.image.crop_to_bounding_box(
mask,
offsets[0], offsets[1],
image_shape[0],
image_shape[1])
mask = tf.image.pad_to_bounding_box(
mask, 0, 0,
self._config_dict['output_size'][0],
self._config_dict['output_size'][1])
return mask
def call(
self,
result_dict: Dict[Text, tf.Tensor],
image_info: tf.Tensor) -> Dict[Text, tf.Tensor]:
"""Performs the post-processing given model predicted results.
Args:
result_dict: A dictionary of tf.Tensor containing model results. The dict
has to contain
- segmentation_outputs
- instance_centers_heatmap
- instance_centers_offset
image_info: A tf.Tensor of image infos.
Returns:
The post-processed dict of tf.Tensor, containing the following keys:
- panoptic_outputs
- category_mask
- instance_mask
- instance_centers
- instance_score
"""
if self._config_dict['rescale_predictions']:
segmentation_outputs = tf.map_fn(
fn=lambda x: self._resize_and_pad_masks(x[0], x[1]),
elems=(result_dict['segmentation_outputs'], image_info),
fn_output_signature=tf.float32,
parallel_iterations=32)
instance_centers_heatmap = tf.map_fn(
fn=lambda x: self._resize_and_pad_masks(x[0], x[1]),
elems=(result_dict['instance_centers_heatmap'], image_info),
fn_output_signature=tf.float32,
parallel_iterations=32)
instance_centers_offset = tf.map_fn(
fn=lambda x: self._resize_and_pad_offset_mask(x[0], x[1]),
elems=(result_dict['instance_centers_offset'], image_info),
fn_output_signature=tf.float32,
parallel_iterations=32)
else:
segmentation_outputs = tf.image.resize(
result_dict['segmentation_outputs'],
size=self._config_dict['output_size'],
method='bilinear')
instance_centers_heatmap = tf.image.resize(
result_dict['instance_centers_heatmap'],
size=self._config_dict['output_size'],
method='bilinear')
instance_centers_offset = mask_ops.resize_and_rescale_offsets(
result_dict['instance_centers_offset'],
target_size=self._config_dict['output_size'])
processed_dict = {}
(processed_dict['panoptic_outputs'],
processed_dict['instance_centers'],
processed_dict['instance_scores'],
_) = self._post_processor(
tf.nn.softmax(segmentation_outputs, axis=-1),
instance_centers_heatmap,
instance_centers_offset)
label_divisor = self._config_dict['label_divisor']
processed_dict['category_mask'] = (
processed_dict['panoptic_outputs'] // label_divisor)
processed_dict['instance_mask'] = (
processed_dict['panoptic_outputs'] % label_divisor)
processed_dict.update({
'segmentation_outputs': result_dict['segmentation_outputs']})
return processed_dict
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
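# Example usage of the post-processor (a minimal sketch; the threshold values
# and thing class ids below are illustrative assumptions, not defaults taken
# from this file):
#   post_processor = PostProcessor(
#       output_size=[641, 641],
#       center_score_threshold=0.1,
#       thing_class_ids=[1, 2, 3],
#       label_divisor=256,
#       stuff_area_limit=4096,
#       ignore_label=0,
#       nms_kernel=7,
#       keep_k_centers=200,
#       rescale_predictions=False)
#   outputs = post_processor(result_dict, image_info)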
| 23,569 | 40.42355 | 94 | py |
models | models-master/official/projects/panoptic/modeling/layers/paste_masks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definition for bilinear grid sampling and mask pasting layers."""
from typing import List
import tensorflow as tf
class BilinearGridSampler(tf.keras.layers.Layer):
"""Bilinear Grid Sampling layer."""
def __init__(self, align_corners: bool = False, **kwargs):
"""Generates panoptic segmentation masks.
Args:
align_corners: A `bool` bool, if True, the centers of the 4 corner
pixels of the input and output tensors are aligned, preserving the
values at the corner pixels.
**kwargs: Additional kwargs arguments.
"""
super(BilinearGridSampler, self).__init__(**kwargs)
self.align_corners = align_corners
self._config = {
'align_corners': align_corners
}
def build(self, input_shape):
features_shape, _, _ = input_shape
_, height, width, channels = features_shape.as_list()
self._height = height
self._width = width
self._channels = channels
def _valid_coordinates(self, x_coord, y_coord):
return tf.logical_and(
tf.logical_and(
tf.greater_equal(x_coord, 0),
tf.greater_equal(y_coord, 0)),
tf.logical_and(
tf.less(x_coord, self._width),
tf.less(y_coord, self._height)))
def _get_pixel(self, features, x_coord, y_coord):
x_coord = tf.cast(x_coord, dtype=tf.int32)
y_coord = tf.cast(y_coord, dtype=tf.int32)
clipped_x = tf.clip_by_value(x_coord, 0, self._width - 1)
clipped_y = tf.clip_by_value(y_coord, 0, self._height - 1)
batch_size, _, _, _ = features.shape.as_list()
if batch_size is None:
batch_size = tf.shape(features)[0]
batch_indices = tf.reshape(
tf.range(batch_size, dtype=tf.int32),
shape=[batch_size, 1, 1])
batch_indices = tf.tile(
batch_indices,
multiples=[1, x_coord.shape[1], x_coord.shape[2]])
indices = tf.cast(
tf.stack([batch_indices, clipped_y, clipped_x], axis=-1),
dtype=tf.int32)
gathered_pixels = tf.gather_nd(features, indices)
return tf.where(
tf.expand_dims(self._valid_coordinates(x_coord, y_coord), axis=-1),
gathered_pixels,
tf.zeros_like(gathered_pixels))
def call(self, inputs):
features, x_coord, y_coord = inputs
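    # x_coord and y_coord are expected to be normalized to [-1, 1] over the
    # sampling region (as produced by PasteMasks below); shift them to [0, 2]
    # and then scale them to absolute pixel coordinates in the feature map.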
x_coord += 1
y_coord += 1
if self.align_corners:
x_coord = (x_coord * 0.5) * (self._width - 1)
y_coord = (y_coord * 0.5) * (self._height - 1)
else:
x_coord = (x_coord * self._width - 1) * 0.5
y_coord = (y_coord * self._height - 1) * 0.5
left = tf.floor(x_coord)
top = tf.floor(y_coord)
right = left + 1
bottom = top + 1
top_left = (right - x_coord) * (bottom - y_coord)
top_right = (x_coord - left) * (bottom - y_coord)
bottom_left = (right - x_coord) * (y_coord - top)
bottom_right = (x_coord - left) * (y_coord - top)
i_top_left = self._get_pixel(features, left, top)
i_top_right = self._get_pixel(features, right, top)
i_bottom_left = self._get_pixel(features, left, bottom)
i_bottom_right = self._get_pixel(features, right, bottom)
i_top_left *= tf.expand_dims(top_left, axis=-1)
i_top_right *= tf.expand_dims(top_right, axis=-1)
i_bottom_left *= tf.expand_dims(bottom_left, axis=-1)
i_bottom_right *= tf.expand_dims(bottom_right, axis=-1)
interpolated_features = tf.math.add_n(
[i_top_left, i_top_right, i_bottom_left, i_bottom_right])
return interpolated_features
def get_config(self):
    return self._config
@classmethod
def from_config(cls, config):
return cls(**config)
class PasteMasks(tf.keras.layers.Layer):
"""Layer to paste instance masks."""
def __init__(self, output_size: List[int],
grid_sampler, **kwargs):
"""Resizes and pastes instance masks to match image size.
Args:
output_size: A `List` of integers that represent the height and width of
the output mask.
grid_sampler: A grid sampling layer. Currently only `BilinearGridSampler`
is supported.
**kwargs: Additional kwargs arguments.
"""
super(PasteMasks, self).__init__(**kwargs)
self._output_size = output_size
self._grid_sampler = grid_sampler
self._config = {
'output_size': output_size,
'grid_sampler': grid_sampler
}
def build(self, input_shape):
self._x_coords = tf.range(0, self._output_size[1], dtype=tf.float32)
self._y_coords = tf.range(0, self._output_size[0], dtype=tf.float32)
def call(self, inputs):
masks, boxes = inputs
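    # boxes hold absolute (y0, x0, y1, x1) corners in output-image coordinates.
    # Build a sampling grid that maps every output pixel to box-relative
    # coordinates in [-1, 1], so the grid sampler stretches each small instance
    # mask over its detection box in the full-resolution canvas.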
y0, x0, y1, x1 = tf.split(boxes, 4, axis=1)
x_coords = tf.cast(self._x_coords, dtype=boxes.dtype)
y_coords = tf.cast(self._y_coords, dtype=boxes.dtype)
x_coords = (x_coords - x0) / (x1 - x0) * 2 - 1
y_coords = (y_coords - y0) / (y1 - y0) * 2 - 1
x_coords = tf.tile(
tf.expand_dims(x_coords, axis=1),
multiples=[1, self._output_size[0], 1])
y_coords = tf.tile(
tf.expand_dims(y_coords, axis=2),
multiples=[1, 1, self._output_size[1]])
pasted_masks = self._grid_sampler((masks, x_coords, y_coords))
return pasted_masks
def get_config(self):
    return self._config
@classmethod
def from_config(cls, config):
return cls(**config)
| 5,862 | 31.392265 | 79 | py |
models | models-master/official/projects/panoptic/modeling/layers/panoptic_segmentation_generator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definition for postprocessing layer to genrate panoptic segmentations."""
from typing import Any, Dict, List, Optional, Tuple
import tensorflow as tf
from official.projects.panoptic.modeling.layers import paste_masks
from official.vision.ops import spatial_transform_ops
def _batch_count_ones(masks: tf.Tensor,
dtype: tf.dtypes.DType = tf.int32) -> tf.Tensor:
"""Counts the ones/trues for each mask in the batch.
Args:
    masks: A tensor in shape (..., height, width) with an arbitrary number of
      batch dimensions.
dtype: DType of the resulting tensor. Default is tf.int32.
Returns:
A tensor which contains the count of non-zero elements for each mask in the
batch. The rank of the resulting tensor is equal to rank(masks) - 2.
"""
masks_shape = masks.get_shape().as_list()
if len(masks_shape) < 2:
raise ValueError(
'Expected the input masks (..., height, width) has rank >= 2, was: %s' %
masks_shape)
return tf.reduce_sum(tf.cast(masks, dtype), axis=[-2, -1])
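# For example, _batch_count_ones(tf.constant([[[True, False], [True, True]]]))
# evaluates to [3]: a single mask in the batch with three nonzero pixels.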
class PanopticSegmentationGenerator(tf.keras.layers.Layer):
"""Panoptic segmentation generator layer."""
def __init__(
self,
output_size: List[int],
max_num_detections: int,
stuff_classes_offset: int,
mask_binarize_threshold: float = 0.5,
score_threshold: float = 0.5,
things_overlap_threshold: float = 0.5,
stuff_area_threshold: float = 4096,
things_class_label: int = 1,
void_class_label: int = 0,
void_instance_id: int = -1,
rescale_predictions: bool = False,
**kwargs):
"""Generates panoptic segmentation masks.
Args:
output_size: A `List` of integers that represent the height and width of
the output mask.
max_num_detections: `int` for maximum number of detections.
stuff_classes_offset: An `int` that is added to the output of the
semantic segmentation mask to make sure that the stuff class ids do not
        overlap with the thing class ids of the MaskRCNN outputs.
      mask_binarize_threshold: A `float` representing the threshold used to
        binarize the predicted instance masks.
score_threshold: A `float` representing the threshold for deciding
when to remove objects based on score.
things_overlap_threshold: A `float` representing a threshold for deciding
to ignore a thing if overlap is above the threshold.
      stuff_area_threshold: A `float` representing a threshold for deciding
        to ignore a stuff class if its area is below a certain threshold.
things_class_label: An `int` that represents a single merged category of
all thing classes in the semantic segmentation output.
void_class_label: An `int` that is used to represent empty or unlabelled
regions of the mask
void_instance_id: An `int` that is used to denote regions that are not
assigned to any thing class. That is, void_instance_id are assigned to
both stuff regions and empty regions.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, image_info is used to rescale predictions.
      **kwargs: additional keyword arguments.
"""
self._output_size = output_size
self._max_num_detections = max_num_detections
self._stuff_classes_offset = stuff_classes_offset
self._mask_binarize_threshold = mask_binarize_threshold
self._score_threshold = score_threshold
self._things_overlap_threshold = things_overlap_threshold
self._stuff_area_threshold = stuff_area_threshold
self._things_class_label = things_class_label
self._void_class_label = void_class_label
self._void_instance_id = void_instance_id
self._rescale_predictions = rescale_predictions
self._config_dict = {
'output_size': output_size,
'max_num_detections': max_num_detections,
'stuff_classes_offset': stuff_classes_offset,
'mask_binarize_threshold': mask_binarize_threshold,
        'score_threshold': score_threshold,
        'things_overlap_threshold': things_overlap_threshold,
        'stuff_area_threshold': stuff_area_threshold,
        'things_class_label': things_class_label,
'void_class_label': void_class_label,
'void_instance_id': void_instance_id,
'rescale_predictions': rescale_predictions
}
super().__init__(**kwargs)
def build(self, input_shape: tf.TensorShape):
grid_sampler = paste_masks.BilinearGridSampler(align_corners=False)
self._paste_masks_fn = paste_masks.PasteMasks(
output_size=self._output_size, grid_sampler=grid_sampler)
super().build(input_shape)
def _generate_panoptic_masks(
self, boxes: tf.Tensor, scores: tf.Tensor, classes: tf.Tensor,
detections_masks: tf.Tensor,
segmentation_mask: tf.Tensor) -> Dict[str, tf.Tensor]:
"""Generates panoptic masks for a single image.
This function implements the following steps to merge instance and semantic
segmentation masks described in https://arxiv.org/pdf/1901.02446.pdf
Steps:
1. resolving overlaps between different instances based on their
confidence scores
2. resolving overlaps between instance and semantic segmentation
outputs in favor of instances
3. removing any stuff regions labeled other or under a given area
threshold.
Args:
boxes: A `tf.Tensor` of shape [num_rois, 4], representing the bounding
boxes for detected objects.
scores: A `tf.Tensor` of shape [num_rois], representing the
confidence scores for each object.
classes: A `tf.Tensor` of shape [num_rois], representing the class
for each object.
detections_masks: A `tf.Tensor` of shape
[num_rois, mask_height, mask_width, 1], representing the cropped mask
for each object.
segmentation_mask: A `tf.Tensor` of shape [height, width], representing
the semantic segmentation output.
Returns:
Dict with the following keys:
- category_mask: A `tf.Tensor` for category masks.
        - instance_mask: A `tf.Tensor` for instance masks.
"""
# Offset stuff class predictions
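    # Thing and void labels are left unchanged, while every stuff label is
    # shifted by `stuff_classes_offset` so that stuff ids cannot collide with
    # the thing class ids produced by the detection branch.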
segmentation_mask = tf.where(
tf.logical_or(
tf.equal(segmentation_mask, self._things_class_label),
tf.equal(segmentation_mask, self._void_class_label)),
segmentation_mask,
segmentation_mask + self._stuff_classes_offset
)
# sort instances by their scores
sorted_indices = tf.argsort(scores, direction='DESCENDING')
mask_shape = self._output_size + [1]
category_mask = tf.ones(mask_shape,
dtype=tf.float32) * self._void_class_label
instance_mask = tf.ones(
mask_shape, dtype=tf.float32) * self._void_instance_id
# filter instances with low confidence
sorted_scores = tf.sort(scores, direction='DESCENDING')
valid_indices = tf.where(sorted_scores > self._score_threshold)
# if no instance has sufficient confidence score, skip merging
# instance segmentation masks
if tf.shape(valid_indices)[0] > 0:
loop_end_idx = valid_indices[-1, 0] + 1
loop_end_idx = tf.minimum(
tf.cast(loop_end_idx, dtype=tf.int32),
self._max_num_detections)
pasted_masks = self._paste_masks_fn((
detections_masks[:loop_end_idx],
boxes[:loop_end_idx]))
# add things segmentation to panoptic masks
for i in range(loop_end_idx):
        # we process instances in descending order, which will make sure
# the overlaps are resolved based on confidence score
instance_idx = sorted_indices[i]
pasted_mask = pasted_masks[instance_idx]
class_id = tf.cast(classes[instance_idx], dtype=tf.float32)
# convert sigmoid scores to binary values
binary_mask = tf.greater(
pasted_mask, self._mask_binarize_threshold)
# filter empty instance masks
if not tf.reduce_sum(tf.cast(binary_mask, tf.float32)) > 0:
continue
overlap = tf.logical_and(
binary_mask,
tf.not_equal(category_mask, self._void_class_label))
binary_mask_area = tf.reduce_sum(
tf.cast(binary_mask, dtype=tf.float32))
overlap_area = tf.reduce_sum(
tf.cast(overlap, dtype=tf.float32))
        # skip instances that have a big enough overlap with instances with
        # higher scores
if overlap_area / binary_mask_area > self._things_overlap_threshold:
continue
# fill empty regions in category_mask represented by
# void_class_label with class_id of the instance.
category_mask = tf.where(
tf.logical_and(
binary_mask, tf.equal(category_mask, self._void_class_label)),
tf.ones_like(category_mask) * class_id, category_mask)
# fill empty regions in the instance_mask represented by
# void_instance_id with the id of the instance, starting from 1
instance_mask = tf.where(
tf.logical_and(
binary_mask,
tf.equal(instance_mask, self._void_instance_id)),
tf.ones_like(instance_mask) *
tf.cast(instance_idx + 1, tf.float32), instance_mask)
stuff_class_ids = tf.unique(tf.reshape(segmentation_mask, [-1])).y
for stuff_class_id in stuff_class_ids:
if stuff_class_id == self._things_class_label:
continue
stuff_mask = tf.logical_and(
tf.equal(segmentation_mask, stuff_class_id),
tf.equal(category_mask, self._void_class_label))
stuff_mask_area = tf.reduce_sum(
tf.cast(stuff_mask, dtype=tf.float32))
if stuff_mask_area < self._stuff_area_threshold:
continue
category_mask = tf.where(
stuff_mask,
tf.ones_like(category_mask) * stuff_class_id,
category_mask)
results = {
'category_mask': category_mask[:, :, 0],
'instance_mask': instance_mask[:, :, 0]
}
return results
def _resize_and_pad_masks(self, mask, image_info):
"""Resizes masks to match the original image shape and pads to`output_size`.
Args:
mask: a padded mask tensor.
image_info: a tensor that holds information about original and
preprocessed images.
Returns:
resized and padded masks: tf.Tensor.
"""
rescale_size = tf.cast(
tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
image_shape = tf.cast(image_info[0, :], tf.int32)
offsets = tf.cast(image_info[3, :], tf.int32)
mask = tf.image.resize(
mask,
rescale_size,
method='bilinear')
mask = tf.image.crop_to_bounding_box(
mask,
offsets[0], offsets[1],
image_shape[0],
image_shape[1])
mask = tf.image.pad_to_bounding_box(
mask, 0, 0, self._output_size[0], self._output_size[1])
return mask
def call(self,
inputs: tf.Tensor,
image_info: Optional[tf.Tensor] = None) -> Dict[str, tf.Tensor]:
detections = inputs
batched_scores = detections['detection_scores']
batched_classes = detections['detection_classes']
batched_detections_masks = tf.expand_dims(
detections['detection_masks'], axis=-1)
batched_boxes = detections['detection_boxes']
batched_segmentation_masks = tf.cast(
detections['segmentation_outputs'], dtype=tf.float32)
if self._rescale_predictions:
scale = tf.tile(
tf.cast(image_info[:, 2:3, :], dtype=batched_boxes.dtype),
multiples=[1, 1, 2])
batched_boxes /= scale
batched_segmentation_masks = tf.map_fn(
fn=lambda x: self._resize_and_pad_masks(x[0], x[1]),
elems=(
batched_segmentation_masks,
image_info),
fn_output_signature=tf.float32,
parallel_iterations=32)
else:
batched_segmentation_masks = tf.image.resize(
batched_segmentation_masks,
size=self._output_size,
method='bilinear')
batched_segmentation_masks = tf.expand_dims(tf.cast(
tf.argmax(batched_segmentation_masks, axis=-1),
dtype=tf.float32), axis=-1)
panoptic_masks = tf.map_fn(
fn=lambda x: self._generate_panoptic_masks( # pylint:disable=g-long-lambda
x[0], x[1], x[2], x[3], x[4]),
elems=(
batched_boxes,
batched_scores,
batched_classes,
batched_detections_masks,
batched_segmentation_masks),
fn_output_signature={
'category_mask': tf.float32,
'instance_mask': tf.float32
}, parallel_iterations=32)
for k, v in panoptic_masks.items():
panoptic_masks[k] = tf.cast(v, dtype=tf.int32)
return panoptic_masks
def get_config(self) -> Dict[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Dict[str,
Any]) -> 'PanopticSegmentationGenerator':
return cls(**config)
class PanopticSegmentationGeneratorV2(tf.keras.layers.Layer):
"""Panoptic segmentation generator layer V2."""
def __init__(self,
output_size: List[int],
max_num_detections: int,
stuff_classes_offset: int,
mask_binarize_threshold: float = 0.5,
score_threshold: float = 0.5,
things_overlap_threshold: float = 0.5,
stuff_area_threshold: float = 4096,
things_class_label: int = 1,
void_class_label: int = 0,
void_instance_id: int = -1,
rescale_predictions: bool = False,
**kwargs):
"""Generates panoptic segmentation masks.
Args:
output_size: A `List` of integers that represent the height and width of
the output mask.
max_num_detections: `int` for maximum number of detections.
stuff_classes_offset: An `int` that is added to the output of the semantic
        segmentation mask to make sure that the stuff class ids do not overlap
with the thing class ids of the MaskRCNN outputs.
      mask_binarize_threshold: A `float` representing the threshold used to
        binarize the predicted instance masks.
score_threshold: A `float` representing the threshold for deciding when to
remove objects based on score.
things_overlap_threshold: A `float` representing a threshold for deciding
to ignore a thing if overlap is above the threshold.
      stuff_area_threshold: A `float` representing a threshold for deciding
        to ignore a stuff class if its area is below a certain threshold.
things_class_label: An `int` that represents a single merged category of
all thing classes in the semantic segmentation output.
void_class_label: An `int` that is used to represent empty or unlabelled
regions of the mask
void_instance_id: An `int` that is used to denote regions that are not
assigned to any thing class. That is, void_instance_id are assigned to
both stuff regions and empty regions.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, image_info is used to rescale predictions.
      **kwargs: additional keyword arguments.
"""
self._output_size = output_size
self._max_num_detections = max_num_detections
self._stuff_classes_offset = stuff_classes_offset
self._mask_binarize_threshold = mask_binarize_threshold
self._score_threshold = score_threshold
self._things_overlap_threshold = things_overlap_threshold
self._stuff_area_threshold = stuff_area_threshold
self._things_class_label = things_class_label
self._void_class_label = void_class_label
self._void_instance_id = void_instance_id
self._rescale_predictions = rescale_predictions
self._config_dict = {
'output_size': output_size,
'max_num_detections': max_num_detections,
'stuff_classes_offset': stuff_classes_offset,
'mask_binarize_threshold': mask_binarize_threshold,
        'score_threshold': score_threshold,
        'things_overlap_threshold': things_overlap_threshold,
        'stuff_area_threshold': stuff_area_threshold,
        'things_class_label': things_class_label,
'void_class_label': void_class_label,
'void_instance_id': void_instance_id,
'rescale_predictions': rescale_predictions
}
super().__init__(**kwargs)
def call(self,
inputs: tf.Tensor,
image_info: Optional[tf.Tensor] = None) -> Dict[str, tf.Tensor]:
"""Generates panoptic segmentation masks."""
# (batch_size, num_rois, 4) in absolute coordinates.
detection_boxes = tf.cast(inputs['detection_boxes'], tf.float32)
# (batch_size, num_rois)
detection_classes = tf.cast(inputs['detection_classes'], tf.int32)
# (batch_size, num_rois)
detection_scores = inputs['detection_scores']
# (batch_size, num_rois, mask_height, mask_width)
detections_masks = inputs['detection_masks']
# (batch_size, height, width, num_semantic_classes)
segmentation_outputs = inputs['segmentation_outputs']
if self._rescale_predictions:
# (batch_size, 2)
original_size = tf.cast(image_info[:, 0, :], tf.float32)
desired_size = tf.cast(image_info[:, 1, :], tf.float32)
image_scale = tf.cast(image_info[:, 2, :], tf.float32)
offset = tf.cast(image_info[:, 3, :], tf.float32)
rescale_size = tf.math.ceil(desired_size / image_scale)
# (batch_size, output_height, output_width, num_semantic_classes)
segmentation_outputs = (
spatial_transform_ops.bilinear_resize_with_crop_and_pad(
segmentation_outputs,
rescale_size,
crop_offset=offset,
crop_size=original_size,
output_size=self._output_size))
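      # Detection boxes were predicted in the resized (scaled) image space;
      # dividing them by the image scale below maps them back to original
      # image coordinates so they align with the rescaled segmentation output.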
# (batch_size, 1, 4)
image_scale = tf.tile(image_scale, multiples=[1, 2])[:, tf.newaxis]
detection_boxes /= image_scale
else:
# (batch_size, output_height, output_width, num_semantic_classes)
segmentation_outputs = tf.image.resize(
segmentation_outputs, size=self._output_size, method='bilinear')
# (batch_size, output_height, output_width)
instance_mask, instance_category_mask = self._generate_instances(
detection_boxes, detection_classes, detection_scores, detections_masks)
# (batch_size, output_height, output_width)
stuff_category_mask = self._generate_stuffs(segmentation_outputs)
# (batch_size, output_height, output_width)
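    # Things take precedence: pixels claimed by an instance keep the thing
    # class id, and only unclaimed pixels receive the (offset) stuff class id.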
category_mask = tf.where((stuff_category_mask != self._void_class_label) &
(instance_category_mask == self._void_class_label),
stuff_category_mask + self._stuff_classes_offset,
instance_category_mask)
return {'instance_mask': instance_mask, 'category_mask': category_mask}
def _generate_instances(
self, detection_boxes: tf.Tensor, detection_classes: tf.Tensor,
detection_scores: tf.Tensor,
detections_masks: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Generates instance & category masks from instance segmentation outputs."""
batch_size = tf.shape(detections_masks)[0]
num_rois = tf.shape(detections_masks)[1]
mask_height = tf.shape(detections_masks)[2]
mask_width = tf.shape(detections_masks)[3]
output_height = self._output_size[0]
output_width = self._output_size[1]
# (batch_size, num_rois, mask_height, mask_width)
detections_masks = detections_masks * (
tf.cast((detection_scores > self._score_threshold) &
(detection_classes != self._void_class_label),
detections_masks.dtype)[:, :, tf.newaxis, tf.newaxis])
# Resizes and copies the detections_masks to the bounding boxes in the
# output canvas.
# (batch_size, num_rois, output_height, output_width)
pasted_detection_masks = tf.reshape(
spatial_transform_ops.bilinear_resize_to_bbox(
tf.reshape(detections_masks, [-1, mask_height, mask_width]),
tf.reshape(detection_boxes, [-1, 4]), self._output_size),
shape=[-1, num_rois, output_height, output_width])
# (batch_size, num_rois, output_height, output_width)
instance_binary_masks = (
pasted_detection_masks > self._mask_binarize_threshold)
# Sorts detection related tensors by scores.
# (batch_size, num_rois)
sorted_detection_indices = tf.argsort(
detection_scores, axis=1, direction='DESCENDING')
# (batch_size, num_rois)
sorted_detection_classes = tf.gather(
detection_classes, sorted_detection_indices, batch_dims=1)
# (batch_size, num_rois, output_height, output_width)
sorted_instance_binary_masks = tf.gather(
instance_binary_masks, sorted_detection_indices, batch_dims=1)
# (batch_size, num_rois)
instance_areas = _batch_count_ones(
sorted_instance_binary_masks, dtype=tf.float32)
init_loop_vars = (
0, # i: the loop counter
tf.ones([batch_size, output_height, output_width], dtype=tf.int32) *
self._void_instance_id, # combined_instance_mask
tf.ones([batch_size, output_height, output_width], dtype=tf.int32) *
self._void_class_label # combined_category_mask
)
def _copy_instances_loop_body(
i: int, combined_instance_mask: tf.Tensor,
combined_category_mask: tf.Tensor) -> Tuple[int, tf.Tensor, tf.Tensor]:
"""Iterates the sorted detections and copies the instances."""
# (batch_size, output_height, output_width)
instance_binary_mask = sorted_instance_binary_masks[:, i]
# Masks out the instances that have a big enough overlap with the other
# instances with higher scores.
# (batch_size, )
overlap_areas = _batch_count_ones(
(combined_instance_mask != self._void_instance_id)
& instance_binary_mask,
dtype=tf.float32)
# (batch_size, )
instance_overlap_threshold_mask = tf.math.divide_no_nan(
overlap_areas, instance_areas[:, i]) < self._things_overlap_threshold
# (batch_size, output_height, output_width)
instance_binary_mask &= (
instance_overlap_threshold_mask[:, tf.newaxis, tf.newaxis]
& (combined_instance_mask == self._void_instance_id))
# Updates combined_instance_mask.
# (batch_size, )
instance_id = tf.cast(
sorted_detection_indices[:, i] + 1, # starting from 1
dtype=combined_instance_mask.dtype)
# (batch_size, output_height, output_width)
combined_instance_mask = tf.where(instance_binary_mask,
instance_id[:, tf.newaxis, tf.newaxis],
combined_instance_mask)
# Updates combined_category_mask.
# (batch_size, )
class_id = tf.cast(
sorted_detection_classes[:, i], dtype=combined_category_mask.dtype)
# (batch_size, output_height, output_width)
combined_category_mask = tf.where(instance_binary_mask,
class_id[:, tf.newaxis, tf.newaxis],
combined_category_mask)
# Returns the updated loop vars.
return (
i + 1, # Increment the loop counter i
combined_instance_mask,
combined_category_mask)
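    # num_rois is a dynamic tensor, so a tf.while_loop iterates over the
    # sorted detections; processing them in descending score order lets
    # higher-scoring instances win any overlapping pixels.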
# (batch_size, output_height, output_width)
_, instance_mask, category_mask = tf.while_loop(
cond=lambda i, *_: i < num_rois,
body=_copy_instances_loop_body,
loop_vars=init_loop_vars,
parallel_iterations=32,
maximum_iterations=num_rois)
return instance_mask, category_mask
def _generate_stuffs(self, segmentation_outputs: tf.Tensor) -> tf.Tensor:
"""Generates category mask from semantic segmentation outputs."""
num_semantic_classes = tf.shape(segmentation_outputs)[3]
# (batch_size, output_height, output_width)
segmentation_masks = tf.argmax(
segmentation_outputs, axis=-1, output_type=tf.int32)
stuff_binary_masks = (segmentation_masks != self._things_class_label) & (
segmentation_masks != self._void_class_label)
# (batch_size, num_semantic_classes, output_height, output_width)
stuff_class_binary_masks = ((tf.one_hot(
segmentation_masks, num_semantic_classes, axis=1, dtype=tf.int32) == 1)
& tf.expand_dims(stuff_binary_masks, axis=1))
# Masks out the stuff class whose area is below the given threshold.
# (batch_size, num_semantic_classes)
stuff_class_areas = _batch_count_ones(
stuff_class_binary_masks, dtype=tf.float32)
# (batch_size, num_semantic_classes, output_height, output_width)
stuff_class_binary_masks &= tf.greater(
stuff_class_areas, self._stuff_area_threshold)[:, :, tf.newaxis,
tf.newaxis]
# (batch_size, output_height, output_width)
stuff_binary_masks = tf.reduce_any(stuff_class_binary_masks, axis=1)
# (batch_size, output_height, output_width)
return tf.where(stuff_binary_masks, segmentation_masks,
tf.ones_like(segmentation_masks) * self._void_class_label)
def get_config(self) -> Dict[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Dict[str,
Any]) -> 'PanopticSegmentationGeneratorV2':
return cls(**config)
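# Example usage of the V2 generator (a minimal sketch; the shapes, thresholds
# and offset below are illustrative assumptions, not values from a released
# config):
#
#   generator = PanopticSegmentationGeneratorV2(
#       output_size=[640, 640], max_num_detections=100,
#       stuff_classes_offset=90)
#   outputs = generator({
#       'detection_boxes': boxes,        # [batch, num_rois, 4], absolute
#       'detection_classes': classes,    # [batch, num_rois]
#       'detection_scores': scores,      # [batch, num_rois]
#       'detection_masks': masks,        # [batch, num_rois, mask_h, mask_w]
#       'segmentation_outputs': logits,  # [batch, h, w, num_semantic_classes]
#   })
#   # outputs['category_mask'] and outputs['instance_mask'] are int32 masks of
#   # shape [batch, 640, 640].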
| 25,846 | 40.823625 | 85 | py |
models | models-master/official/projects/panoptic/modeling/layers/fusion_layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains feature fusion blocks for panoptic segmentation models."""
from typing import Any, Callable, Dict, List, Mapping, Optional, Union
import tensorflow as tf
from official.modeling import tf_utils
# Type annotations.
States = Dict[str, tf.Tensor]
Activation = Union[str, Callable]
class PanopticDeepLabFusion(tf.keras.layers.Layer):
"""Creates a Panoptic DeepLab feature Fusion layer.
This implements the feature fusion introduced in the paper:
Cheng et al. Panoptic-DeepLab
(https://arxiv.org/pdf/1911.10194.pdf)
"""
def __init__(
self,
level: int,
low_level: List[int],
num_projection_filters: List[int],
num_output_filters: int = 256,
use_depthwise_convolution: bool = False,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
interpolation: str = 'bilinear',
**kwargs):
"""Initializes panoptic FPN feature fusion layer.
Args:
      level: An `int` level at which the decoder was applied.
      low_level: A list of `int` backbone levels to use in feature fusion.
num_projection_filters: A list of `int` with number of filters for
projection conv2d layers.
num_output_filters: An `int` number of filters in output conv2d layers.
      use_depthwise_convolution: A `bool` to specify whether to use depthwise
        separable convolutions.
activation: A `str` name of the activation function.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
interpolation: A `str` interpolation method for upsampling. Defaults to
`bilinear`.
**kwargs: Additional keyword arguments to be passed.
Returns:
A `float` `tf.Tensor` of shape [batch_size, feature_height, feature_width,
feature_channel].
"""
super(PanopticDeepLabFusion, self).__init__(**kwargs)
self._config_dict = {
'level': level,
'low_level': low_level,
'num_projection_filters': num_projection_filters,
'num_output_filters': num_output_filters,
'use_depthwise_convolution': use_depthwise_convolution,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'interpolation': interpolation
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._channel_axis = -1
else:
self._channel_axis = 1
self._activation = tf_utils.get_activation(activation)
def build(self, input_shape: List[tf.TensorShape]):
conv_op = tf.keras.layers.Conv2D
conv_kwargs = {
'padding': 'same',
'use_bias': True,
'kernel_initializer': tf.initializers.VarianceScaling(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
}
bn_op = (tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn']
else tf.keras.layers.BatchNormalization)
bn_kwargs = {
'axis': self._channel_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
self._projection_convs = []
self._projection_norms = []
self._fusion_convs = []
self._fusion_norms = []
for i in range(len(self._config_dict['low_level'])):
self._projection_convs.append(
conv_op(
filters=self._config_dict['num_projection_filters'][i],
kernel_size=1,
**conv_kwargs))
if self._config_dict['use_depthwise_convolution']:
depthwise_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
fusion_conv = tf.keras.Sequential([
tf.keras.layers.DepthwiseConv2D(
kernel_size=5,
padding='same',
use_bias=True,
depthwise_initializer=depthwise_initializer,
depthwise_regularizer=self._config_dict['kernel_regularizer'],
depth_multiplier=1),
bn_op(**bn_kwargs),
conv_op(
filters=self._config_dict['num_output_filters'],
kernel_size=1,
**conv_kwargs)])
else:
fusion_conv = conv_op(
filters=self._config_dict['num_output_filters'],
kernel_size=5,
**conv_kwargs)
self._fusion_convs.append(fusion_conv)
self._projection_norms.append(bn_op(**bn_kwargs))
self._fusion_norms.append(bn_op(**bn_kwargs))
def call(self, inputs, training=None):
if training is None:
training = tf.keras.backend.learning_phase()
backbone_output = inputs[0]
decoder_output = inputs[1][str(self._config_dict['level'])]
x = decoder_output
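    # Starting from the decoder output at `level`, iteratively upsample the
    # running feature and fuse it with each projected low-level (higher
    # resolution) backbone feature.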
for i in range(len(self._config_dict['low_level'])):
feature = backbone_output[str(self._config_dict['low_level'][i])]
feature = self._projection_convs[i](feature)
feature = self._projection_norms[i](feature, training=training)
feature = self._activation(feature)
shape = tf.shape(feature)
x = tf.image.resize(
x, size=[shape[1], shape[2]],
method=self._config_dict['interpolation'])
x = tf.cast(x, dtype=feature.dtype)
x = tf.concat([x, feature], axis=self._channel_axis)
x = self._fusion_convs[i](x)
x = self._fusion_norms[i](x, training=training)
x = self._activation(x)
return x
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
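# Example usage (a minimal sketch; the levels, shapes and filter counts below
# are illustrative assumptions, not values from a released config):
#
#   fusion = PanopticDeepLabFusion(
#       level=3, low_level=[2], num_projection_filters=[64],
#       num_output_filters=128)
#   backbone_features = {'2': tf.ones([1, 64, 64, 256])}
#   decoder_features = {'3': tf.ones([1, 32, 32, 256])}
#   fused = fusion([backbone_features, decoder_features], training=False)
#   # `fused` has shape [1, 64, 64, 128]: the decoder output is resized to the
#   # low-level resolution, concatenated with the projected backbone feature
#   # and merged by the fusion convolution.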
| 6,881 | 37.022099 | 80 | py |
models | models-master/official/projects/panoptic/modeling/heads/panoptic_deeplab_heads.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for Panoptic Deeplab heads."""
from typing import List, Mapping, Optional, Tuple, Union
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.panoptic.modeling.layers import fusion_layers
from official.vision.ops import spatial_transform_ops
class PanopticDeeplabHead(tf.keras.layers.Layer):
"""Creates a panoptic deeplab head."""
def __init__(
self,
level: Union[int, str],
num_convs: int = 2,
num_filters: int = 256,
kernel_size: int = 3,
use_depthwise_convolution: bool = False,
upsample_factor: int = 1,
low_level: Optional[List[int]] = None,
low_level_num_filters: Optional[List[int]] = None,
fusion_num_output_filters: int = 256,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a panoptic deeplab head.
Args:
level: An `int` or `str`, level to use to build head.
num_convs: An `int` number of stacked convolution before the last
prediction layer.
num_filters: An `int` number to specify the number of filters used.
Default is 256.
kernel_size: An `int` number to specify the kernel size of the
stacked convolutions before the last prediction layer.
      use_depthwise_convolution: A `bool` to specify whether to use depthwise
        separable convolutions.
upsample_factor: An `int` number to specify the upsampling factor to
generate finer mask. Default 1 means no upsampling is applied.
      low_level: A list of `int` backbone levels to be used for feature fusion
        by the Panoptic DeepLab fusion module.
      low_level_num_filters: A list of `int` with the reduced numbers of
        filters for the low level features before fusing them with higher
        level features.
fusion_num_output_filters: An `int` number to specify the number of
filters used by output layer of fusion module. Default is 256.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super(PanopticDeeplabHead, self).__init__(**kwargs)
self._config_dict = {
'level': level,
'num_convs': num_convs,
'num_filters': num_filters,
'kernel_size': kernel_size,
'use_depthwise_convolution': use_depthwise_convolution,
'upsample_factor': upsample_factor,
'low_level': low_level,
'low_level_num_filters': low_level_num_filters,
'fusion_num_output_filters': fusion_num_output_filters,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation)
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the head."""
kernel_size = self._config_dict['kernel_size']
use_depthwise_convolution = self._config_dict['use_depthwise_convolution']
random_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
conv_op = tf.keras.layers.Conv2D
conv_kwargs = {
'kernel_size': kernel_size if not use_depthwise_convolution else 1,
'padding': 'same',
'use_bias': True,
'kernel_initializer': random_initializer,
'kernel_regularizer': self._config_dict['kernel_regularizer'],
}
bn_op = (tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn']
else tf.keras.layers.BatchNormalization)
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
self._panoptic_deeplab_fusion = fusion_layers.PanopticDeepLabFusion(
level=self._config_dict['level'],
low_level=self._config_dict['low_level'],
num_projection_filters=self._config_dict['low_level_num_filters'],
num_output_filters=self._config_dict['fusion_num_output_filters'],
use_depthwise_convolution=self
._config_dict['use_depthwise_convolution'],
activation=self._config_dict['activation'],
use_sync_bn=self._config_dict['use_sync_bn'],
norm_momentum=self._config_dict['norm_momentum'],
norm_epsilon=self._config_dict['norm_epsilon'],
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
# Stacked convolutions layers.
self._convs = []
self._norms = []
for i in range(self._config_dict['num_convs']):
if use_depthwise_convolution:
self._convs.append(
tf.keras.layers.DepthwiseConv2D(
name='panoptic_deeplab_head_depthwise_conv_{}'.format(i),
kernel_size=kernel_size,
padding='same',
use_bias=True,
depthwise_initializer=random_initializer,
depthwise_regularizer=self._config_dict['kernel_regularizer'],
depth_multiplier=1))
norm_name = 'panoptic_deeplab_head_depthwise_norm_{}'.format(i)
self._norms.append(bn_op(name=norm_name, **bn_kwargs))
conv_name = 'panoptic_deeplab_head_conv_{}'.format(i)
self._convs.append(
conv_op(
name=conv_name,
filters=self._config_dict['num_filters'],
**conv_kwargs))
norm_name = 'panoptic_deeplab_head_norm_{}'.format(i)
self._norms.append(bn_op(name=norm_name, **bn_kwargs))
super().build(input_shape)
def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]],
Union[tf.Tensor, Mapping[str, tf.Tensor]]],
training=None):
"""Forward pass of the head.
It supports both a tuple of 2 tensors or 2 dictionaries. The first is
backbone endpoints, and the second is decoder endpoints. When inputs are
tensors, they are from a single level of feature maps. When inputs are
dictionaries, they contain multiple levels of feature maps, where the key
is the index of feature map.
Args:
inputs: A tuple of 2 feature map tensors of shape
[batch, height_l, width_l, channels] or 2 dictionaries of tensors:
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor` of the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
training: A bool, runs the model in training/eval mode.
Returns:
A `tf.Tensor` of the fused backbone and decoder features.
"""
if training is None:
training = tf.keras.backend.learning_phase()
x = self._panoptic_deeplab_fusion(inputs, training=training)
for conv, norm in zip(self._convs, self._norms):
x = conv(x)
x = norm(x, training=training)
x = self._activation(x)
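    # Optionally upsample the refined features with nearest-neighbor
    # interpolation to produce predictions on a finer grid than the fused
    # feature level.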
if self._config_dict['upsample_factor'] > 1:
x = spatial_transform_ops.nearest_upsampling(
x, scale=self._config_dict['upsample_factor'])
return x
def get_config(self):
base_config = super().get_config()
return dict(list(base_config.items()) + list(self._config_dict.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
@tf.keras.utils.register_keras_serializable(package='Vision')
class SemanticHead(PanopticDeeplabHead):
"""Creates a semantic head."""
def __init__(
self,
num_classes: int,
level: Union[int, str],
num_convs: int = 2,
num_filters: int = 256,
kernel_size: int = 3,
prediction_kernel_size: int = 3,
use_depthwise_convolution: bool = False,
upsample_factor: int = 1,
low_level: Optional[List[int]] = None,
low_level_num_filters: Optional[List[int]] = None,
fusion_num_output_filters: int = 256,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a instance center head.
Args:
num_classes: An `int` number of mask classification categories. The number
of classes does not include background class.
level: An `int` or `str`, level to use to build head.
num_convs: An `int` number of stacked convolution before the last
prediction layer.
num_filters: An `int` number to specify the number of filters used.
Default is 256.
kernel_size: An `int` number to specify the kernel size of the
stacked convolutions before the last prediction layer.
prediction_kernel_size: An `int` number to specify the kernel size of the
prediction layer.
      use_depthwise_convolution: A `bool` to specify whether to use depthwise
        separable convolutions.
upsample_factor: An `int` number to specify the upsampling factor to
generate finer mask. Default 1 means no upsampling is applied.
      low_level: A list of `int` backbone levels to be used for feature fusion
        by the Panoptic DeepLab fusion module.
      low_level_num_filters: A list of `int` with the reduced numbers of
        filters for the low level features before fusing them with higher
        level features.
fusion_num_output_filters: An `int` number to specify the number of
filters used by output layer of fusion module. Default is 256.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super(SemanticHead, self).__init__(
level=level,
num_convs=num_convs,
num_filters=num_filters,
use_depthwise_convolution=use_depthwise_convolution,
kernel_size=kernel_size,
upsample_factor=upsample_factor,
low_level=low_level,
low_level_num_filters=low_level_num_filters,
fusion_num_output_filters=fusion_num_output_filters,
activation=activation,
use_sync_bn=use_sync_bn,
norm_momentum=norm_momentum,
norm_epsilon=norm_epsilon,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
**kwargs)
self._config_dict.update({
'num_classes': num_classes,
'prediction_kernel_size': prediction_kernel_size})
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the semantic head."""
super(SemanticHead, self).build(input_shape)
self._classifier = tf.keras.layers.Conv2D(
name='semantic_output',
filters=self._config_dict['num_classes'],
kernel_size=self._config_dict['prediction_kernel_size'],
padding='same',
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]],
Union[tf.Tensor, Mapping[str, tf.Tensor]]],
training=None):
"""Forward pass of the head."""
if training is None:
training = tf.keras.backend.learning_phase()
x = super(SemanticHead, self).call(inputs, training=training)
outputs = self._classifier(x)
return outputs
@tf.keras.utils.register_keras_serializable(package='Vision')
class InstanceHead(PanopticDeeplabHead):
"""Creates a instance head."""
def __init__(
self,
level: Union[int, str],
num_convs: int = 2,
num_filters: int = 256,
kernel_size: int = 3,
prediction_kernel_size: int = 3,
use_depthwise_convolution: bool = False,
upsample_factor: int = 1,
low_level: Optional[List[int]] = None,
low_level_num_filters: Optional[List[int]] = None,
fusion_num_output_filters: int = 256,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a instance center head.
Args:
level: An `int` or `str`, level to use to build head.
num_convs: An `int` number of stacked convolution before the last
prediction layer.
num_filters: An `int` number to specify the number of filters used.
Default is 256.
kernel_size: An `int` number to specify the kernel size of the
stacked convolutions before the last prediction layer.
prediction_kernel_size: An `int` number to specify the kernel size of the
prediction layer.
      use_depthwise_convolution: A `bool` to specify whether to use depthwise
        separable convolutions.
upsample_factor: An `int` number to specify the upsampling factor to
generate finer mask. Default 1 means no upsampling is applied.
      low_level: A list of `int` backbone levels to be used for feature fusion
        by the Panoptic DeepLab fusion module.
      low_level_num_filters: A list of `int` with the reduced numbers of
        filters for the low level features before fusing them with higher
        level features.
fusion_num_output_filters: An `int` number to specify the number of
filters used by output layer of fusion module. Default is 256.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super(InstanceHead, self).__init__(
level=level,
num_convs=num_convs,
num_filters=num_filters,
use_depthwise_convolution=use_depthwise_convolution,
kernel_size=kernel_size,
upsample_factor=upsample_factor,
low_level=low_level,
low_level_num_filters=low_level_num_filters,
fusion_num_output_filters=fusion_num_output_filters,
activation=activation,
use_sync_bn=use_sync_bn,
norm_momentum=norm_momentum,
norm_epsilon=norm_epsilon,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
**kwargs)
self._config_dict.update({
'prediction_kernel_size': prediction_kernel_size})
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the instance head."""
super(InstanceHead, self).build(input_shape)
self._instance_center_prediction_conv = tf.keras.layers.Conv2D(
name='instance_centers_heatmap',
filters=1,
kernel_size=self._config_dict['prediction_kernel_size'],
padding='same',
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
self._instance_center_regression_conv = tf.keras.layers.Conv2D(
name='instance_centers_offset',
filters=2,
kernel_size=self._config_dict['prediction_kernel_size'],
padding='same',
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]],
Union[tf.Tensor, Mapping[str, tf.Tensor]]],
training=None):
"""Forward pass of the head."""
if training is None:
training = tf.keras.backend.learning_phase()
x = super(InstanceHead, self).call(inputs, training=training)
instance_centers_heatmap = self._instance_center_prediction_conv(x)
instance_centers_offset = self._instance_center_regression_conv(x)
outputs = {
'instance_centers_heatmap': instance_centers_heatmap,
'instance_centers_offset': instance_centers_offset
}
return outputs
| 19,039 | 42.770115 | 80 | py |
models | models-master/official/projects/panoptic/tasks/panoptic_deeplab.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic Deeplab task definition."""
from typing import Any, Dict, List, Mapping, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.projects.panoptic.configs import panoptic_deeplab as exp_cfg
from official.projects.panoptic.dataloaders import panoptic_deeplab_input
from official.projects.panoptic.losses import panoptic_deeplab_losses
from official.projects.panoptic.modeling import factory
from official.vision.dataloaders import input_reader_factory
from official.vision.evaluation import panoptic_quality_evaluator
from official.vision.evaluation import segmentation_metrics
@task_factory.register_task_cls(exp_cfg.PanopticDeeplabTask)
class PanopticDeeplabTask(base_task.Task):
"""A task for Panoptic Deeplab."""
def build_model(self):
"""Builds panoptic deeplab model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = factory.build_panoptic_deeplab(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
# Builds the model through warm-up call.
dummy_images = tf.keras.Input(self.task_config.model.input_size)
# Note that image_info is always in the shape of [4, 2].
dummy_image_info = tf.keras.layers.Input([4, 2])
_ = model(dummy_images, dummy_image_info, training=False)
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if 'all' in self.task_config.init_checkpoint_modules:
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(semantic_decoder=model.semantic_decoder)
if not self.task_config.model.shared_decoder:
ckpt_items.update(instance_decoder=model.instance_decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Builds panoptic deeplab input."""
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = panoptic_deeplab_input.TfExampleDecoder(
regenerate_source_id=decoder_cfg.regenerate_source_id,
panoptic_category_mask_key=decoder_cfg.panoptic_category_mask_key,
panoptic_instance_mask_key=decoder_cfg.panoptic_instance_mask_key)
else:
raise ValueError('Unknown decoder type: {}!'.format(params.decoder.type))
parser = panoptic_deeplab_input.Parser(
output_size=self.task_config.model.input_size[:2],
ignore_label=params.parser.ignore_label,
resize_eval_groundtruth=params.parser.resize_eval_groundtruth,
groundtruth_padded_size=params.parser.groundtruth_padded_size,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
aug_rand_hflip=params.parser.aug_rand_hflip,
aug_type=params.parser.aug_type,
sigma=params.parser.sigma,
dtype=params.parser.dtype)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels: Mapping[str, tf.Tensor],
model_outputs: Mapping[str, tf.Tensor],
aux_losses: Optional[Any] = None):
"""Panoptic deeplab losses.
Args:
labels: labels.
model_outputs: Output logits from panoptic deeplab.
      aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.
Returns:
The total loss tensor.
"""
loss_config = self._task_config.losses
segmentation_loss_fn = (
panoptic_deeplab_losses.WeightedBootstrappedCrossEntropyLoss(
loss_config.label_smoothing,
loss_config.class_weights,
loss_config.ignore_label,
top_k_percent_pixels=loss_config.top_k_percent_pixels))
instance_center_heatmap_loss_fn = panoptic_deeplab_losses.CenterHeatmapLoss(
)
instance_center_offset_loss_fn = panoptic_deeplab_losses.CenterOffsetLoss()
semantic_weights = tf.cast(
labels['semantic_weights'],
dtype=model_outputs['instance_centers_heatmap'].dtype)
things_mask = tf.cast(
tf.squeeze(labels['things_mask'], axis=3),
dtype=model_outputs['instance_centers_heatmap'].dtype)
valid_mask = tf.cast(
tf.squeeze(labels['valid_mask'], axis=3),
dtype=model_outputs['instance_centers_heatmap'].dtype)
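    # Each loss term uses a different mask: the semantic loss is weighted by
    # the per-pixel semantic weights, the center heatmap loss by the valid
    # pixel mask, and the center offset loss only by the thing regions.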
segmentation_loss = segmentation_loss_fn(
model_outputs['segmentation_outputs'],
labels['category_mask'],
sample_weight=semantic_weights)
instance_center_heatmap_loss = instance_center_heatmap_loss_fn(
model_outputs['instance_centers_heatmap'],
labels['instance_centers_heatmap'],
sample_weight=valid_mask)
instance_center_offset_loss = instance_center_offset_loss_fn(
model_outputs['instance_centers_offset'],
labels['instance_centers_offset'],
sample_weight=things_mask)
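    # The model loss is the weighted sum of the three Panoptic-DeepLab
    # training objectives: semantic segmentation, instance center heatmap and
    # instance center offset regression.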
model_loss = (
loss_config.segmentation_loss_weight * segmentation_loss +
loss_config.center_heatmap_loss_weight * instance_center_heatmap_loss +
loss_config.center_offset_loss_weight * instance_center_offset_loss)
total_loss = model_loss
if aux_losses:
total_loss += tf.add_n(aux_losses)
losses = {
'total_loss': total_loss,
'model_loss': model_loss,
'segmentation_loss': segmentation_loss,
'instance_center_heatmap_loss': instance_center_heatmap_loss,
'instance_center_offset_loss': instance_center_offset_loss
}
return losses
def build_metrics(self, training: bool = True) -> List[
tf.keras.metrics.Metric]:
"""Build metrics."""
eval_config = self.task_config.evaluation
metrics = []
if training:
metric_names = [
'total_loss',
'segmentation_loss',
'instance_center_heatmap_loss',
'instance_center_offset_loss',
'model_loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if eval_config.report_train_mean_iou:
self.train_mean_iou = segmentation_metrics.MeanIoU(
name='train_mean_iou',
num_classes=self.task_config.model.num_classes,
rescale_predictions=False,
dtype=tf.float32)
else:
rescale_predictions = (not self.task_config.validation_data.parser
.resize_eval_groundtruth)
self.perclass_iou_metric = segmentation_metrics.PerClassIoU(
name='per_class_iou',
num_classes=self.task_config.model.num_classes,
rescale_predictions=rescale_predictions,
dtype=tf.float32)
if self.task_config.model.generate_panoptic_masks:
self.panoptic_quality_metric = (
panoptic_quality_evaluator.PanopticQualityEvaluator(
num_categories=self.task_config.model.num_classes,
ignored_label=eval_config.ignored_label,
max_instances_per_category=eval_config
.max_instances_per_category,
offset=eval_config.offset,
is_thing=eval_config.is_thing,
rescale_predictions=eval_config.rescale_predictions))
return metrics
def train_step(
self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None) -> Dict[str, Any]:
"""Does forward and backward.
Args:
      inputs: a tuple of input images and labels.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
images, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(
inputs=images,
image_info=labels['image_info'],
training=True)
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
losses = self.build_losses(
labels=labels,
model_outputs=outputs,
aux_losses=model.losses)
scaled_loss = losses['total_loss'] / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: losses['total_loss']}
if metrics:
for m in metrics:
m.update_state(losses[m.name])
if self.task_config.evaluation.report_train_mean_iou:
segmentation_labels = {
'masks': labels['category_mask'],
'valid_masks': labels['valid_mask'],
'image_info': labels['image_info']
}
self.process_metrics(
metrics=[self.train_mean_iou],
labels=segmentation_labels,
model_outputs=outputs['segmentation_outputs'])
logs.update({
self.train_mean_iou.name:
self.train_mean_iou.result()
})
return logs
def validation_step(
self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None) -> Dict[str, Any]:
"""Validatation step.
Args:
      inputs: a tuple of input images and labels.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
images, labels = inputs
outputs = model(
inputs=images,
image_info=labels['image_info'],
training=False)
logs = {self.loss: 0}
segmentation_labels = {
'masks': labels['category_mask'],
'valid_masks': labels['valid_mask'],
'image_info': labels['image_info']
}
self.perclass_iou_metric.update_state(segmentation_labels,
outputs['segmentation_outputs'])
if self.task_config.model.generate_panoptic_masks:
pq_metric_labels = {
'category_mask': tf.squeeze(labels['category_mask'], axis=3),
'instance_mask': tf.squeeze(labels['instance_mask'], axis=3),
'image_info': labels['image_info']
}
panoptic_outputs = {
'category_mask':
outputs['category_mask'],
'instance_mask':
outputs['instance_mask'],
}
logs.update({
self.panoptic_quality_metric.name:
(pq_metric_labels, panoptic_outputs)})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if state is None:
self.perclass_iou_metric.reset_states()
state = [self.perclass_iou_metric]
if self.task_config.model.generate_panoptic_masks:
state += [self.panoptic_quality_metric]
if self.task_config.model.generate_panoptic_masks:
self.panoptic_quality_metric.update_state(
step_outputs[self.panoptic_quality_metric.name][0],
step_outputs[self.panoptic_quality_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
result = {}
ious = self.perclass_iou_metric.result()
if self.task_config.evaluation.report_per_class_iou:
for i, value in enumerate(ious.numpy()):
result.update({'segmentation_iou/class_{}'.format(i): value})
# Computes mean IoU
result.update({'segmentation_mean_iou': tf.reduce_mean(ious).numpy()})
if self.task_config.model.generate_panoptic_masks:
panoptic_quality_results = self.panoptic_quality_metric.result()
for k, value in panoptic_quality_results.items():
if k.endswith('per_class'):
if self.task_config.evaluation.report_per_class_pq:
for i, per_class_value in enumerate(value):
metric_key = 'panoptic_quality/{}/class_{}'.format(k, i)
result[metric_key] = per_class_value
else:
continue
else:
result['panoptic_quality/{}'.format(k)] = value
return result
| 14,713 | 36.345178 | 80 | py |
models | models-master/official/projects/panoptic/tasks/panoptic_maskrcnn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic MaskRCNN task definition."""
from typing import Any, Dict, List, Mapping, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import task_factory
from official.projects.panoptic.configs import panoptic_maskrcnn as exp_cfg
from official.projects.panoptic.dataloaders import panoptic_maskrcnn_input
from official.projects.panoptic.modeling import factory
from official.vision.dataloaders import input_reader
from official.vision.dataloaders import input_reader_factory
from official.vision.evaluation import panoptic_quality
from official.vision.evaluation import segmentation_metrics
from official.vision.losses import segmentation_losses
from official.vision.tasks import maskrcnn
@task_factory.register_task_cls(exp_cfg.PanopticMaskRCNNTask)
class PanopticMaskRCNNTask(maskrcnn.MaskRCNNTask):
"""A single-replica view of training procedure.
  Panoptic Mask R-CNN task provides artifacts for training/evaluation procedures,
including loading/iterating over Datasets, initializing the model, calculating
the loss, post-processing, and customized metrics with reduction.
"""
def __init__(self,
params,
logging_dir: Optional[str] = None,
name: Optional[str] = None):
super().__init__(params, logging_dir=logging_dir, name=name)
self.segmentation_train_mean_iou = None
self.segmentation_perclass_iou_metric = None
self.panoptic_quality_metric = None
def build_model(self) -> tf.keras.Model:
"""Builds Panoptic Mask R-CNN model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = factory.build_panoptic_maskrcnn(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
if self.task_config.freeze_backbone:
model.backbone.trainable = False
# Builds the model through warm-up call.
dummy_images = tf.keras.Input(self.task_config.model.input_size)
# Note that image_info is always in the shape of [4, 2].
dummy_image_info = tf.keras.layers.Input([4, 2])
_ = model(dummy_images, image_info=dummy_image_info, training=False)
return model
def initialize(self, model: tf.keras.Model) -> None:
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
def _get_checkpoint_path(checkpoint_dir_or_file):
checkpoint_path = checkpoint_dir_or_file
if tf.io.gfile.isdir(checkpoint_dir_or_file):
checkpoint_path = tf.train.latest_checkpoint(
checkpoint_dir_or_file)
return checkpoint_path
for init_module in self.task_config.init_checkpoint_modules:
# Restoring checkpoint.
if init_module == 'all':
checkpoint_path = _get_checkpoint_path(
self.task_config.init_checkpoint)
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(checkpoint_path)
status.expect_partial().assert_existing_objects_matched()
elif init_module == 'backbone':
checkpoint_path = _get_checkpoint_path(
self.task_config.init_checkpoint)
if self.task_config.model.backbone.type == 'uvit':
model.backbone.load_checkpoint(ckpt_filepath=checkpoint_path)
else:
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.read(checkpoint_path)
status.expect_partial().assert_existing_objects_matched()
elif init_module == 'decoder':
checkpoint_path = _get_checkpoint_path(
self.task_config.init_checkpoint)
ckpt = tf.train.Checkpoint(decoder=model.decoder)
status = ckpt.read(checkpoint_path)
status.expect_partial().assert_existing_objects_matched()
elif init_module == 'segmentation_backbone':
checkpoint_path = _get_checkpoint_path(
self.task_config.segmentation_init_checkpoint)
ckpt = tf.train.Checkpoint(
segmentation_backbone=model.segmentation_backbone)
status = ckpt.read(checkpoint_path)
status.expect_partial().assert_existing_objects_matched()
elif init_module == 'segmentation_decoder':
checkpoint_path = _get_checkpoint_path(
self.task_config.segmentation_init_checkpoint)
ckpt = tf.train.Checkpoint(
segmentation_decoder=model.segmentation_decoder)
status = ckpt.read(checkpoint_path)
status.expect_partial().assert_existing_objects_matched()
else:
raise ValueError(
"Only 'all', 'backbone', 'decoder', 'segmentation_backbone' and/or "
"'segmentation_decoder' can be used to initialize the model, but "
"got {}".format(init_module))
logging.info('Finished loading pretrained checkpoint from %s for %s',
checkpoint_path, init_module)
def build_inputs(
self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Builds input dataset."""
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = panoptic_maskrcnn_input.TfExampleDecoder(
regenerate_source_id=decoder_cfg.regenerate_source_id,
mask_binarize_threshold=decoder_cfg.mask_binarize_threshold,
include_panoptic_masks=decoder_cfg.include_panoptic_masks,
panoptic_category_mask_key=decoder_cfg.panoptic_category_mask_key,
panoptic_instance_mask_key=decoder_cfg.panoptic_instance_mask_key)
else:
raise ValueError('Unknown decoder type: {}!'.format(params.decoder.type))
parser = panoptic_maskrcnn_input.Parser(
output_size=self.task_config.model.input_size[:2],
min_level=self.task_config.model.min_level,
max_level=self.task_config.model.max_level,
num_scales=self.task_config.model.anchor.num_scales,
aspect_ratios=self.task_config.model.anchor.aspect_ratios,
anchor_size=self.task_config.model.anchor.anchor_size,
rpn_match_threshold=params.parser.rpn_match_threshold,
rpn_unmatched_threshold=params.parser.rpn_unmatched_threshold,
rpn_batch_size_per_im=params.parser.rpn_batch_size_per_im,
rpn_fg_fraction=params.parser.rpn_fg_fraction,
aug_rand_hflip=params.parser.aug_rand_hflip,
aug_rand_vflip=params.parser.aug_rand_vflip,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
aug_type=params.parser.aug_type,
skip_crowd_during_training=params.parser.skip_crowd_during_training,
max_num_instances=params.parser.max_num_instances,
outer_boxes_scale=self.task_config.model.outer_boxes_scale,
mask_crop_size=params.parser.mask_crop_size,
segmentation_resize_eval_groundtruth=params.parser
.segmentation_resize_eval_groundtruth,
segmentation_groundtruth_padded_size=params.parser
.segmentation_groundtruth_padded_size,
segmentation_ignore_label=params.parser.segmentation_ignore_label,
panoptic_ignore_label=params.parser.panoptic_ignore_label,
include_panoptic_masks=params.parser.include_panoptic_masks,
dtype=params.dtype,
)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
combine_fn=input_reader.create_combine_fn(params),
parser_fn=parser.parse_fn(params.is_training),
)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
aux_losses: Optional[Any] = None) -> Dict[str, tf.Tensor]:
"""Builds Panoptic Mask R-CNN losses."""
params = self.task_config.losses
use_groundtruth_dimension = (
params.semantic_segmentation_use_groundtruth_dimension)
segmentation_loss_fn = segmentation_losses.SegmentationLoss(
label_smoothing=params.semantic_segmentation_label_smoothing,
class_weights=params.semantic_segmentation_class_weights,
ignore_label=params.semantic_segmentation_ignore_label,
gt_is_matting_map=params.semantic_segmentation_gt_is_matting_map,
use_groundtruth_dimension=use_groundtruth_dimension,
use_binary_cross_entropy=params
.semantic_segmentation_use_binary_cross_entropy,
top_k_percent_pixels=params.semantic_segmentation_top_k_percent_pixels)
instance_segmentation_weight = params.instance_segmentation_weight
semantic_segmentation_weight = params.semantic_segmentation_weight
losses = super().build_losses(
outputs=outputs,
labels=labels,
aux_losses=None)
maskrcnn_loss = losses['model_loss']
segmentation_loss = segmentation_loss_fn(
outputs['segmentation_outputs'],
labels['gt_segmentation_mask'])
model_loss = (
instance_segmentation_weight * maskrcnn_loss +
semantic_segmentation_weight * segmentation_loss)
total_loss = model_loss
if aux_losses:
reg_loss = tf.reduce_sum(aux_losses)
total_loss = model_loss + reg_loss
losses.update({
'total_loss': total_loss,
'maskrcnn_loss': maskrcnn_loss,
'segmentation_loss': segmentation_loss,
'model_loss': model_loss,
})
return losses
def build_metrics(
self, training: bool = True
) -> List[tf.keras.metrics.Metric]:
"""Builds detection metrics."""
metrics = super().build_metrics(training)
if training:
metric_names = ['maskrcnn_loss', 'segmentation_loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if self.task_config.segmentation_evaluation.report_train_mean_iou:
self.segmentation_train_mean_iou = segmentation_metrics.MeanIoU(
name='train_mean_iou',
num_classes=self.task_config.model.segmentation_model.num_classes,
rescale_predictions=False,
dtype=tf.float32,
)
else:
rescale_predictions = (
not self.task_config.validation_data.parser.segmentation_resize_eval_groundtruth
)
self.segmentation_perclass_iou_metric = segmentation_metrics.PerClassIoU(
name='per_class_iou',
num_classes=self.task_config.model.segmentation_model.num_classes,
rescale_predictions=rescale_predictions,
dtype=tf.float32,
)
if (
self.task_config.model.generate_panoptic_masks
and self.task_config.panoptic_quality_evaluator is not None
):
if not self.task_config.validation_data.parser.include_panoptic_masks:
raise ValueError(
'`include_panoptic_masks` should be set to True when'
' computing panoptic quality.'
)
pq_config = self.task_config.panoptic_quality_evaluator
self.panoptic_quality_metric = panoptic_quality.PanopticQualityV2(
num_categories=pq_config.num_categories,
is_thing=pq_config.is_thing,
ignored_label=pq_config.ignored_label,
rescale_predictions=pq_config.rescale_predictions,
)
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None) -> Dict[str, Any]:
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
images, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
model_kwargs = {
'image_info': labels['image_info'],
'anchor_boxes': labels['anchor_boxes'],
'gt_boxes': labels['gt_boxes'],
'gt_classes': labels['gt_classes'],
'training': True,
}
if self.task_config.model.include_mask:
model_kwargs['gt_masks'] = labels['gt_masks']
if self.task_config.model.outer_boxes_scale > 1.0:
model_kwargs['gt_outer_boxes'] = labels['gt_outer_boxes']
outputs = model(images, **model_kwargs)
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
losses = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
scaled_loss = losses['total_loss'] / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: losses['total_loss']}
if metrics:
for m in metrics:
m.update_state(losses[m.name])
if (self.task_config.segmentation_evaluation.report_train_mean_iou and
self.segmentation_train_mean_iou is not None):
segmentation_labels = {
'masks': labels['gt_segmentation_mask'],
'valid_masks': labels['gt_segmentation_valid_mask'],
'image_info': labels['image_info']
}
self.process_metrics(
metrics=[self.segmentation_train_mean_iou],
labels=segmentation_labels,
model_outputs=outputs['segmentation_outputs'])
logs.update({
self.segmentation_train_mean_iou.name:
self.segmentation_train_mean_iou.result()
})
return logs
def _update_metrics(self, labels, outputs, logs):
super()._update_metrics(labels, outputs, logs)
if self.segmentation_perclass_iou_metric is not None:
segmentation_labels = {
'masks': labels['groundtruths']['gt_segmentation_mask'],
'valid_masks': labels['groundtruths']['gt_segmentation_valid_mask'],
'image_info': labels['image_info'],
}
self.segmentation_perclass_iou_metric.update_state(
segmentation_labels, outputs['segmentation_outputs']
)
if self.panoptic_quality_metric is not None:
pq_metric_labels = {
'category_mask': labels['groundtruths']['gt_panoptic_category_mask'],
'instance_mask': labels['groundtruths']['gt_panoptic_instance_mask'],
'image_info': labels['image_info'],
}
self.panoptic_quality_metric.update_state(
pq_metric_labels, outputs['panoptic_outputs']
)
def validation_step(
self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None,
) -> Dict[str, Any]:
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
images, labels = inputs
outputs = model(
images,
anchor_boxes=labels['anchor_boxes'],
image_info=labels['image_info'],
training=False,
)
logs = {self.loss: 0}
self._update_metrics(labels, outputs, logs)
return logs
def aggregate_logs(self, state=None, step_outputs=None):
is_first_step = not state
super().aggregate_logs(state, step_outputs)
if is_first_step:
if not isinstance(state, list):
state = []
if self.segmentation_perclass_iou_metric is not None:
state.append(self.segmentation_perclass_iou_metric)
if self.panoptic_quality_metric is not None:
state.append(self.panoptic_quality_metric)
if not state:
# Create an arbitrary state to indicate it's not the first step in the
# following calls to this function.
state = True
return state
def _reduce_semantic_metrics(self, logs: Dict[str, Any]):
"""Updates the per class and mean semantic metrics in the logs."""
ious = self.segmentation_perclass_iou_metric.result()
if self.task_config.segmentation_evaluation.report_per_class_iou:
for i, value in enumerate(ious.numpy()):
logs.update({'segmentation_iou/class_{}'.format(i): value})
logs.update({'segmentation_mean_iou': tf.reduce_mean(ious)})
def _reduce_panoptic_metrics(self, logs: Dict[str, Any]):
"""Updates the per class and mean panoptic metrics in the logs."""
result = self.panoptic_quality_metric.result()
valid_thing_classes = result['valid_thing_classes']
valid_stuff_classes = result['valid_stuff_classes']
valid_classes = valid_stuff_classes | valid_thing_classes
num_categories = tf.math.count_nonzero(valid_classes, dtype=tf.float32)
num_thing_categories = tf.math.count_nonzero(
valid_thing_classes, dtype=tf.float32
)
num_stuff_categories = tf.math.count_nonzero(
valid_stuff_classes, dtype=tf.float32
)
valid_thing_classes = tf.cast(valid_thing_classes, dtype=tf.float32)
valid_stuff_classes = tf.cast(valid_stuff_classes, dtype=tf.float32)
logs['panoptic_quality/All_num_categories'] = num_categories
logs['panoptic_quality/Things_num_categories'] = num_thing_categories
logs['panoptic_quality/Stuff_num_categories'] = num_stuff_categories
for metric in ['pq', 'sq', 'rq']:
metric_per_class = result[f'{metric}_per_class']
logs[f'panoptic_quality/All_{metric}'] = tf.math.divide_no_nan(
tf.reduce_sum(metric_per_class), num_categories
)
logs[f'panoptic_quality/Things_{metric}'] = tf.math.divide_no_nan(
tf.reduce_sum(metric_per_class * valid_thing_classes),
num_thing_categories,
)
logs[f'panoptic_quality/Stuff_{metric}'] = tf.math.divide_no_nan(
tf.reduce_sum(metric_per_class * valid_stuff_classes),
num_stuff_categories,
)
if self.task_config.panoptic_quality_evaluator.report_per_class_metrics:
for i, is_valid in enumerate(valid_classes.numpy()):
if is_valid:
logs[f'panoptic_quality/{metric}/class_{i}'] = metric_per_class[i]
def reduce_aggregated_logs(
self,
aggregated_logs: Dict[str, Any],
global_step: Optional[tf.Tensor] = None,
) -> Dict[str, tf.Tensor]:
"""Optional reduce of aggregated logs over validation steps."""
logs = super().reduce_aggregated_logs(aggregated_logs, global_step)
if self.segmentation_perclass_iou_metric is not None:
self._reduce_semantic_metrics(logs)
self.segmentation_perclass_iou_metric.reset_state()
if self.panoptic_quality_metric is not None:
self._reduce_panoptic_metrics(logs)
self.panoptic_quality_metric.reset_state()
return logs
| 20,323 | 39.007874 | 90 | py |
models | models-master/official/projects/basnet/evaluation/metrics_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics.py."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.basnet.evaluation import metrics
class BASNetMetricTest(parameterized.TestCase, tf.test.TestCase):
def test_mae(self):
input_size = 224
inputs = (tf.random.uniform([2, input_size, input_size, 1]),)
labels = (tf.random.uniform([2, input_size, input_size, 1]),)
mae_obj = metrics.MAE()
mae_obj.reset_states()
mae_obj.update_state(labels, inputs)
output = mae_obj.result()
mae_tf = tf.keras.metrics.MeanAbsoluteError()
mae_tf.reset_state()
mae_tf.update_state(labels[0], inputs[0])
compare = mae_tf.result().numpy()
self.assertAlmostEqual(output, compare, places=4)
def test_max_f(self):
input_size = 224
beta = 0.3
inputs = (tf.random.uniform([2, input_size, input_size, 1]),)
labels = (tf.random.uniform([2, input_size, input_size, 1]),)
max_f_obj = metrics.MaxFscore()
max_f_obj.reset_states()
max_f_obj.update_state(labels, inputs)
output = max_f_obj.result()
pre_tf = tf.keras.metrics.Precision(thresholds=0.78)
rec_tf = tf.keras.metrics.Recall(thresholds=0.78)
pre_tf.reset_state()
rec_tf.reset_state()
pre_tf.update_state(labels[0], inputs[0])
rec_tf.update_state(labels[0], inputs[0])
pre_out_tf = pre_tf.result().numpy()
rec_out_tf = rec_tf.result().numpy()
compare = (1+beta)*pre_out_tf*rec_out_tf/(beta*pre_out_tf+rec_out_tf+1e-8)
self.assertAlmostEqual(output, compare, places=1)
if __name__ == '__main__':
tf.test.main()
| 2,195 | 30.826087 | 78 | py |
models | models-master/official/projects/basnet/serving/basnet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export module for BASNet."""
import tensorflow as tf
from official.projects.basnet.tasks import basnet
from official.vision.serving import semantic_segmentation
class BASNetModule(semantic_segmentation.SegmentationModule):
"""BASNet Module."""
def _build_model(self):
input_specs = tf.keras.layers.InputSpec(
shape=[self._batch_size] + self._input_image_size + [3])
return basnet.build_basnet_model(
input_specs=input_specs,
model_config=self.params.task.model,
l2_regularizer=None)
def serve(self, images):
"""Cast image to float and run inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
      A dictionary holding the predicted saliency masks keyed by
      'predicted_masks'.
"""
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
images = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs, elems=images,
fn_output_signature=tf.TensorSpec(
shape=self._input_image_size + [3], dtype=tf.float32),
parallel_iterations=32
)
)
masks = self.inference_step(images)
keys = sorted(masks.keys())
output = tf.image.resize(
masks[keys[-1]],
self._input_image_size, method='bilinear')
return dict(predicted_masks=output)
| 1,982 | 30.47619 | 74 | py |
models | models-master/official/projects/basnet/modeling/refunet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RefUNet model."""
import tensorflow as tf
from official.projects.basnet.modeling import nn_blocks
@tf.keras.utils.register_keras_serializable(package='Vision')
class RefUnet(tf.keras.layers.Layer):
"""Residual Refinement Module of BASNet.
  Boundary-Aware network (BASNet) was proposed in:
[1] Qin, Xuebin, et al.
Basnet: Boundary-aware salient object detection.
"""
def __init__(self,
activation='relu',
use_sync_bn=False,
use_bias=True,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""Residual Refinement Module of BASNet.
Args:
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
use_bias: if True, use bias in conv2d.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
**kwargs: keyword arguments to be passed.
"""
super(RefUnet, self).__init__(**kwargs)
self._config_dict = {
'activation': activation,
'use_sync_bn': use_sync_bn,
'use_bias': use_bias,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
self._concat = tf.keras.layers.Concatenate(axis=-1)
self._sigmoid = tf.keras.layers.Activation(activation='sigmoid')
self._maxpool = tf.keras.layers.MaxPool2D(
pool_size=2,
strides=2,
padding='valid')
self._upsample = tf.keras.layers.UpSampling2D(
size=2,
interpolation='bilinear')
def build(self, input_shape):
"""Creates the variables of the BASNet decoder."""
conv_op = tf.keras.layers.Conv2D
conv_kwargs = {
'kernel_size': 3,
'strides': 1,
'use_bias': self._config_dict['use_bias'],
'kernel_initializer': self._config_dict['kernel_initializer'],
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
self._in_conv = conv_op(
filters=64,
padding='same',
**conv_kwargs)
self._en_convs = []
for _ in range(4):
self._en_convs.append(nn_blocks.ConvBlock(
filters=64,
use_sync_bn=self._config_dict['use_sync_bn'],
norm_momentum=self._config_dict['norm_momentum'],
norm_epsilon=self._config_dict['norm_epsilon'],
**conv_kwargs))
self._bridge_convs = []
for _ in range(1):
self._bridge_convs.append(nn_blocks.ConvBlock(
filters=64,
use_sync_bn=self._config_dict['use_sync_bn'],
norm_momentum=self._config_dict['norm_momentum'],
norm_epsilon=self._config_dict['norm_epsilon'],
**conv_kwargs))
self._de_convs = []
for _ in range(4):
self._de_convs.append(nn_blocks.ConvBlock(
filters=64,
use_sync_bn=self._config_dict['use_sync_bn'],
norm_momentum=self._config_dict['norm_momentum'],
norm_epsilon=self._config_dict['norm_epsilon'],
**conv_kwargs))
self._out_conv = conv_op(
filters=1,
padding='same',
**conv_kwargs)
def call(self, inputs):
endpoints = {}
residual = inputs
x = self._in_conv(inputs)
# Top-down
for i, block in enumerate(self._en_convs):
x = block(x)
endpoints[str(i)] = x
x = self._maxpool(x)
# Bridge
for i, block in enumerate(self._bridge_convs):
x = block(x)
# Bottom-up
for i, block in enumerate(self._de_convs):
dtype = x.dtype
x = tf.cast(x, tf.float32)
x = self._upsample(x)
x = tf.cast(x, dtype)
x = self._concat([endpoints[str(3-i)], x])
x = block(x)
x = self._out_conv(x)
residual = tf.cast(residual, dtype=x.dtype)
output = self._sigmoid(x + residual)
self._output_specs = output.get_shape()
return output
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
return self._output_specs
| 5,415 | 31.626506 | 78 | py |
models | models-master/official/projects/basnet/modeling/basnet_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for basnet network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.basnet.modeling import basnet_model
from official.projects.basnet.modeling import refunet
class BASNetNetworkTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(256),
(512),
)
def test_basnet_network_creation(
self, input_size):
"""Test for creation of a segmentation network."""
inputs = np.random.rand(2, input_size, input_size, 3)
tf.keras.backend.set_image_data_format('channels_last')
backbone = basnet_model.BASNetEncoder()
decoder = basnet_model.BASNetDecoder()
refinement = refunet.RefUnet()
model = basnet_model.BASNetModel(
backbone=backbone,
decoder=decoder,
refinement=refinement
)
sigmoids = model(inputs)
levels = sorted(sigmoids.keys())
self.assertAllEqual(
[2, input_size, input_size, 1],
sigmoids[levels[-1]].numpy().shape)
def test_serialize_deserialize(self):
"""Validate the network can be serialized and deserialized."""
backbone = basnet_model.BASNetEncoder()
decoder = basnet_model.BASNetDecoder()
refinement = refunet.RefUnet()
model = basnet_model.BASNetModel(
backbone=backbone,
decoder=decoder,
refinement=refinement
)
config = model.get_config()
new_model = basnet_model.BASNetModel.from_config(config)
# Validate that the config can be forced to JSON.
_ = new_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,356 | 29.61039 | 79 | py |
models | models-master/official/projects/basnet/modeling/nn_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for BasNet model."""
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class ConvBlock(tf.keras.layers.Layer):
"""A (Conv+BN+Activation) block."""
def __init__(self,
filters,
strides,
dilation_rate=1,
kernel_size=3,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_bias=False,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""A vgg block with BN after convolutions.
Args:
      filters: `int` number of filters for the convolution in this block.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
dilation_rate: `int`, dilation rate for conv layers.
kernel_size: `int`, kernel size of conv layers.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
activation: `str` name of the activation function.
use_bias: `bool`, whether or not use bias in conv layers.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
**kwargs: keyword arguments to be passed.
"""
super(ConvBlock, self).__init__(**kwargs)
self._config_dict = {
'filters': filters,
'kernel_size': kernel_size,
'strides': strides,
'dilation_rate': dilation_rate,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'activation': activation,
'use_sync_bn': use_sync_bn,
'use_bias': use_bias,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon
}
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape):
conv_kwargs = {
'padding': 'same',
'use_bias': self._config_dict['use_bias'],
'kernel_initializer': self._config_dict['kernel_initializer'],
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
self._conv0 = tf.keras.layers.Conv2D(
filters=self._config_dict['filters'],
kernel_size=self._config_dict['kernel_size'],
strides=self._config_dict['strides'],
dilation_rate=self._config_dict['dilation_rate'],
**conv_kwargs)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._config_dict['norm_momentum'],
epsilon=self._config_dict['norm_epsilon'])
super(ConvBlock, self).build(input_shape)
def get_config(self):
return self._config_dict
def call(self, inputs, training=None):
x = self._conv0(inputs)
x = self._norm0(x)
x = self._activation_fn(x)
return x
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResBlock(tf.keras.layers.Layer):
"""A residual block."""
def __init__(self,
filters,
strides,
use_projection=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
use_bias=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""Initializes a residual block with BN after convolutions.
Args:
      filters: An `int` number of filters used by both convolutions in this
        block.
strides: An `int` block stride. If greater than 1, this block will
ultimately downsample the input.
use_projection: A `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d.
Default to None.
activation: A `str` name of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
use_bias: A `bool`. If True, use bias in conv2d.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
**kwargs: Additional keyword arguments to be passed.
"""
super(ResBlock, self).__init__(**kwargs)
self._config_dict = {
'filters': filters,
'strides': strides,
'use_projection': use_projection,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'activation': activation,
'use_sync_bn': use_sync_bn,
'use_bias': use_bias,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon
}
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape):
conv_kwargs = {
'filters': self._config_dict['filters'],
'padding': 'same',
'use_bias': self._config_dict['use_bias'],
'kernel_initializer': self._config_dict['kernel_initializer'],
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if self._config_dict['use_projection']:
self._shortcut = tf.keras.layers.Conv2D(
filters=self._config_dict['filters'],
kernel_size=1,
strides=self._config_dict['strides'],
use_bias=self._config_dict['use_bias'],
kernel_initializer=self._config_dict['kernel_initializer'],
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._config_dict['norm_momentum'],
epsilon=self._config_dict['norm_epsilon'])
self._conv1 = tf.keras.layers.Conv2D(
kernel_size=3,
strides=self._config_dict['strides'],
**conv_kwargs)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._config_dict['norm_momentum'],
epsilon=self._config_dict['norm_epsilon'])
self._conv2 = tf.keras.layers.Conv2D(
kernel_size=3,
strides=1,
**conv_kwargs)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._config_dict['norm_momentum'],
epsilon=self._config_dict['norm_epsilon'])
super(ResBlock, self).build(input_shape)
def get_config(self):
return self._config_dict
def call(self, inputs, training=None):
shortcut = inputs
if self._config_dict['use_projection']:
shortcut = self._shortcut(shortcut)
shortcut = self._norm0(shortcut)
x = self._conv1(inputs)
x = self._norm1(x)
x = self._activation_fn(x)
x = self._conv2(x)
x = self._norm2(x)
return self._activation_fn(x + shortcut)
| 9,180 | 36.321138 | 80 | py |
models | models-master/official/projects/basnet/modeling/basnet_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build BASNet models."""
from typing import Mapping
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.basnet.modeling import nn_blocks
from official.vision.modeling.backbones import factory
# Specifications for BASNet encoder.
# Each element in the block configuration is in the following format:
# (num_filters, stride, block_repeats, maxpool)
BASNET_ENCODER_SPECS = [
(64, 1, 3, 0), # ResNet-34,
(128, 2, 4, 0), # ResNet-34,
(256, 2, 6, 0), # ResNet-34,
(512, 2, 3, 1), # ResNet-34,
(512, 1, 3, 1), # BASNet,
(512, 1, 3, 0), # BASNet,
]
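# Illustrative reading of the encoder spec format (comment added for clarity,
# not part of the original file): e.g. (128, 2, 4, 0) builds a block group with
# 128 filters, stride 2 and 4 residual blocks and skips the trailing max-pool,
# while a final element of 1 adds a 2x2 max-pool after the group.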
# Specifications for BASNet decoder.
# Each element in the block configuration is in the following format:
# (conv1_nf, conv1_dr, convm_nf, convm_dr, conv2_nf, conv2_dr, scale_factor)
# nf : num_filters, dr : dilation_rate
BASNET_BRIDGE_SPECS = [
(512, 2, 512, 2, 512, 2, 32), # Sup0, Bridge
]
BASNET_DECODER_SPECS = [
(512, 1, 512, 2, 512, 2, 32), # Sup1, stage6d
(512, 1, 512, 1, 512, 1, 16), # Sup2, stage5d
(512, 1, 512, 1, 256, 1, 8), # Sup3, stage4d
(256, 1, 256, 1, 128, 1, 4), # Sup4, stage3d
(128, 1, 128, 1, 64, 1, 2), # Sup5, stage2d
(64, 1, 64, 1, 64, 1, 1) # Sup6, stage1d
]
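# Illustrative reading of the bridge/decoder spec format (comment added for
# clarity, not part of the original file): e.g. (512, 1, 512, 2, 512, 2, 32)
# builds three ConvBlocks with (filters, dilation_rate) pairs (512, 1),
# (512, 2) and (512, 2), followed by a 1-channel side-output convolution whose
# prediction is bilinearly upsampled by a factor of 32.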
@tf.keras.utils.register_keras_serializable(package='Vision')
class BASNetModel(tf.keras.Model):
"""A BASNet model.
  Boundary-Aware network (BASNet) was proposed in:
[1] Qin, Xuebin, et al.
Basnet: Boundary-aware salient object detection.
Input images are passed through backbone first. Decoder network is then
applied, and finally, refinement module is applied on the output of the
decoder network.
"""
def __init__(self,
backbone,
decoder,
refinement=None,
**kwargs):
"""BASNet initialization function.
Args:
backbone: a backbone network. basnet_encoder.
decoder: a decoder network. basnet_decoder.
refinement: a module for salient map refinement.
**kwargs: keyword arguments to be passed.
"""
super(BASNetModel, self).__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'decoder': decoder,
'refinement': refinement,
}
self.backbone = backbone
self.decoder = decoder
self.refinement = refinement
def call(self, inputs, training=None): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
features = self.backbone(inputs)
if self.decoder:
features = self.decoder(features)
levels = sorted(features.keys())
new_key = str(len(levels))
if self.refinement:
features[new_key] = self.refinement(features[levels[-1]])
return features
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(backbone=self.backbone)
if self.decoder is not None:
items.update(decoder=self.decoder)
if self.refinement is not None:
items.update(refinement=self.refinement)
return items
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@tf.keras.utils.register_keras_serializable(package='Vision')
class BASNetEncoder(tf.keras.Model):
"""BASNet encoder."""
def __init__(
self,
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
activation='relu',
use_sync_bn=False,
use_bias=True,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""BASNet encoder initialization function.
Args:
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
use_bias: if True, use bias in conv2d.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
**kwargs: keyword arguments to be passed.
"""
self._input_specs = input_specs
self._use_sync_bn = use_sync_bn
self._use_bias = use_bias
self._activation = activation
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if tf.keras.backend.image_data_format() == 'channels_last':
bn_axis = -1
else:
bn_axis = 1
# Build BASNet Encoder.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
x = tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=1,
use_bias=self._use_bias, padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon)(
x)
x = tf_utils.get_activation(activation)(x)
endpoints = {}
for i, spec in enumerate(BASNET_ENCODER_SPECS):
x = self._block_group(
inputs=x,
filters=spec[0],
strides=spec[1],
block_repeats=spec[2],
name='block_group_l{}'.format(i + 2))
endpoints[str(i)] = x
if spec[3]:
x = tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='same')(x)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(BASNetEncoder, self).__init__(
inputs=inputs, outputs=endpoints, **kwargs)
def _block_group(self,
inputs,
filters,
strides,
block_repeats=1,
name='block_group'):
"""Creates one group of residual blocks for the BASNet encoder model.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first convolution of the layer.
strides: `int` stride to use for the first convolution of the layer. If
greater than 1, this layer will downsample the input.
block_repeats: `int` number of blocks contained in the layer.
      name: `str` name for the block.
Returns:
The output `Tensor` of the block layer.
"""
x = nn_blocks.ResBlock(
filters=filters,
strides=strides,
use_projection=True,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
use_bias=self._use_bias,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for _ in range(1, block_repeats):
x = nn_blocks.ResBlock(
filters=filters,
strides=1,
use_projection=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
use_bias=self._use_bias,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
x)
return tf.identity(x, name=name)
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('basnet_encoder')
def build_basnet_encoder(
input_specs: tf.keras.layers.InputSpec,
model_config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds BASNet Encoder backbone from a config."""
backbone_type = model_config.backbone.type
norm_activation_config = model_config.norm_activation
assert backbone_type == 'basnet_encoder', (f'Inconsistent backbone type '
f'{backbone_type}')
return BASNetEncoder(
input_specs=input_specs,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
use_bias=norm_activation_config.use_bias,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
@tf.keras.utils.register_keras_serializable(package='Vision')
class BASNetDecoder(tf.keras.layers.Layer):
"""BASNet decoder."""
def __init__(self,
activation='relu',
use_sync_bn=False,
use_bias=True,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""BASNet decoder initialization function.
Args:
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
use_bias: if True, use bias in convolution.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
**kwargs: keyword arguments to be passed.
"""
super(BASNetDecoder, self).__init__(**kwargs)
self._config_dict = {
'activation': activation,
'use_sync_bn': use_sync_bn,
'use_bias': use_bias,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
self._activation = tf_utils.get_activation(activation)
self._concat = tf.keras.layers.Concatenate(axis=-1)
self._sigmoid = tf.keras.layers.Activation(activation='sigmoid')
def build(self, input_shape):
"""Creates the variables of the BASNet decoder."""
conv_op = tf.keras.layers.Conv2D
conv_kwargs = {
'kernel_size': 3,
'strides': 1,
'use_bias': self._config_dict['use_bias'],
'kernel_initializer': self._config_dict['kernel_initializer'],
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
self._out_convs = []
self._out_usmps = []
# Bridge layers.
self._bdg_convs = []
for spec in BASNET_BRIDGE_SPECS:
blocks = []
for j in range(3):
blocks.append(nn_blocks.ConvBlock(
filters=spec[2*j],
dilation_rate=spec[2*j+1],
activation='relu',
use_sync_bn=self._config_dict['use_sync_bn'],
norm_momentum=0.99,
norm_epsilon=0.001,
**conv_kwargs))
self._bdg_convs.append(blocks)
self._out_convs.append(conv_op(
filters=1,
padding='same',
**conv_kwargs))
self._out_usmps.append(tf.keras.layers.UpSampling2D(
size=spec[6],
interpolation='bilinear'
))
# Decoder layers.
self._dec_convs = []
for spec in BASNET_DECODER_SPECS:
blocks = []
for j in range(3):
blocks.append(nn_blocks.ConvBlock(
filters=spec[2*j],
dilation_rate=spec[2*j+1],
activation='relu',
use_sync_bn=self._config_dict['use_sync_bn'],
norm_momentum=0.99,
norm_epsilon=0.001,
**conv_kwargs))
self._dec_convs.append(blocks)
self._out_convs.append(conv_op(
filters=1,
padding='same',
**conv_kwargs))
self._out_usmps.append(tf.keras.layers.UpSampling2D(
size=spec[6],
interpolation='bilinear'
))
def call(self, backbone_output: Mapping[str, tf.Tensor]):
"""Forward pass of the BASNet decoder.
Args:
backbone_output: A `dict` of tensors
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor` of the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
Returns:
sup: A `dict` of tensors
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor` of the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
"""
levels = sorted(backbone_output.keys(), reverse=True)
sup = {}
x = backbone_output[levels[0]]
for blocks in self._bdg_convs:
for block in blocks:
x = block(x)
sup['0'] = x
for i, blocks in enumerate(self._dec_convs):
x = self._concat([x, backbone_output[levels[i]]])
for block in blocks:
x = block(x)
sup[str(i+1)] = x
x = tf.keras.layers.UpSampling2D(
size=2,
interpolation='bilinear'
)(x)
for i, (conv, usmp) in enumerate(zip(self._out_convs, self._out_usmps)):
sup[str(i)] = self._sigmoid(usmp(conv(sup[str(i)])))
self._output_specs = {
str(order): sup[str(order)].get_shape()
for order in range(0, len(BASNET_DECODER_SPECS))
}
return sup
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {order: TensorShape} pairs for the model output."""
return self._output_specs
| 15,095 | 33.076749 | 139 | py |
models | models-master/official/projects/basnet/tasks/basnet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BASNet task definition."""
from typing import Optional
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import input_reader
from official.core import task_factory
from official.projects.basnet.configs import basnet as exp_cfg
from official.projects.basnet.evaluation import metrics as basnet_metrics
from official.projects.basnet.losses import basnet_losses
from official.projects.basnet.modeling import basnet_model
from official.projects.basnet.modeling import refunet
from official.vision.dataloaders import segmentation_input
def build_basnet_model(
input_specs: tf.keras.layers.InputSpec,
model_config: exp_cfg.BASNetModel,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
"""Builds BASNet model."""
norm_activation_config = model_config.norm_activation
backbone = basnet_model.BASNetEncoder(
input_specs=input_specs,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
use_bias=model_config.use_bias,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
decoder = basnet_model.BASNetDecoder(
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
use_bias=model_config.use_bias,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
refinement = refunet.RefUnet(
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
use_bias=model_config.use_bias,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
model = basnet_model.BASNetModel(backbone, decoder, refinement)
return model
@task_factory.register_task_cls(exp_cfg.BASNetTask)
class BASNetTask(base_task.Task):
"""A task for basnet."""
def build_model(self):
"""Builds basnet model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = build_basnet_model(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if 'all' in self.task_config.init_checkpoint_modules:
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.restore(ckpt_dir_or_file)
status.assert_consumed()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.restore(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Builds BASNet input."""
ignore_label = self.task_config.losses.ignore_label
decoder = segmentation_input.Decoder()
parser = segmentation_input.Parser(
output_size=params.output_size,
crop_size=params.crop_size,
ignore_label=ignore_label,
aug_rand_hflip=params.aug_rand_hflip,
dtype=params.dtype)
reader = input_reader.InputReader(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self, label, model_outputs, aux_losses=None):
"""Hybrid loss proposed in BASNet.
Args:
label: label.
model_outputs: Output logits of the classifier.
      aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.
Returns:
The total loss tensor.
"""
basnet_loss_fn = basnet_losses.BASNetLoss()
total_loss = basnet_loss_fn(model_outputs, label['masks'])
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss
def build_metrics(self, training=False):
"""Gets streaming metrics for training/validation."""
evaluations = []
if training:
evaluations = []
else:
self.mae_metric = basnet_metrics.MAE()
self.maxf_metric = basnet_metrics.MaxFscore()
self.relaxf_metric = basnet_metrics.RelaxedFscore()
return evaluations
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
# Casting output layer as float32 is necessary when mixed_precision is
# mixed_float16 or mixed_bfloat16 to ensure output is casted as float32.
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
loss = self.build_losses(
model_outputs=outputs, label=labels, aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
# Apply gradient clipping.
if self.task_config.gradient_clip_norm > 0:
grads, _ = tf.clip_by_global_norm(
grads, self.task_config.gradient_clip_norm)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = self.inference_step(features, model)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
loss = 0
logs = {self.loss: loss}
levels = sorted(outputs.keys())
logs.update(
{self.mae_metric.name: (labels['masks'], outputs[levels[-1]])})
logs.update(
{self.maxf_metric.name: (labels['masks'], outputs[levels[-1]])})
logs.update(
{self.relaxf_metric.name: (labels['masks'], outputs[levels[-1]])})
return logs
def inference_step(self, inputs, model):
"""Performs the forward step."""
return model(inputs, training=False)
def aggregate_logs(self, state=None, step_outputs=None):
if state is None:
self.mae_metric.reset_states()
self.maxf_metric.reset_states()
self.relaxf_metric.reset_states()
state = self.mae_metric
self.mae_metric.update_state(
step_outputs[self.mae_metric.name][0],
step_outputs[self.mae_metric.name][1])
self.maxf_metric.update_state(
step_outputs[self.maxf_metric.name][0],
step_outputs[self.maxf_metric.name][1])
self.relaxf_metric.update_state(
step_outputs[self.relaxf_metric.name][0],
step_outputs[self.relaxf_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
result = {}
result['MAE'] = self.mae_metric.result()
result['maxF'] = self.maxf_metric.result()
result['relaxF'] = self.relaxf_metric.result()
return result
| 9,905 | 34.378571 | 79 | py |
models | models-master/official/projects/basnet/losses/basnet_losses.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses used for BASNet models."""
import tensorflow as tf
EPSILON = 1e-5
class BASNetLoss:
"""BASNet hybrid loss."""
def __init__(self):
self._binary_crossentropy = tf.keras.losses.BinaryCrossentropy(
reduction=tf.keras.losses.Reduction.SUM, from_logits=False)
self._ssim = tf.image.ssim
def __call__(self, sigmoids, labels):
levels = sorted(sigmoids.keys())
labels_bce = tf.squeeze(labels, axis=-1)
labels = tf.cast(labels, tf.float32)
bce_losses = []
ssim_losses = []
iou_losses = []
for level in levels:
bce_losses.append(
self._binary_crossentropy(labels_bce, sigmoids[level]))
ssim_losses.append(
1 - self._ssim(sigmoids[level], labels, max_val=1.0))
iou_losses.append(
self._iou_loss(sigmoids[level], labels))
total_bce_loss = tf.math.add_n(bce_losses)
total_ssim_loss = tf.math.add_n(ssim_losses)
total_iou_loss = tf.math.add_n(iou_losses)
total_loss = total_bce_loss + total_ssim_loss + total_iou_loss
total_loss = total_loss / len(levels)
return total_loss
def _iou_loss(self, sigmoids, labels):
total_iou_loss = 0
intersection = tf.reduce_sum(sigmoids[:, :, :, :] * labels[:, :, :, :])
union = tf.reduce_sum(sigmoids[:, :, :, :]) + tf.reduce_sum(
labels[:, :, :, :]) - intersection
iou = intersection / union
total_iou_loss += 1-iou
return total_iou_loss
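# Illustrative usage sketch (not part of the original file); the batch size,
# spatial size and level keys below are assumptions for demonstration only:
#
#   loss_fn = BASNetLoss()
#   sigmoids = {'0': tf.random.uniform([2, 224, 224, 1]),
#               '1': tf.random.uniform([2, 224, 224, 1])}
#   labels = tf.cast(tf.random.uniform([2, 224, 224, 1]) > 0.5, tf.float32)
#   total_loss = loss_fn(sigmoids, labels)  # hybrid BCE + SSIM + IoU loss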
| 2,047 | 30.030303 | 75 | py |
models | models-master/official/vision/evaluation/iou.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IOU Metrics used for semantic segmentation models."""
from typing import Any, Dict, Optional, Sequence, Union
import numpy as np
import tensorflow as tf
class PerClassIoU(tf.keras.metrics.MeanIoU):
"""Computes the per-class Intersection-Over-Union metric.
This metric computes the IOU for each semantic class.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by
`sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Example:
>>> # cm = [[1, 1],
>>> # [1, 1]]
>>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
  >>> # iou = true_positives / (sum_row + sum_col - true_positives)
  >>> # result = [1 / (2 + 2 - 1), 1 / (2 + 2 - 1)] = [0.33, 0.33]
  >>> m = PerClassIoU(num_classes=2)
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
>>> m.result().numpy()
[0.33333334, 0.33333334]
"""
def result(self):
"""Compute IoU for each class via the confusion matrix."""
sum_over_row = tf.cast(
tf.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
sum_over_col = tf.cast(
tf.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
true_positives = tf.cast(
tf.linalg.tensor_diag_part(self.total_cm), dtype=self._dtype)
# sum_over_row + sum_over_col =
# 2 * true_positives + false_positives + false_negatives.
denominator = sum_over_row + sum_over_col - true_positives
return tf.math.divide_no_nan(true_positives, denominator)
class PerClassIoUV2(tf.keras.metrics.Metric):
"""Computes the per-class Intersection-Over-Union metric.
This implementation converts predictions and ground-truth to binary masks,
and uses logical AND and OR to compute intersection and union, which is much
faster than the PerClassIoU (using confusion matrix) above on TPU, but slower
on CPU and GPU.
"""
def __init__(self,
num_classes: int,
name: Optional[str] = None,
dtype: Optional[Union[str, tf.dtypes.DType]] = tf.float32,
shape: Optional[Sequence[int]] = None,
sparse_y_true: bool = False,
sparse_y_pred: bool = False,
axis: int = -1):
"""Initialization for PerClassIoU.
Args:
num_classes: `int`, number of classes.
name: `str`, name of the metric instance.
dtype: data type of the metric result.
shape: shape of the metrics result.
sparse_y_true: whether ground truth labels are encoded using integers or
dense one-hot vectors.
sparse_y_pred: whether predictions are encoded using integers or dense
one-hot vectors.
axis: (Optional) Defaults to -1. The dimension containing the one-hot
values.
"""
super().__init__(name=name, dtype=dtype)
self.num_classes = num_classes
self.sparse_y_true = sparse_y_true
self.sparse_y_pred = sparse_y_pred
self.axis = axis
# Variable to accumulate the intersection & union.
# intersection = true_positives
if not shape:
shape = [num_classes]
self.intersection_per_class = self.add_weight(
'intersection_per_class', shape, initializer='zeros', dtype=tf.float32)
# union = true_positives + false_positive + false_negative
self.union_per_class = self.add_weight(
'union_per_class', shape, initializer='zeros', dtype=tf.float32)
def reset_state(self):
"""Resets all of the metric state variables."""
self.intersection_per_class.assign(
tf.zeros_like(self.intersection_per_class)
)
self.union_per_class.assign(tf.zeros_like(self.union_per_class))
def update_state(self, y_true: tf.Tensor, y_pred: tf.Tensor):
"""Updates metric state by accumulating the variables.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
"""
if self.sparse_y_true:
# Shape: (..., num_classes, ...)
y_true = tf.one_hot(
tf.cast(y_true, dtype=tf.int32),
self.num_classes,
axis=self.axis,
on_value=True,
off_value=False,
)
if self.sparse_y_pred:
# Shape: (..., num_classes, ...)
y_pred = tf.one_hot(
tf.cast(y_pred, dtype=tf.int32),
self.num_classes,
axis=self.axis,
on_value=True,
off_value=False,
)
one_hot_axis = self.axis if self.axis >= 0 else (
len(y_true.get_shape().as_list()) + self.axis)
# Reduce sum the leading dimensions.
# Shape: (num_classes, ...)
current_intersection = tf.math.count_nonzero(
y_pred & y_true, axis=np.arange(one_hot_axis), dtype=tf.float32
)
# Shape: (num_classes, ...)
current_union = tf.math.count_nonzero(
y_pred | y_true, axis=np.arange(one_hot_axis), dtype=tf.float32
)
self.intersection_per_class.assign_add(
tf.cast(current_intersection, self.intersection_per_class.dtype))
self.union_per_class.assign_add(
tf.cast(current_union, self.union_per_class.dtype))
def result(self) -> tf.Tensor:
"""Computes IoU for each class."""
return tf.cast(
tf.math.divide_no_nan(self.intersection_per_class,
self.union_per_class), self.dtype)
def get_config(self) -> Dict[str, Any]:
"""Returns the serializable config of the metric."""
return {
'num_classes': self.num_classes,
'name': self.name,
'dtype': self.dtype,
'sparse_y_true': self.sparse_y_true,
'sparse_y_pred': self.sparse_y_pred,
'axis': self.axis,
}
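# Illustrative usage sketch (shapes and values below are assumptions, not part
# of the original file):
#
#   m = PerClassIoUV2(num_classes=3, sparse_y_true=True, sparse_y_pred=True)
#   m.update_state(y_true=tf.constant([[0, 1, 2, 1]]),
#                  y_pred=tf.constant([[0, 1, 1, 1]]))
#   m.result()  # per-class IoU of shape (3,): [1.0, 2/3, 0.0]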
| 6,354 | 34.702247 | 79 | py |
models | models-master/official/vision/evaluation/segmentation_metrics.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics for segmentation."""
from typing import Optional, Sequence, Tuple, Union
import tensorflow as tf
from official.vision.evaluation import iou
from official.vision.ops import box_ops
from official.vision.ops import spatial_transform_ops
class MeanIoU(tf.keras.metrics.MeanIoU):
"""Mean IoU metric for semantic segmentation.
This class utilizes tf.keras.metrics.MeanIoU to perform batched mean iou when
both input images and ground-truth masks are resized to the same size
(rescale_predictions=False). It also computes mean IoU on ground-truth
original sizes, in which case, each prediction is rescaled back to the
original image size.
"""
def __init__(self,
num_classes,
rescale_predictions=False,
name=None,
dtype=None):
"""Constructs Segmentation evaluator class.
Args:
num_classes: `int`, number of classes.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, y_true['image_info'] is used to rescale
predictions.
      name: `str`, name of the metric instance.
dtype: data type of the metric result.
"""
self._rescale_predictions = rescale_predictions
super().__init__(num_classes=num_classes, name=name, dtype=dtype)
def update_state(self, y_true, y_pred):
"""Updates metric state.
Args:
y_true: `dict`, dictionary with the following name, and key values.
- masks: [batch, height, width, 1], ground-truth masks.
- valid_masks: [batch, height, width, 1], valid elements in the mask.
- image_info: [batch, 4, 2], a tensor that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale]
is the scaling factor, which is the ratio of scaled dimension /
original dimension.
      y_pred: Tensor [batch, height_p, width_p, num_classes], predicted masks.
"""
predictions, masks, valid_masks = preprocess_inputs(
y_true, y_pred, self._rescale_predictions)
# Ignored mask elements are set to zero for fitting the confusion matrix.
masks = tf.where(valid_masks, masks, tf.zeros_like(masks))
predictions = tf.argmax(predictions, axis=3)
flatten_predictions = tf.reshape(predictions, shape=[-1])
flatten_masks = tf.reshape(masks, shape=[-1])
flatten_valid_masks = tf.reshape(valid_masks, shape=[-1])
super().update_state(
y_true=flatten_masks,
y_pred=flatten_predictions,
sample_weight=tf.cast(flatten_valid_masks, tf.float32))
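# Illustrative usage sketch (all shapes below are assumptions, not part of the
# original file); the 'image_info' entry is only consumed when
# rescale_predictions=True, but the key must still be present:
#
#   metric = MeanIoU(num_classes=21, rescale_predictions=False)
#   y_true = {
#       'masks': tf.zeros([2, 64, 64, 1], tf.int32),
#       'valid_masks': tf.ones([2, 64, 64, 1], tf.bool),
#       'image_info': tf.ones([2, 4, 2], tf.float32),
#   }
#   y_pred = tf.random.uniform([2, 64, 64, 21])
#   metric.update_state(y_true, y_pred)
#   metric.result()  # scalar mean IoU over the 21 classes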
class PerClassIoU(MeanIoU):
"""Per class IoU metric for semantic segmentation."""
def result(self):
"""Compute IoU for each class via the confusion matrix."""
sum_over_row = tf.cast(
tf.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
sum_over_col = tf.cast(
tf.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
true_positives = tf.cast(
tf.linalg.tensor_diag_part(self.total_cm), dtype=self._dtype)
# sum_over_row + sum_over_col =
# 2 * true_positives + false_positives + false_negatives.
denominator = sum_over_row + sum_over_col - true_positives
return tf.math.divide_no_nan(true_positives, denominator)
class PerClassIoUV2(iou.PerClassIoUV2):
"""Computes the per-class IoU metric for semantic segmentation.
This implementation converts predictions and ground truth to binary masks,
and uses logical AND and OR to compute intersection and union, which is much
faster than the MeanIoU and PerClassIoU (using confusion matrix) above on TPU,
but slower on CPU and GPU.
"""
def __init__(self,
num_classes: int,
rescale_predictions: bool = False,
name: Optional[str] = None,
dtype: Optional[Union[str, tf.dtypes.DType]] = tf.float32,
shape: Optional[Sequence[int]] = None,
axis: int = -1):
"""Constructs Segmentation evaluator class.
Args:
num_classes: `int`, number of classes.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, y_true['image_info'] is used to rescale
predictions.
name: `str`, name of the metric instance.
dtype: data type of the metric result.
shape: shape of the metrics result.
axis: (Optional) Defaults to -1. The dimension containing the one-hot
values.
"""
super().__init__(
num_classes=num_classes, name=name, dtype=dtype, shape=shape, axis=axis)
self._rescale_predictions = rescale_predictions
def update_state(self, y_true: tf.Tensor, y_pred: tf.Tensor):
"""Updates metric state.
Args:
y_true: `dict`, dictionary with the following name, and key values.
- masks: [batch, height, width, num_layers], ground-truth masks. The
num_layers is 1 by default, while all the operations in this function
support num_layers > 1.
- valid_masks: [batch, height, width, num_layers], valid elements in the
mask.
- image_info: [batch, 4, 2], a tensor that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale]
is the scaling factor, which is the ratio of scaled dimension /
original dimension.
      y_pred: Tensor [batch, height_p, width_p, num_classes], predicted masks.
"""
logits, gt_masks, valid_masks = preprocess_inputs(y_true, y_pred,
self._rescale_predictions)
valid_masks = tf.cast(valid_masks, tf.bool)
gt_binary_masks = tf.one_hot(
tf.cast(gt_masks[..., 0], dtype=tf.int32),
depth=self.num_classes,
on_value=True,
off_value=False,
)
gt_binary_masks &= valid_masks
predictions_binary_masks = tf.one_hot(
tf.argmax(logits, axis=-1, output_type=tf.int32),
depth=self.num_classes,
on_value=True,
off_value=False,
)
predictions_binary_masks &= valid_masks
super().update_state(
y_true=gt_binary_masks, y_pred=predictions_binary_masks
)
class MeanIoUV2(PerClassIoUV2):
"""Computes the mean IoU metric for semantic segmentation."""
def __init__(self,
target_class_ids: Optional[Tuple[int, ...]] = None,
**kwargs):
"""Initializes the class.
Args:
      target_class_ids: computes mean IoU for the target classes. Selects all
        the classes if empty.
**kwargs: the other arguments for initializing the base class.
"""
super().__init__(**kwargs)
self._target_class_ids = target_class_ids
def result(self) -> tf.Tensor:
"""Average the IoUs of all the classes."""
# (num_classes, )
per_class_ious = super().result()
if self._target_class_ids:
# (num_classes, )
target_class_indicators = tf.reduce_max(
tf.one_hot(
self._target_class_ids,
depth=self.num_classes,
dtype=per_class_ious.dtype),
axis=0)
return tf.math.divide_no_nan(
tf.reduce_sum(per_class_ious * target_class_indicators),
tf.reduce_sum(target_class_indicators))
else:
return tf.reduce_mean(per_class_ious)
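# Illustrative sketch (the class ids are assumptions, not part of the original
# file):
#
#   metric = MeanIoUV2(num_classes=21, target_class_ids=(1, 2, 3))
#   # After update_state calls, result() averages only the per-class IoUs of
#   # classes 1, 2 and 3 instead of all 21 classes.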
def preprocess_inputs(
y_true: tf.Tensor, y_pred: tf.Tensor,
rescale_predictions: bool) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Pre-processes the inputs (predictions and ground-truth) of the metrics.
Args:
y_true: `dict`, dictionary with the following name, and key values.
- masks: [batch, height, width, num_layers], ground-truth masks. The
num_layers is 1 by default, while all the operations in this function
support num_layers > 1.
- valid_masks: [batch, height, width, num_layers], valid elements in the
mask.
- image_info: [batch, 4, 2], a tensor that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale]
is the scaling factor, which is the ratio of scaled dimension /
original dimension.
    y_pred: tensor [batch, height_p, width_p, num_classes], predicted masks.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, y_true['image_info'] is used to rescale predictions.
Returns:
logits: a float tensor in shape [batch, height, width, num_classes], which
stores the raw output of the model.
gt_masks: an int tensor in shape [batch, height, width, 1], which stores the
ground-truth masks.
valid_masks: a bool tensor in shape [batch, height, width, 1], which
indicates the valid elements of the masks.
"""
logits = y_pred
gt_masks = y_true['masks']
valid_masks = y_true['valid_masks']
images_info = y_true['image_info']
if isinstance(logits, tuple) or isinstance(logits, list):
logits = tf.concat(logits, axis=0)
gt_masks = tf.concat(gt_masks, axis=0)
valid_masks = tf.concat(valid_masks, axis=0)
images_info = tf.concat(images_info, axis=0)
# The pixel is valid if any layer of the masks is valid at that pixel.
# (batch_size, height, width)
valid_masks = tf.reduce_any(tf.cast(valid_masks, tf.bool), axis=-1)
gt_masks_size = tf.shape(gt_masks)[1:3]
if rescale_predictions:
# Scale back predictions to original image shapes and pad to mask size.
# Note: instead of cropping the masks to image shape (dynamic), here we
# pad the rescaled predictions to mask size (fixed). And update the
# valid_masks to mask out the pixels outside the original image shape.
logits, image_shape_masks = (
_rescale_and_pad_predictions(
logits, images_info, output_size=gt_masks_size))
# Only the area within the original image shape is valid.
# (batch_size, height, width)
valid_masks &= image_shape_masks
else:
logits = tf.image.resize(
logits, gt_masks_size, method=tf.image.ResizeMethod.BILINEAR)
# (batch_size, height, width, 1)
valid_masks = valid_masks[..., tf.newaxis]
return logits, gt_masks, valid_masks
def _rescale_and_pad_predictions(
predictions: tf.Tensor, images_info: tf.Tensor,
output_size: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Scales back predictions to original image shapes and pads to output size.
Args:
predictions: A tensor in shape [batch, height, width, num_classes] which
stores the model predictions.
images_info: A tensor in shape [batch, 4, 2] that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width], [y_scale,
x_scale], [y_offset, x_offset]], where [desired_height, desired_width] is
the actual scaled image size, and [y_scale, x_scale] is the scaling
factor, which is the ratio of scaled dimension / original dimension.
output_size: A list/tuple/tensor stores the size of the padded output in
[output_height, output_width].
Returns:
predictions: A tensor in shape [batch, output_height, output_width,
num_classes] which stores the rescaled and padded predictions.
image_shape_masks: A bool tensor in shape [batch, output_height,
output_width] where the pixels inside the original image shape are true,
otherwise false.
"""
# (batch_size, 2)
image_shape = tf.cast(images_info[:, 0, :], tf.int32)
desired_size = tf.cast(images_info[:, 1, :], tf.float32)
image_scale = tf.cast(images_info[:, 2, :], tf.float32)
offset = tf.cast(images_info[:, 3, :], tf.int32)
rescale_size = tf.cast(tf.math.ceil(desired_size / image_scale), tf.int32)
# Rescale the predictions, then crop to the original image shape and
# finally pad zeros to match the mask size.
predictions = (
spatial_transform_ops.bilinear_resize_with_crop_and_pad(
predictions,
rescale_size,
crop_offset=offset,
crop_size=image_shape,
output_size=output_size))
# (batch_size, 2)
y0_x0 = tf.broadcast_to(
tf.constant([[0, 0]], dtype=image_shape.dtype), tf.shape(image_shape))
# (batch_size, 4)
image_shape_bbox = tf.concat([y0_x0, image_shape], axis=1)
# (batch_size, height, width)
image_shape_masks = box_ops.bbox2mask(
bbox=image_shape_bbox,
image_height=output_size[0],
image_width=output_size[1],
dtype=tf.bool)
return predictions, image_shape_masks
| 13,770 | 39.502941 | 80 | py |
models | models-master/official/vision/evaluation/instance_metrics.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics for instance detection & segmentation."""
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from official.vision.ops import box_ops
from official.vision.ops import mask_ops
class AveragePrecision(tf.keras.layers.Layer):
"""The algorithm which computes average precision from P-R curve."""
def __init__(self, *args, **kwargs):
# Enforce the `AveragePrecision` to operate in `float32` given the
# implementation requirements.
super().__init__(*args, dtype=tf.float32, **kwargs)
def call(self, precisions, recalls):
"""Computes average precision."""
raise NotImplementedError
class COCOAveragePrecision(AveragePrecision):
"""Average precision in COCO style.
In COCO, AP is defined as the mean of interpolated precisions at a set of 101
equally spaced recall points [0, 0.01, ..., 1]. For each recall point r,
the precision is interpolated to the maximum precision with corresponding
recall r' >= r.
  The VOC challenges before 2010 used a similar method, but with only 11 recall
  points [0, 0.1, ..., 1].
"""
def __init__(
self, num_recall_eval_points: int = 101, recalls_desc: bool = False
):
"""Initialization for COCOAveragePrecision.
Args:
num_recall_eval_points: the number of equally spaced recall points used
for interpolating the precisions.
recalls_desc: If true, the recalls are in descending order.
"""
super().__init__()
self._num_recall_eval_points = num_recall_eval_points
self._recalls_desc = recalls_desc
def get_config(self) -> Dict[str, Any]:
return {
'num_recall_eval_points': self._num_recall_eval_points,
'recalls_desc': self._recalls_desc,
}
def call(self, precisions: tf.Tensor, recalls: tf.Tensor) -> tf.Tensor:
"""Computes average precision.
Args:
precisions: a tensor in shape (dim_0, ..., num_confidences) which stores a
list of precision values at different confidence thresholds with
arbitrary numbers of leading dimensions.
recalls: a tensor in shape (dim_0, ..., num_confidences) which stores a
        list of recall values at different confidence thresholds with arbitrary
numbers of leading dimensions.
Returns:
A tensor in shape (dim_0, ...), which stores the area under P-R curve.
"""
p = precisions
r = recalls
if not isinstance(p, tf.Tensor):
p = tf.convert_to_tensor(p)
if not isinstance(r, tf.Tensor):
r = tf.convert_to_tensor(r)
if self._recalls_desc:
p = tf.reverse(p, axis=[-1])
r = tf.reverse(r, axis=[-1])
r_eval_points = tf.linspace(0.0, 1.0, self._num_recall_eval_points)
# (dim_0, ..., num_recall_eval_points)
# For each recall eval point, the precision is interpolated to the maximum
# precision with corresponding recall >= the recall eval point.
p_max = tf.reduce_max(
p[..., tf.newaxis, :]
* tf.cast(
r[..., tf.newaxis, :] >= r_eval_points[:, tf.newaxis], dtype=p.dtype
),
axis=-1,
)
# (dim_0, ...)
return tf.reduce_mean(p_max, axis=-1)
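# Illustrative sketch (the precision/recall values below are assumptions, not
# part of the original file): AP for a single class measured at three
# confidence thresholds.
#
#   ap_fn = COCOAveragePrecision(num_recall_eval_points=101)
#   precisions = tf.constant([1.0, 0.8, 0.6])
#   recalls = tf.constant([0.2, 0.5, 0.9])
#   ap_fn(precisions, recalls)  # area under the interpolated P-R curve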
class VOC2010AveragePrecision(AveragePrecision):
"""Average precision in VOC 2010 style.
Since VOC 2010, first compute an approximation of the measured P-R curve
with precision monotonically decreasing, by setting the precision for recall
r to the maximum precision obtained for any recall r' >= r. Then compute the
AP as the area under this curve by numerical integration.
"""
def __init__(self, recalls_desc: bool = False):
"""Initialization for VOC10AveragePrecision.
Args:
recalls_desc: If true, the recalls are in descending order.
"""
super().__init__()
self._recalls_desc = recalls_desc
def get_config(self) -> Dict[str, Any]:
return {
'recalls_desc': self._recalls_desc,
}
def call(self, precisions: tf.Tensor, recalls: tf.Tensor) -> tf.Tensor:
"""Computes average precision.
Args:
precisions: a tensor in shape (dim_0, ..., num_confidences) which stores a
list of precision values at different confidence thresholds with
arbitrary numbers of leading dimensions.
recalls: a tensor in shape (dim_0, ..., num_confidences) which stores a
        list of recall values at different confidence thresholds with arbitrary
numbers of leading dimensions.
Returns:
A tensor in shape (dim_0, ...), which stores the area under P-R curve.
"""
p = precisions
r = recalls
if not isinstance(p, tf.Tensor):
p = tf.convert_to_tensor(p)
if not isinstance(r, tf.Tensor):
r = tf.convert_to_tensor(r)
if self._recalls_desc:
p = tf.reverse(p, axis=[-1])
r = tf.reverse(r, axis=[-1])
axis_indices = list(range(len(p.get_shape())))
# Transpose to (num_confidences, ...), because tf.scan only applies to the
# first dimension.
p = tf.transpose(p, np.roll(axis_indices, 1))
# Compute cumulative maximum in reverse order.
# For example, the reverse cumulative maximum of [5,6,3,4,2,1] is
# [6,6,4,4,2,1].
p = tf.scan(
tf.maximum, elems=p, initializer=tf.reduce_min(p, axis=0), reverse=True
)
# Transpose back to (..., num_confidences)
p = tf.transpose(p, np.roll(axis_indices, -1))
# Prepend 0 to r and compute the delta.
r = tf.concat([tf.zeros_like(r[..., 0:1]), r], axis=-1)
delta_r = tf.roll(r, shift=-1, axis=-1) - r
return tf.reduce_sum(p * delta_r[..., :-1], axis=-1)
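# Illustrative sketch (assumed values, not part of the original file): with
# precisions [1.0, 0.8, 0.6] at recalls [0.2, 0.5, 0.9], the integrated area
# is 1.0 * 0.2 + 0.8 * 0.3 + 0.6 * 0.4 = 0.68.
#
#   ap_fn = VOC2010AveragePrecision()
#   ap_fn(tf.constant([1.0, 0.8, 0.6]), tf.constant([0.2, 0.5, 0.9]))  # ~0.68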
class MatchingAlgorithm(tf.keras.layers.Layer):
"""The algorithm which matches detections to ground truths."""
def __init__(self, *args, **kwargs):
    # Enforce the `MatchingAlgorithm` to operate in `float32` given the
    # implementation requirements.
super().__init__(*args, dtype=tf.float32, **kwargs)
def call(
self,
detection_to_gt_ious: tf.Tensor,
detection_classes: tf.Tensor,
detection_scores: tf.Tensor,
gt_classes: tf.Tensor,
):
"""Matches detections to ground truths."""
raise NotImplementedError
class COCOMatchingAlgorithm(MatchingAlgorithm):
"""The detection matching algorithm used in COCO."""
def __init__(self, iou_thresholds: Tuple[float, ...]):
"""Initialization for COCOMatchingAlgorithm.
Args:
iou_thresholds: a list of IoU thresholds.
"""
super().__init__()
self._iou_thresholds = iou_thresholds
def get_config(self) -> Dict[str, Any]:
return {
'iou_thresholds': self._iou_thresholds,
}
def call(
self,
detection_to_gt_ious: tf.Tensor,
detection_classes: tf.Tensor,
detection_scores: tf.Tensor,
gt_classes: tf.Tensor,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Matches detections to ground truths.
    This is the matching algorithm used in COCO. First, sort all the detections
    based on the scores from high to low. Then, for each detection, iterate
    through all ground truths. The unmatched ground truth with the highest IoU
    greater than the threshold is matched to the detection.
Args:
detection_to_gt_ious: a tensor in shape of (batch_size, num_detections,
num_gts) which stores the IoUs for each pair of detection and ground
truth.
detection_classes: a tensor in shape of (batch_size, num_detections) which
stores the classes of the detections.
detection_scores: a tensor in shape of (batch_size, num_detections) which
stores the scores of the detections.
gt_classes: a tensor in shape of (batch_size, num_gts) which stores the
classes of the ground truth boxes.
Returns:
Two bool tensors in shape of (batch_size, num_detections,
num_iou_thresholds) and (batch_size, num_gts, num_iou_thresholds) which
indicates whether the detections and ground truths are true positives at
different IoU thresholds.
"""
batch_size = tf.shape(detection_classes)[0]
num_detections = detection_classes.get_shape()[1]
num_gts = gt_classes.get_shape()[1]
num_iou_thresholds = len(self._iou_thresholds)
# (batch_size, num_detections)
sorted_detection_indices = tf.argsort(
detection_scores, axis=1, direction='DESCENDING'
)
# (batch_size, num_detections)
sorted_detection_classes = tf.gather(
detection_classes, sorted_detection_indices, batch_dims=1
)
# (batch_size, num_detections, num_gts)
sorted_detection_to_gt_ious = tf.gather(
detection_to_gt_ious, sorted_detection_indices, batch_dims=1
)
init_loop_vars = (
0, # i: the loop counter
tf.zeros(
[batch_size, num_detections, num_iou_thresholds], dtype=tf.bool
), # detection_is_tp
tf.zeros(
[batch_size, num_gts, num_iou_thresholds], dtype=tf.bool
), # gt_is_tp
)
def _match_detection_to_gt_loop_body(
i: int, detection_is_tp: tf.Tensor, gt_is_tp: tf.Tensor
) -> Tuple[int, tf.Tensor, tf.Tensor]:
"""Iterates the sorted detections and matches to the ground truths."""
# (batch_size, num_gts)
gt_ious = sorted_detection_to_gt_ious[:, i, :]
# (batch_size, num_gts, num_iou_thresholds)
gt_matches_detection = (
# Ground truth is not matched yet.
~gt_is_tp
# IoU is greater than the threshold.
& (gt_ious[:, :, tf.newaxis] > self._iou_thresholds)
# Classes are matched.
& (
(sorted_detection_classes[:, i][:, tf.newaxis] == gt_classes)
& (gt_classes > 0)
)[:, :, tf.newaxis]
)
# Finds the matched ground truth with max IoU.
# If there is no matched ground truth, the argmax op will return index 0
# in this step. It's fine because it will be masked out in the next step.
# (batch_size, num_iou_thresholds)
matched_gt_with_max_iou = tf.argmax(
tf.cast(gt_matches_detection, gt_ious.dtype)
* gt_ious[:, :, tf.newaxis],
axis=1,
output_type=tf.int32,
)
# (batch_size, num_gts, num_iou_thresholds)
gt_matches_detection &= tf.one_hot(
matched_gt_with_max_iou,
depth=num_gts,
on_value=True,
off_value=False,
axis=1,
)
# Updates detection_is_tp
# Map index back to the unsorted detections.
# (batch_size, num_detections, num_iou_thresholds)
detection_is_tp |= (
tf.reduce_any(gt_matches_detection, axis=1, keepdims=True)
& tf.one_hot(
sorted_detection_indices[:, i],
depth=num_detections,
on_value=True,
off_value=False,
axis=-1,
)[:, :, tf.newaxis]
)
detection_is_tp.set_shape([None, num_detections, num_iou_thresholds])
# Updates gt_is_tp
# (batch_size, num_gts, num_iou_thresholds)
gt_is_tp |= gt_matches_detection
gt_is_tp.set_shape([None, num_gts, num_iou_thresholds])
# Returns the updated loop vars.
return (i + 1, detection_is_tp, gt_is_tp)
_, detection_is_tp_result, gt_is_tp_result = tf.while_loop(
cond=lambda i, *_: i < num_detections,
body=_match_detection_to_gt_loop_body,
loop_vars=init_loop_vars,
parallel_iterations=32,
maximum_iterations=num_detections,
)
return detection_is_tp_result, gt_is_tp_result
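# Illustrative sketch (all shapes and values are assumptions, not part of the
# original file): one image with two detections and a single ground truth,
# matched at IoU thresholds 0.5 and 0.75.
#
#   matcher = COCOMatchingAlgorithm(iou_thresholds=(0.5, 0.75))
#   det_is_tp, gt_is_tp = matcher(
#       tf.constant([[[0.6], [0.3]]]),  # detection_to_gt_ious, (1, 2, 1)
#       tf.constant([[1, 1]]),          # detection_classes
#       tf.constant([[0.9, 0.8]]),      # detection_scores
#       tf.constant([[1]]))             # gt_classes
#   # det_is_tp has shape (1, 2, 2); only the first detection is a true
#   # positive, and only at the 0.5 IoU threshold.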
def _shift_and_rescale_boxes(
boxes: tf.Tensor,
output_boundary: Tuple[int, int],
) -> tf.Tensor:
"""Shift and rescale the boxes to fit in the output boundary.
The output boundary of the boxes can be smaller than the original image size
for accelerating the downstream calculations (dynamic mask resizing, mask IoU,
etc.).
For each image of the batch:
(1) find the upper boundary (min_ymin) and the left boundary (min_xmin) of all
the boxes.
(2) shift all the boxes up min_ymin pixels and left min_xmin pixels.
(3) find the new lower boundary (max_ymax) and the right boundary (max_xmax)
of all the boxes.
(4) if max_ymax > output_height or max_xmax > output_width (some boxes don't
fit in the output boundary), downsample all the boxes by ratio:
min(output_height / max_ymax, output_width / max_xmax). The aspect ratio
is not changed.
Args:
boxes: a tensor with a shape of [batch_size, N, 4]. The last dimension is
the pixel coordinates in [ymin, xmin, ymax, xmax] form.
output_boundary: two integers that represent the height and width of the
output.
Returns:
The tensor [batch_size, N, 4] of the output boxes.
"""
boxes = tf.cast(boxes, dtype=tf.float32)
# (batch_size, num_boxes, 1)
is_valid_box = tf.reduce_any(
(boxes[:, :, 2:4] - boxes[:, :, 0:2]) > 0, axis=-1, keepdims=True
)
# (batch_size, 2)
min_ymin_xmin = tf.reduce_min(
tf.where(is_valid_box, boxes, np.inf)[:, :, 0:2],
axis=1,
)
# (batch_size, num_boxes, 4)
boxes = tf.where(
is_valid_box,
boxes - tf.tile(min_ymin_xmin, [1, 2])[:, tf.newaxis, :],
0.0,
)
# (batch_size,)
max_ymax = tf.reduce_max(boxes[:, :, 2], axis=1)
max_xmax = tf.reduce_max(boxes[:, :, 3], axis=1)
# (batch_size,)
y_resize_ratio = output_boundary[0] / max_ymax
x_resize_ratio = output_boundary[1] / max_xmax
# (batch_size,)
downsampling_ratio = tf.math.minimum(
tf.math.minimum(y_resize_ratio, x_resize_ratio), 1.0
)
# (batch_size, num_boxes, 4)
return boxes * downsampling_ratio[:, tf.newaxis, tf.newaxis]
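# Illustrative example (assumed values): a single box [100, 120, 300, 320] with
# output_boundary=(128, 128) is first shifted to [0, 0, 200, 200] and then
# downscaled by 128 / 200 = 0.64 to [0, 0, 128, 128].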
def _count_detection_type(
detection_type_mask: tf.Tensor,
detection_classes: tf.Tensor,
flattened_binned_confidence_one_hot: tf.Tensor,
num_classes: int,
) -> tf.Tensor:
"""Counts detection type grouped by IoU thresholds, classes and confidence bins.
Args:
detection_type_mask: a bool tensor in shape of (batch_size, num_detections,
      num_iou_thresholds), which indicates a certain type of detections (e.g.
      true positives).
detection_classes: a tensor in shape of (batch_size, num_detections) which
stores the classes of the detections.
flattened_binned_confidence_one_hot: a one-hot bool tensor in shape of
(batch_size * num_detections, num_confidence_bins + 1) which indicates the
binned confidence score of each detection.
num_classes: the number of classes.
Returns:
A tensor in shape of (num_iou_thresholds, num_classes,
num_confidence_bins + 1) which stores the count grouped by IoU thresholds,
classes and confidence bins.
"""
num_iou_thresholds = detection_type_mask.get_shape()[-1]
# (batch_size, num_detections, num_iou_thresholds)
masked_classes = tf.where(
detection_type_mask, detection_classes[..., tf.newaxis], -1
)
# (num_iou_thresholds, batch_size * num_detections)
flattened_masked_classes = tf.transpose(
tf.reshape(masked_classes, [-1, num_iou_thresholds])
)
# (num_iou_thresholds, num_classes, batch_size * num_detections)
flattened_masked_classes_one_hot = tf.one_hot(
flattened_masked_classes, depth=num_classes, axis=1
)
# (num_iou_thresholds * num_classes, batch_size * num_detections)
flattened_masked_classes_one_hot = tf.reshape(
flattened_masked_classes_one_hot,
[num_iou_thresholds * num_classes, -1],
)
# (num_iou_thresholds * num_classes, num_confidence_bins + 1)
count = tf.matmul(
flattened_masked_classes_one_hot,
tf.cast(flattened_binned_confidence_one_hot, tf.float32),
a_is_sparse=True,
b_is_sparse=True,
)
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
count = tf.reshape(count, [num_iou_thresholds, num_classes, -1])
# Clears the count of class 0 (background)
count *= 1.0 - tf.eye(num_classes, 1, dtype=count.dtype)
return count
class InstanceMetrics(tf.keras.metrics.Metric):
"""Reports the metrics of instance detection & segmentation."""
def __init__(
self,
num_classes: int,
use_masks: bool = False,
iou_thresholds: Tuple[float, ...] = (0.5,),
confidence_thresholds: Tuple[float, ...] = (),
num_confidence_bins: int = 1000,
mask_output_boundary: Tuple[int, int] = (640, 640),
matching_algorithm: Optional[MatchingAlgorithm] = None,
average_precision_algorithms: Optional[
Dict[str, AveragePrecision]
] = None,
name: Optional[str] = None,
dtype: Optional[Union[str, tf.dtypes.DType]] = tf.float32,
**kwargs
):
"""Initialization for AveragePrecision.
Args:
num_classes: the number of classes.
use_masks: if true, use the masks of the instances when calculating the
metrics, otherwise use the boxes.
iou_thresholds: a sequence of IoU thresholds over which to calculate the
instance metrics.
confidence_thresholds: a sequence of confidence thresholds. If set, also
report precision and recall at each confidence threshold, otherwise,
only report average precision.
num_confidence_bins: the number of confidence bins used for bin sort.
mask_output_boundary: two integers that represent the height and width of
the boundary where the resized instance masks are pasted. For each
example, if any of the detection or ground truth boxes is out of the
boundary, shift and resize all the detection and ground truth boxes of
the example to fit them into the boundary. The output boundary of the
pasted masks can be smaller than the real image size for accelerating
the calculation.
matching_algorithm: the algorithm which matches detections to ground
truths.
average_precision_algorithms: the algorithms which compute average
precision from P-R curve. The keys are used in the metrics results.
name: the name of the metric instance.
dtype: data type of the metric result.
**kwargs: Additional keywords arguments.
"""
super().__init__(name=name, dtype=dtype, **kwargs)
self._num_classes = num_classes
self._use_masks = use_masks
self._iou_thresholds = iou_thresholds
self._confidence_thresholds = confidence_thresholds
self._num_iou_thresholds = len(iou_thresholds)
self._num_confidence_bins = num_confidence_bins
self._mask_output_boundary = mask_output_boundary
if not matching_algorithm:
self._matching_algorithm = COCOMatchingAlgorithm(iou_thresholds)
else:
self._matching_algorithm = matching_algorithm
if not average_precision_algorithms:
self._average_precision_algorithms = {'ap': COCOAveragePrecision()}
else:
self._average_precision_algorithms = average_precision_algorithms
# Variables
self.tp_count = self.add_weight(
'tp_count',
shape=[
self._num_iou_thresholds,
self._num_classes,
self._num_confidence_bins + 1,
],
initializer='zeros',
dtype=tf.float32,
)
self.fp_count = self.add_weight(
'fp_count',
shape=[
self._num_iou_thresholds,
self._num_classes,
self._num_confidence_bins + 1,
],
initializer='zeros',
dtype=tf.float32,
)
self.gt_count = self.add_weight(
'gt_count',
shape=[self._num_classes],
initializer='zeros',
dtype=tf.float32,
)
def get_config(self) -> Dict[str, Any]:
"""Returns the serializable config of the metric."""
return {
'num_classes': self._num_classes,
'use_masks': self._use_masks,
'iou_thresholds': self._iou_thresholds,
'confidence_thresholds': self._confidence_thresholds,
'num_confidence_bins': self._num_confidence_bins,
'mask_output_boundary': self._mask_output_boundary,
'matching_algorithm': self._matching_algorithm,
'average_precision_algorithms': self._average_precision_algorithms,
'name': self.name,
'dtype': self.dtype,
}
def reset_state(self):
"""Resets all of the metric state variables."""
self.tp_count.assign(tf.zeros_like(self.tp_count))
self.fp_count.assign(tf.zeros_like(self.fp_count))
self.gt_count.assign(tf.zeros_like(self.gt_count))
def update_state(
self, y_true: Dict[str, tf.Tensor], y_pred: Dict[str, tf.Tensor]
):
# (batch_size, num_detections, 4) in absolute coordinates.
detection_boxes = tf.cast(y_pred['detection_boxes'], tf.float32)
# (batch_size, num_detections)
detection_classes = tf.cast(y_pred['detection_classes'], tf.int32)
# (batch_size, num_detections)
detection_scores = tf.cast(y_pred['detection_scores'], tf.float32)
# (batch_size, num_gts, 4) in absolute coordinates.
gt_boxes = tf.cast(y_true['boxes'], tf.float32)
# (batch_size, num_gts)
gt_classes = tf.cast(y_true['classes'], tf.int32)
# (batch_size, num_gts)
if 'is_crowds' in y_true:
gt_is_crowd = tf.cast(y_true['is_crowds'], tf.bool)
else:
gt_is_crowd = tf.zeros_like(gt_classes, dtype=tf.bool)
image_scale = tf.tile(y_true['image_info'][:, 2:3, :], multiples=[1, 1, 2])
detection_boxes = detection_boxes / tf.cast(
image_scale, dtype=detection_boxes.dtype
)
# Step 1: Computes IoUs between the detections and the non-crowd ground
# truths and IoAs between the detections and the crowd ground truths.
if not self._use_masks:
# (batch_size, num_detections, num_gts)
detection_to_gt_ious = box_ops.bbox_overlap(detection_boxes, gt_boxes)
detection_to_gt_ioas = box_ops.bbox_intersection_over_area(
detection_boxes, gt_boxes
)
else:
# Use outer boxes to generate the masks if available.
if 'detection_outer_boxes' in y_pred:
detection_boxes = tf.cast(y_pred['detection_outer_boxes'], tf.float32)
# (batch_size, num_detections, mask_height, mask_width)
detection_masks = tf.cast(y_pred['detection_masks'], tf.float32)
# (batch_size, num_gts, gt_mask_height, gt_mask_width)
gt_masks = tf.cast(y_true['masks'], tf.float32)
num_detections = detection_boxes.get_shape()[1]
# (batch_size, num_detections + num_gts, 4)
all_boxes = _shift_and_rescale_boxes(
tf.concat([detection_boxes, gt_boxes], axis=1),
self._mask_output_boundary,
)
detection_boxes = all_boxes[:, :num_detections, :]
gt_boxes = all_boxes[:, num_detections:, :]
# (batch_size, num_detections, num_gts)
detection_to_gt_ious, detection_to_gt_ioas = (
mask_ops.instance_masks_overlap(
detection_boxes,
detection_masks,
gt_boxes,
gt_masks,
output_size=self._mask_output_boundary,
)
)
# (batch_size, num_detections, num_gts)
detection_to_gt_ious = tf.where(
gt_is_crowd[:, tf.newaxis, :], 0.0, detection_to_gt_ious
)
detection_to_crowd_ioas = tf.where(
gt_is_crowd[:, tf.newaxis, :], detection_to_gt_ioas, 0.0
)
# Step 2: counts true positives grouped by IoU thresholds, classes and
# confidence bins.
# (batch_size, num_detections, num_iou_thresholds)
detection_is_tp, _ = self._matching_algorithm(
detection_to_gt_ious, detection_classes, detection_scores, gt_classes
)
# (batch_size * num_detections,)
flattened_binned_confidence = tf.reshape(
tf.cast(detection_scores * self._num_confidence_bins, tf.int32), [-1]
)
# (batch_size * num_detections, num_confidence_bins + 1)
flattened_binned_confidence_one_hot = tf.one_hot(
flattened_binned_confidence, self._num_confidence_bins + 1, axis=1
)
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
tp_count = _count_detection_type(
detection_is_tp,
detection_classes,
flattened_binned_confidence_one_hot,
self._num_classes,
)
# Step 3: Counts false positives grouped by IoU thresholds, classes and
# confidence bins.
# False positive: detection is not true positive (see above) and not part of
# the crowd ground truth with the same class.
# (batch_size, num_detections, num_gts, num_iou_thresholds)
detection_matches_crowd = (
(detection_to_crowd_ioas[..., tf.newaxis] > self._iou_thresholds)
& (
detection_classes[:, :, tf.newaxis, tf.newaxis]
== gt_classes[:, tf.newaxis, :, tf.newaxis]
)
& (detection_classes[:, :, tf.newaxis, tf.newaxis] > 0)
)
# (batch_size, num_detections, num_iou_thresholds)
detection_matches_any_crowd = tf.reduce_any(
detection_matches_crowd & ~detection_is_tp[:, :, tf.newaxis, :], axis=2
)
detection_is_fp = ~detection_is_tp & ~detection_matches_any_crowd
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
fp_count = _count_detection_type(
detection_is_fp,
detection_classes,
flattened_binned_confidence_one_hot,
self._num_classes,
)
# Step 4: Counts non-crowd groundtruths grouped by classes.
# (num_classes, )
gt_count = tf.reduce_sum(
tf.one_hot(
tf.where(gt_is_crowd, -1, gt_classes), self._num_classes, axis=-1
),
axis=[0, 1],
)
# Clears the count of class 0 (background).
gt_count *= 1.0 - tf.eye(1, self._num_classes, dtype=gt_count.dtype)[0]
# Accumulates the variables.
self.fp_count.assign_add(tf.cast(fp_count, self.fp_count.dtype))
self.tp_count.assign_add(tf.cast(tp_count, self.tp_count.dtype))
self.gt_count.assign_add(tf.cast(gt_count, self.gt_count.dtype))
def result(self) -> Dict[str, tf.Tensor]:
"""Returns the metrics values as a dict.
Returns:
A `dict` containing:
'ap': a float tensor in shape (num_iou_thresholds, num_classes) which
stores the average precision of each class at different IoU thresholds.
'precision': a float tensor in shape (num_confidence_thresholds,
num_iou_thresholds, num_classes) which stores the precision of each
class at different confidence thresholds & IoU thresholds.
'recall': a float tensor in shape (num_confidence_thresholds,
num_iou_thresholds, num_classes) which stores the recall of each
class at different confidence thresholds & IoU thresholds.
'valid_classes': a bool tensor in shape (num_classes,). If False, there
is no instance of the class in the ground truth.
"""
result = {
# (num_classes,)
'valid_classes': self.gt_count != 0,
}
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
tp_count_cum_by_confidence = tf.math.cumsum(
self.tp_count, axis=-1, reverse=True
)
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
fp_count_cum_by_confidence = tf.math.cumsum(
self.fp_count, axis=-1, reverse=True
)
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
precisions = tf.math.divide_no_nan(
tp_count_cum_by_confidence,
tp_count_cum_by_confidence + fp_count_cum_by_confidence,
)
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
recalls = tf.math.divide_no_nan(
tp_count_cum_by_confidence, self.gt_count[..., tf.newaxis]
)
if self._confidence_thresholds:
# If confidence_thresholds is set, reports precision and recall at each
# confidence threshold.
confidence_thresholds = tf.cast(
tf.constant(self._confidence_thresholds, dtype=tf.float32)
* self._num_confidence_bins,
dtype=tf.int32,
)
# (num_confidence_thresholds, num_iou_thresholds, num_classes)
result['precisions'] = tf.gather(
tf.transpose(precisions, [2, 0, 1]), confidence_thresholds
)
result['recalls'] = tf.gather(
tf.transpose(recalls, [2, 0, 1]), confidence_thresholds
)
precisions = tf.reverse(precisions, axis=[-1])
recalls = tf.reverse(recalls, axis=[-1])
result.update(
{
# (num_iou_thresholds, num_classes)
key: ap_algorithm(precisions, recalls)
for key, ap_algorithm in self._average_precision_algorithms.items()
}
)
return result
def get_average_precision_metrics_keys(self):
"""Gets the keys of the average precision metrics in the results."""
return self._average_precision_algorithms.keys()
| 29,104 | 36.123724 | 82 | py |
models | models-master/official/vision/evaluation/panoptic_quality.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the Panoptic Quality metric.
Panoptic Quality is an instance-based metric for evaluating the task of
image parsing, aka panoptic segmentation.
Please see the paper for details:
"Panoptic Segmentation", Alexander Kirillov, Kaiming He, Ross Girshick,
Carsten Rother and Piotr Dollar. arXiv:1801.00868, 2018.
Note that this metric class is branched from
https://github.com/tensorflow/models/blob/master/research/deeplab/evaluation/panoptic_quality.py
"""
import collections
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from official.vision.ops import box_ops
_EPSILON = 1e-10
def realdiv_maybe_zero(x, y):
"""Element-wise x / y where y may contain zeros, for those returns 0 too."""
return np.where(
np.less(np.abs(y), _EPSILON), np.zeros_like(x), np.divide(x, y))
def _ids_to_counts(id_array):
"""Given a numpy array, a mapping from each unique entry to its count."""
ids, counts = np.unique(id_array, return_counts=True)
return dict(zip(ids, counts))
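# For example, _ids_to_counts(np.array([[1, 1], [1, 2]])) returns {1: 3, 2: 1}.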
class PanopticQuality:
"""Metric class for Panoptic Quality.
"Panoptic Segmentation" by Alexander Kirillov, Kaiming He, Ross Girshick,
Carsten Rother, Piotr Dollar.
https://arxiv.org/abs/1801.00868
"""
def __init__(self, num_categories, ignored_label, max_instances_per_category,
offset):
"""Initialization for PanopticQualityMetric.
Args:
num_categories: The number of segmentation categories (or "classes" in the
dataset).
ignored_label: A category id that is ignored in evaluation, e.g. the void
label as defined in COCO panoptic segmentation dataset.
max_instances_per_category: The maximum number of instances for each
category. Used in ensuring unique instance labels.
offset: The maximum number of unique labels. This is used, by multiplying
the ground-truth labels, to generate unique ids for individual regions
of overlap between ground-truth and predicted segments.
"""
self.num_categories = num_categories
self.ignored_label = ignored_label
self.max_instances_per_category = max_instances_per_category
self.offset = offset
self.reset()
def _naively_combine_labels(self, category_mask, instance_mask):
"""Naively creates a combined label array from categories and instances."""
return (category_mask.astype(np.uint32) * self.max_instances_per_category +
instance_mask.astype(np.uint32))
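  # For example (illustrative values), with max_instances_per_category=256,
  # category 3 / instance 7 combine into the unique segment id 3 * 256 + 7.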
def compare_and_accumulate(self, groundtruths, predictions):
"""Compares predictions with ground-truths, and accumulates the metrics.
It is not assumed that instance ids are unique across different categories.
See for example combine_semantic_and_instance_predictions.py in official
PanopticAPI evaluation code for issues to consider when fusing category
and instance labels.
    Instance ids of the ignored category have the meaning that id 0 is "void"
    and the remaining ones are crowd instances.
Args:
      groundtruths: A dictionary containing ground-truth labels. It should
        contain the following fields.
- category_mask: A 2D numpy uint16 array of ground-truth per-pixel
category labels.
- instance_mask: A 2D numpy uint16 array of ground-truth per-pixel
instance labels.
      predictions: A dictionary containing the model outputs. It should
        contain the following fields.
        - category_mask: A 2D numpy uint16 array of predicted per-pixel
          category labels.
        - instance_mask: A 2D numpy uint16 array of predicted instance labels.
"""
groundtruth_category_mask = groundtruths['category_mask']
groundtruth_instance_mask = groundtruths['instance_mask']
predicted_category_mask = predictions['category_mask']
predicted_instance_mask = predictions['instance_mask']
# First, combine the category and instance labels so that every unique
# value for (category, instance) is assigned a unique integer label.
pred_segment_id = self._naively_combine_labels(predicted_category_mask,
predicted_instance_mask)
gt_segment_id = self._naively_combine_labels(groundtruth_category_mask,
groundtruth_instance_mask)
# Pre-calculate areas for all ground-truth and predicted segments.
gt_segment_areas = _ids_to_counts(gt_segment_id)
pred_segment_areas = _ids_to_counts(pred_segment_id)
# We assume there is only one void segment and it has instance id = 0.
void_segment_id = self.ignored_label * self.max_instances_per_category
# There may be other ignored ground-truth segments with instance id > 0,
# find those ids using the unique segment ids extracted with the area
# computation above.
ignored_segment_ids = {
gt_segment_id for gt_segment_id in gt_segment_areas
if (gt_segment_id //
self.max_instances_per_category) == self.ignored_label
}
# Next, combine the ground-truth and predicted labels. Divide up the pixels
# based on which ground-truth segment and predicted segment they belong to,
# this will assign a different 32-bit integer label to each choice of
# (ground-truth segment, predicted segment), encoded as
# gt_segment_id * offset + pred_segment_id.
intersection_id_array = (
gt_segment_id.astype(np.uint64) * self.offset +
pred_segment_id.astype(np.uint64))
# For every combination of (ground-truth segment, predicted segment) with a
# non-empty intersection, this counts the number of pixels in that
# intersection.
intersection_areas = _ids_to_counts(intersection_id_array)
# Helper function that computes the area of the overlap between a predicted
# segment and the ground-truth void/ignored segment.
def prediction_void_overlap(pred_segment_id):
void_intersection_id = void_segment_id * self.offset + pred_segment_id
return intersection_areas.get(void_intersection_id, 0)
# Compute overall ignored overlap.
def prediction_ignored_overlap(pred_segment_id):
total_ignored_overlap = 0
for ignored_segment_id in ignored_segment_ids:
intersection_id = ignored_segment_id * self.offset + pred_segment_id
total_ignored_overlap += intersection_areas.get(intersection_id, 0)
return total_ignored_overlap
# Sets that are populated with segments which ground-truth/predicted
# segments have been matched with overlapping predicted/ground-truth
# segments respectively.
gt_matched = set()
pred_matched = set()
# Calculate IoU per pair of intersecting segments of the same category.
for intersection_id, intersection_area in intersection_areas.items():
gt_segment_id = int(intersection_id // self.offset)
pred_segment_id = int(intersection_id % self.offset)
gt_category = int(gt_segment_id // self.max_instances_per_category)
pred_category = int(pred_segment_id // self.max_instances_per_category)
if gt_category != pred_category:
continue
# Union between the ground-truth and predicted segments being compared
# does not include the portion of the predicted segment that consists of
# ground-truth "void" pixels.
union = (
gt_segment_areas[gt_segment_id] +
pred_segment_areas[pred_segment_id] - intersection_area -
prediction_void_overlap(pred_segment_id))
iou = intersection_area / union
if iou > 0.5:
self.tp_per_class[gt_category] += 1
self.iou_per_class[gt_category] += iou
gt_matched.add(gt_segment_id)
pred_matched.add(pred_segment_id)
# Count false negatives for each category.
for gt_segment_id in gt_segment_areas:
if gt_segment_id in gt_matched:
continue
category = gt_segment_id // self.max_instances_per_category
# Failing to detect a void segment is not a false negative.
if category == self.ignored_label:
continue
self.fn_per_class[category] += 1
# Count false positives for each category.
for pred_segment_id in pred_segment_areas:
if pred_segment_id in pred_matched:
continue
      # A false positive is not penalized if it is mostly ignored in the
      # ground-truth.
if (prediction_ignored_overlap(pred_segment_id) /
pred_segment_areas[pred_segment_id]) > 0.5:
continue
category = pred_segment_id // self.max_instances_per_category
self.fp_per_class[category] += 1
def _valid_categories(self):
"""Categories with a "valid" value for the metric, have > 0 instances.
We will ignore the `ignore_label` class and other classes which have
`tp + fn + fp = 0`.
Returns:
Boolean array of shape `[num_categories]`.
"""
valid_categories = np.not_equal(
self.tp_per_class + self.fn_per_class + self.fp_per_class, 0)
if self.ignored_label >= 0 and self.ignored_label < self.num_categories:
valid_categories[self.ignored_label] = False
return valid_categories
def result_per_category(self):
"""For supported metrics, return individual per-category metric values.
Returns:
A dictionary contains all per-class metrics, each metrics is a numpy array
of shape `[self.num_categories]`, where index `i` is the metrics value
over only that category.
"""
sq_per_class = realdiv_maybe_zero(self.iou_per_class, self.tp_per_class)
rq_per_class = realdiv_maybe_zero(
self.tp_per_class,
self.tp_per_class + 0.5 * self.fn_per_class + 0.5 * self.fp_per_class)
return {
'sq_per_class': sq_per_class,
'rq_per_class': rq_per_class,
'pq_per_class': np.multiply(sq_per_class, rq_per_class)
}
def result(self, is_thing=None):
"""Computes and returns the detailed metric results over all comparisons.
Args:
is_thing: A boolean array of length `num_categories`. The entry
`is_thing[category_id]` is True iff that category is a "thing" category
instead of "stuff."
Returns:
A dictionary with a breakdown of metrics and/or metric factors by things,
stuff, and all categories.
"""
results = self.result_per_category()
valid_categories = self._valid_categories()
# If known, break down which categories are valid _and_ things/stuff.
category_sets = collections.OrderedDict()
category_sets['All'] = valid_categories
if is_thing is not None:
category_sets['Things'] = np.logical_and(valid_categories, is_thing)
category_sets['Stuff'] = np.logical_and(valid_categories,
np.logical_not(is_thing))
for category_set_name, in_category_set in category_sets.items():
if np.any(in_category_set):
results.update({
f'{category_set_name}_pq':
np.mean(results['pq_per_class'][in_category_set]),
f'{category_set_name}_sq':
np.mean(results['sq_per_class'][in_category_set]),
f'{category_set_name}_rq':
np.mean(results['rq_per_class'][in_category_set]),
# The number of categories in this subset.
f'{category_set_name}_num_categories':
np.sum(in_category_set.astype(np.int32)),
})
else:
results.update({
f'{category_set_name}_pq': 0.,
f'{category_set_name}_sq': 0.,
f'{category_set_name}_rq': 0.,
f'{category_set_name}_num_categories': 0
})
return results
def reset(self):
"""Resets the accumulation to the metric class's state at initialization."""
self.iou_per_class = np.zeros(self.num_categories, dtype=np.float64)
self.tp_per_class = np.zeros(self.num_categories, dtype=np.float64)
self.fn_per_class = np.zeros(self.num_categories, dtype=np.float64)
self.fp_per_class = np.zeros(self.num_categories, dtype=np.float64)
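# Illustrative usage sketch (not part of the original file); gt_* and pred_*
# are assumed to be 2D numpy uint16 arrays of per-pixel labels:
#
#   pq = PanopticQuality(num_categories=10, ignored_label=0,
#                        max_instances_per_category=256, offset=256 * 256)
#   pq.compare_and_accumulate(
#       groundtruths={'category_mask': gt_category,
#                     'instance_mask': gt_instance},
#       predictions={'category_mask': pred_category,
#                    'instance_mask': pred_instance})
#   metrics = pq.result()  # e.g. metrics['All_pq'], metrics['pq_per_class']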
def _get_instance_class_ids(
category_mask: tf.Tensor,
instance_mask: tf.Tensor,
max_num_instances: int,
ignored_label: int,
) -> tf.Tensor:
"""Get the class id of each instance (index starts from 1)."""
# (batch_size, height, width)
instance_mask = tf.where(
(instance_mask == 0) | (category_mask == ignored_label), -1, instance_mask
)
# (batch_size, height, width, max_num_instances + 1)
instance_binary_mask = tf.one_hot(
instance_mask, max_num_instances + 1, dtype=tf.int32
)
# (batch_size, max_num_instances + 1)
result = tf.reduce_max(
instance_binary_mask * category_mask[..., tf.newaxis], axis=[1, 2]
)
# If not an instance, sets the class id to -1.
return tf.where(result == 0, -1, result)
class PanopticQualityV2(tf.keras.metrics.Metric):
"""Panoptic quality metrics with vectorized implementation.
This implementation is supported on TPU.
"Panoptic Segmentation" by Alexander Kirillov, Kaiming He, Ross Girshick,
Carsten Rother, Piotr Dollar.
https://arxiv.org/abs/1801.00868
"""
def __init__(
self,
num_categories: int,
is_thing: Optional[Tuple[bool, ...]] = None,
max_num_instances: int = 255,
ignored_label: int = 255,
rescale_predictions: bool = False,
name: Optional[str] = None,
dtype: Optional[Union[str, tf.dtypes.DType]] = tf.float32,
):
"""Initialization for PanopticQualityV2.
Args:
num_categories: the number of categories.
is_thing: a boolean array of length `num_categories`. The entry
`is_thing[category_id]` is True iff that category is a "thing" category
        instead of "stuff". Defaults to `None`, which means categories are not
        classified into these two groups.
max_num_instances: the maximum number of instances in an image.
ignored_label: a category id that is ignored in evaluation, e.g. the void
label as defined in COCO panoptic segmentation dataset.
rescale_predictions: whether to scale back prediction to original image
sizes. If True, the image_info of the groundtruth is used to rescale
predictions.
name: string name of the metric instance.
dtype: data type of the metric result.
"""
super().__init__(name=name, dtype=dtype)
self._num_categories = num_categories
if is_thing is not None:
self._is_thing = tf.convert_to_tensor(is_thing)
self._is_thing.set_shape([self._num_categories])
else:
self._is_thing = tf.ones([self._num_categories], dtype=tf.bool)
self._max_num_instances = max_num_instances
self._ignored_label = ignored_label
self._rescale_predictions = rescale_predictions
# Variables
self.tp_count = self.add_weight(
'tp_count',
shape=[self._num_categories],
initializer='zeros',
dtype=tf.float32,
)
self.fp_count = self.add_weight(
'fp_count',
shape=[self._num_categories],
initializer='zeros',
dtype=tf.float32,
)
self.fn_count = self.add_weight(
'fn_count',
shape=[self._num_categories],
initializer='zeros',
dtype=tf.float32,
)
self.tp_iou_sum = self.add_weight(
'tp_iou_sum',
shape=[self._num_categories],
initializer='zeros',
dtype=tf.float32,
)
def get_config(self) -> Dict[str, Any]:
"""Returns the serializable config of the metric."""
return {
'num_categories': self._num_categories,
'is_thing': self._is_thing,
'max_num_instances': self._max_num_instances,
'ignored_label': self._ignored_label,
'rescale_predictions': self._rescale_predictions,
'name': self.name,
'dtype': self.dtype,
}
def reset_state(self):
"""Resets all of the metric state variables."""
self.tp_count.assign(tf.zeros_like(self.tp_count))
self.fp_count.assign(tf.zeros_like(self.fp_count))
self.fn_count.assign(tf.zeros_like(self.fn_count))
self.tp_iou_sum.assign(tf.zeros_like(self.tp_iou_sum))
def update_state(
self, y_true: Dict[str, tf.Tensor], y_pred: Dict[str, tf.Tensor]
):
category_mask = tf.convert_to_tensor(y_pred['category_mask'], tf.int32)
instance_mask = tf.convert_to_tensor(y_pred['instance_mask'], tf.int32)
gt_category_mask = tf.convert_to_tensor(y_true['category_mask'], tf.int32)
gt_instance_mask = tf.convert_to_tensor(y_true['instance_mask'], tf.int32)
if self._rescale_predictions:
_, height, width = gt_category_mask.get_shape().as_list()
# Instead of cropping the masks to the original image shape (dynamic),
# here we keep the mask shape (fixed) and ignore the pixels outside the
# original image shape.
image_shape = tf.cast(y_true['image_info'][:, 0, :], tf.int32)
# (batch_size, 2)
y0_x0 = tf.broadcast_to(
tf.constant([[0, 0]], dtype=tf.int32), tf.shape(image_shape)
)
# (batch_size, 4)
image_shape_bbox = tf.concat([y0_x0, image_shape], axis=1)
# (batch_size, height, width)
image_shape_masks = box_ops.bbox2mask(
bbox=image_shape_bbox,
image_height=height,
image_width=width,
dtype=tf.bool,
)
# (batch_size, height, width)
category_mask = tf.where(
image_shape_masks, category_mask, self._ignored_label
)
instance_mask = tf.where(image_shape_masks, instance_mask, 0)
gt_category_mask = tf.where(
image_shape_masks, gt_category_mask, self._ignored_label
)
gt_instance_mask = tf.where(image_shape_masks, gt_instance_mask, 0)
self._update_thing_classes(
category_mask, instance_mask, gt_category_mask, gt_instance_mask
)
self._update_stuff_classes(category_mask, gt_category_mask)
def _update_thing_classes(
self,
category_mask: tf.Tensor,
instance_mask: tf.Tensor,
gt_category_mask: tf.Tensor,
gt_instance_mask: tf.Tensor,
):
_, height, width = category_mask.get_shape().as_list()
# (batch_size, num_detections + 1)
instance_class_ids = _get_instance_class_ids(
category_mask,
instance_mask,
self._max_num_instances,
self._ignored_label,
)
# (batch_size, num_gts + 1)
gt_instance_class_ids = _get_instance_class_ids(
gt_category_mask,
gt_instance_mask,
self._max_num_instances,
self._ignored_label,
)
# (batch_size, height, width)
valid_mask = gt_category_mask != self._ignored_label
# (batch_size, height, width, num_detections + 1)
instance_binary_masks = tf.one_hot(
tf.where(instance_mask > 0, instance_mask, -1),
self._max_num_instances + 1,
on_value=True,
off_value=False,
)
# (batch_size, height, width, num_gts + 1)
gt_instance_binary_masks = tf.one_hot(
tf.where(gt_instance_mask > 0, gt_instance_mask, -1),
self._max_num_instances + 1,
on_value=True,
off_value=False,
)
# (batch_size, height * width, num_detections + 1)
flattened_binary_masks = tf.reshape(
instance_binary_masks & valid_mask[..., tf.newaxis],
[-1, height * width, self._max_num_instances + 1],
)
# (batch_size, height * width, num_gts + 1)
flattened_gt_binary_masks = tf.reshape(
gt_instance_binary_masks & valid_mask[..., tf.newaxis],
[-1, height * width, self._max_num_instances + 1],
)
# (batch_size, num_detections + 1, height * width)
flattened_binary_masks = tf.transpose(flattened_binary_masks, [0, 2, 1])
# (batch_size, num_detections + 1, num_gts + 1)
intersection = tf.matmul(
tf.cast(flattened_binary_masks, tf.float32),
tf.cast(flattened_gt_binary_masks, tf.float32),
)
union = (
tf.math.count_nonzero(
flattened_binary_masks, axis=-1, keepdims=True, dtype=tf.float32
)
+ tf.math.count_nonzero(
flattened_gt_binary_masks, axis=-2, keepdims=True, dtype=tf.float32
)
- intersection
)
# (batch_size, num_detections + 1, num_gts + 1)
detection_to_gt_ious = tf.math.divide_no_nan(intersection, union)
detection_matches_gt = (
(detection_to_gt_ious > 0.5)
& (
instance_class_ids[:, :, tf.newaxis]
== gt_instance_class_ids[:, tf.newaxis, :]
)
& (gt_instance_class_ids[:, tf.newaxis, :] > 0)
)
# (batch_size, num_gts + 1)
is_tp = tf.reduce_any(detection_matches_gt, axis=1)
# (batch_size, num_gts + 1)
tp_iou = tf.reduce_max(
tf.where(detection_matches_gt, detection_to_gt_ious, 0), axis=1
)
# (batch_size, num_detections + 1)
is_fp = tf.reduce_any(instance_binary_masks, axis=[1, 2]) & ~tf.reduce_any(
detection_matches_gt, axis=2
)
# (batch_size, height, width, num_detections + 1)
fp_binary_mask = is_fp[:, tf.newaxis, tf.newaxis, :] & instance_binary_masks
# (batch_size, num_detections + 1)
fp_area = tf.math.count_nonzero(
fp_binary_mask, axis=[1, 2], dtype=tf.float32
)
# (batch_size, num_detections + 1)
fp_crowd_or_ignored_area = tf.math.count_nonzero(
fp_binary_mask
& (
(
# An instance detection matches a crowd ground truth instance if
# the instance class of the detection matches the class of the
# ground truth and the instance id of the ground truth is 0 (the
# instance is crowd).
(instance_mask > 0)
& (category_mask > 0)
& (gt_category_mask == category_mask)
& (gt_instance_mask == 0)
)
| (gt_category_mask == self._ignored_label)
)[..., tf.newaxis],
axis=[1, 2],
dtype=tf.float32,
)
# Don't count the detection as false positive if over 50% pixels of the
# instance detection are crowd of the matching class or ignored pixels in
# ground truth.
# (batch_size, num_detections + 1)
is_fp &= tf.math.divide_no_nan(fp_crowd_or_ignored_area, fp_area) <= 0.5
# (batch_size, num_detections + 1, num_categories)
detection_by_class = tf.one_hot(
instance_class_ids, self._num_categories, on_value=True, off_value=False
)
# (batch_size, num_gts + 1, num_categories)
gt_by_class = tf.one_hot(
gt_instance_class_ids,
self._num_categories,
on_value=True,
off_value=False,
)
# (num_categories,)
gt_count = tf.math.count_nonzero(gt_by_class, axis=[0, 1], dtype=tf.float32)
tp_count = tf.math.count_nonzero(
is_tp[..., tf.newaxis] & gt_by_class, axis=[0, 1], dtype=tf.float32
)
fn_count = gt_count - tp_count
fp_count = tf.math.count_nonzero(
is_fp[..., tf.newaxis] & detection_by_class,
axis=[0, 1],
dtype=tf.float32,
)
tp_iou_sum = tf.reduce_sum(
tf.cast(gt_by_class, tf.float32) * tp_iou[..., tf.newaxis], axis=[0, 1]
)
self.tp_count.assign_add(tp_count)
self.fn_count.assign_add(fn_count)
self.fp_count.assign_add(fp_count)
self.tp_iou_sum.assign_add(tp_iou_sum)
def _update_stuff_classes(
self, category_mask: tf.Tensor, gt_category_mask: tf.Tensor
):
# (batch_size, height, width, num_categories)
category_binary_mask = tf.one_hot(
category_mask, self._num_categories, on_value=True, off_value=False
)
gt_category_binary_mask = tf.one_hot(
gt_category_mask, self._num_categories, on_value=True, off_value=False
)
# (batch_size, height, width)
valid_mask = gt_category_mask != self._ignored_label
# (batch_size, num_categories)
intersection = tf.math.count_nonzero(
category_binary_mask
& gt_category_binary_mask
& valid_mask[..., tf.newaxis],
axis=[1, 2],
dtype=tf.float32,
)
union = tf.math.count_nonzero(
(category_binary_mask | gt_category_binary_mask)
& valid_mask[..., tf.newaxis],
axis=[1, 2],
dtype=tf.float32,
)
iou = tf.math.divide_no_nan(intersection, union)
# (batch_size, num_categories)
is_tp = (iou > 0.5) & ~self._is_thing
is_fn = (
tf.reduce_any(gt_category_binary_mask, axis=[1, 2])
& ~self._is_thing
& ~is_tp
)
is_fp = (
tf.reduce_any(category_binary_mask, axis=[1, 2])
& ~self._is_thing
& ~is_tp
)
# (batch_size, height, width, num_categories)
fp_binary_mask = is_fp[:, tf.newaxis, tf.newaxis, :] & category_binary_mask
# (batch_size, num_categories)
fp_area = tf.math.count_nonzero(
fp_binary_mask, axis=[1, 2], dtype=tf.float32
)
fp_ignored_area = tf.math.count_nonzero(
fp_binary_mask
& (gt_category_mask == self._ignored_label)[..., tf.newaxis],
axis=[1, 2],
dtype=tf.float32,
)
# Don't count the detection as false positive if over 50% pixels of the
# stuff detection are ignored pixels in ground truth.
is_fp &= tf.math.divide_no_nan(fp_ignored_area, fp_area) <= 0.5
# (num_categories,)
tp_count = tf.math.count_nonzero(is_tp, axis=0, dtype=tf.float32)
fn_count = tf.math.count_nonzero(is_fn, axis=0, dtype=tf.float32)
fp_count = tf.math.count_nonzero(is_fp, axis=0, dtype=tf.float32)
tp_iou_sum = tf.reduce_sum(tf.cast(is_tp, tf.float32) * iou, axis=0)
self.tp_count.assign_add(tp_count)
self.fn_count.assign_add(fn_count)
self.fp_count.assign_add(fp_count)
self.tp_iou_sum.assign_add(tp_iou_sum)
def result(self) -> Dict[str, tf.Tensor]:
"""Returns the metrics values as a dict."""
# (num_categories,)
tp_fn_fp_count = self.tp_count + self.fn_count + self.fp_count
is_ignore_label = tf.one_hot(
self._ignored_label,
self._num_categories,
on_value=True,
off_value=False,
)
sq_per_class = tf.math.divide_no_nan(
self.tp_iou_sum, self.tp_count
) * tf.cast(~is_ignore_label, tf.float32)
rq_per_class = tf.math.divide_no_nan(
self.tp_count, self.tp_count + 0.5 * self.fp_count + 0.5 * self.fn_count
) * tf.cast(~is_ignore_label, tf.float32)
pq_per_class = sq_per_class * rq_per_class
result = {
# (num_categories,)
'valid_thing_classes': (
(tp_fn_fp_count > 0) & self._is_thing & ~is_ignore_label
),
# (num_categories,)
'valid_stuff_classes': (
(tp_fn_fp_count > 0) & ~self._is_thing & ~is_ignore_label
),
# (num_categories,)
'sq_per_class': sq_per_class,
# (num_categories,)
'rq_per_class': rq_per_class,
# (num_categories,)
'pq_per_class': pq_per_class,
}
return result
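# Example usage (a minimal sketch; assumes `y_true` and `y_pred` are dicts with
# 'category_mask' and 'instance_mask' tensors as described in `update_state`):
#
#   metric = PanopticQualityV2(num_categories=3, ignored_label=2)
#   metric.update_state(y_true, y_pred)
#   results = metric.result()  # e.g. results['pq_per_class']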
| 27,477 | 37.110957 | 96 | py |
models | models-master/official/vision/examples/starter/example_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sample model implementation.
This is only a dummy example to showcase how a model is composed. It is usually
not necessary to implement a model from scratch. Most SoTA models can be found
and directly used from the `official/vision/modeling` directory.
"""
from typing import Any, Mapping
# Import libraries
import tensorflow as tf
from official.vision.examples.starter import example_config as example_cfg
class ExampleModel(tf.keras.Model):
  """An example model class.
A model is a subclass of tf.keras.Model where layers are built in the
constructor.
"""
def __init__(
self,
num_classes: int,
input_specs: tf.keras.layers.InputSpec = tf.keras.layers.InputSpec(
shape=[None, None, None, 3]),
**kwargs):
"""Initializes the example model.
All layers are defined in the constructor, and config is recorded in the
`_config_dict` object for serialization.
Args:
num_classes: The number of classes in classification task.
input_specs: A `tf.keras.layers.InputSpec` spec of the input tensor.
**kwargs: Additional keyword arguments to be passed.
"""
inputs = tf.keras.Input(shape=input_specs.shape[1:], name=input_specs.name)
outputs = tf.keras.layers.Conv2D(
filters=16, kernel_size=3, strides=2, padding='same', use_bias=False)(
inputs)
outputs = tf.keras.layers.Conv2D(
filters=32, kernel_size=3, strides=2, padding='same', use_bias=False)(
outputs)
outputs = tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=2, padding='same', use_bias=False)(
outputs)
outputs = tf.keras.layers.GlobalAveragePooling2D()(outputs)
outputs = tf.keras.layers.Dense(1024, activation='relu')(outputs)
outputs = tf.keras.layers.Dense(num_classes)(outputs)
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
self._input_specs = input_specs
self._config_dict = {'num_classes': num_classes, 'input_specs': input_specs}
def get_config(self) -> Mapping[str, Any]:
"""Gets the config of this model."""
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
"""Constructs an instance of this model from input config."""
return cls(**config)
def build_example_model(input_specs: tf.keras.layers.InputSpec,
model_config: example_cfg.ExampleModel,
**kwargs) -> tf.keras.Model:
"""Builds and returns the example model.
This function is the main entry point to build a model. Commonly, it builds a
model by building a backbone, decoder and head. An example of building a
classification model is at
third_party/tensorflow_models/official/vision/modeling/backbones/resnet.py.
However, it is not mandatory for all models to have these three pieces
exactly. Depending on the task, model can be as simple as the example model
here or more complex, such as multi-head architecture.
Args:
input_specs: The specs of the input layer that defines input size.
model_config: The config containing parameters to build a model.
**kwargs: Additional keyword arguments to be passed.
Returns:
A tf.keras.Model object.
"""
return ExampleModel(
num_classes=model_config.num_classes, input_specs=input_specs, **kwargs)
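# Example usage (a minimal sketch; the `example_cfg.ExampleModel` constructor
# arguments are assumptions for illustration):
#
#   input_specs = tf.keras.layers.InputSpec(shape=[None, 32, 32, 3])
#   model_config = example_cfg.ExampleModel(num_classes=10)
#   model = build_example_model(input_specs, model_config)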
| 3,933 | 37.568627 | 80 | py |
models | models-master/official/vision/examples/starter/example_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example task definition for image classification."""
from typing import Any, List, Optional, Tuple, Sequence, Mapping
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.modeling import tf_utils
from official.vision.dataloaders import input_reader_factory
from official.vision.examples.starter import example_config as exp_cfg
from official.vision.examples.starter import example_input
from official.vision.examples.starter import example_model
@task_factory.register_task_cls(exp_cfg.ExampleTask)
class ExampleTask(base_task.Task):
"""Class of an example task.
  A task is a subclass of base_task.Task that defines the model, input
  pipeline, loss, metrics, and the training and evaluation steps.
"""
def build_model(self) -> tf.keras.Model:
"""Builds a model."""
input_specs = tf.keras.layers.InputSpec(shape=[None] +
self.task_config.model.input_size)
model = example_model.build_example_model(
input_specs=input_specs, model_config=self.task_config.model)
return model
def build_inputs(
self,
params: exp_cfg.ExampleDataConfig,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Builds input.
    The output of this function is a tf.data.Dataset that has gone through
pre-processing steps, such as augmentation, batching, shuffling, etc.
Args:
params: The experiment config.
input_context: An optional InputContext used by input reader.
Returns:
A tf.data.Dataset object.
"""
num_classes = self.task_config.model.num_classes
input_size = self.task_config.model.input_size
decoder = example_input.Decoder()
parser = example_input.Parser(
output_size=input_size[:2], num_classes=num_classes)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels: tf.Tensor,
model_outputs: tf.Tensor,
aux_losses: Optional[Any] = None) -> tf.Tensor:
"""Builds losses for training and validation.
Args:
      labels: Input ground-truth labels.
model_outputs: Output of the model.
      aux_losses: The auxiliary loss tensors, i.e. `losses` in tf.keras.Model.
Returns:
The total loss tensor.
"""
total_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, model_outputs, from_logits=True)
total_loss = tf_utils.safe_mean(total_loss)
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss
def build_metrics(self,
training: bool = True) -> Sequence[tf.keras.metrics.Metric]:
"""Gets streaming metrics for training/validation.
This function builds and returns a list of metrics to compute during
training and validation. The list contains objects of subclasses of
tf.keras.metrics.Metric. Training and validation can have different metrics.
Args:
training: Whether the metric is for training or not.
Returns:
A list of tf.keras.metrics.Metric objects.
"""
k = self.task_config.evaluation.top_k
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))
]
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None) -> Mapping[str, Any]:
"""Does forward and backward.
    This example assumes the input is a tuple of (features, labels), which
    follows the output from the data loader, i.e., Parser. The output from
    Parser is fed into train_step to perform one forward and backward pass.
    Other data structures, such as dictionaries, can also be used, as long as
    they are consistent between the Parser output and the input used here.
Args:
inputs: A tuple of input tensors of (features, labels).
model: A tf.keras.Model instance.
optimizer: The optimizer for this training step.
metrics: A nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
# Casting output layer as float32 is necessary when mixed_precision is
# mixed_float16 or mixed_bfloat16 to ensure output is casted as float32.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
return logs
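  # Note on the scaling above (illustrative numbers, not from the original
  # source): with 8 replicas and a per-replica loss of 2.0, `scaled_loss` is
  # 0.25; the default gradient allreduce then sums across replicas, so the
  # applied update corresponds to the cross-replica mean loss.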
def validation_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None) -> Mapping[str, Any]:
"""Runs validation step.
Args:
inputs: A tuple of input tensors of (features, labels).
model: A tf.keras.Model instance.
metrics: A nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = self.inference_step(features, model)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
return logs
def inference_step(self, inputs: tf.Tensor, model: tf.keras.Model) -> Any:
"""Performs the forward step. It is used in 'validation_step'."""
return model(inputs, training=False)
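# Example usage (a minimal sketch; assumes the task config is default
# constructible, which is an assumption made for illustration):
#
#   task_config = exp_cfg.ExampleTask()
#   task = ExampleTask(task_config)
#   model = task.build_model()
#   train_dataset = task.build_inputs(task_config.train_data)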
| 7,652 | 35.442857 | 80 | py |
models | models-master/official/vision/serving/export_base_v2.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for model export."""
from typing import Dict, Optional, Text, Callable, Any, Union
import tensorflow as tf
from official.core import export_base
class ExportModule(export_base.ExportModule):
"""Base Export Module."""
def __init__(self,
params,
model: tf.keras.Model,
input_signature: Union[tf.TensorSpec, Dict[str, tf.TensorSpec]],
preprocessor: Optional[Callable[..., Any]] = None,
inference_step: Optional[Callable[..., Any]] = None,
postprocessor: Optional[Callable[..., Any]] = None):
"""Initializes a module for export.
Args:
params: A dataclass for parameters to the module.
model: A tf.keras.Model instance to be exported.
input_signature: tf.TensorSpec, e.g.
tf.TensorSpec(shape=[None, 224, 224, 3], dtype=tf.uint8)
preprocessor: An optional callable to preprocess the inputs.
inference_step: An optional callable to forward-pass the model.
postprocessor: An optional callable to postprocess the model outputs.
"""
super().__init__(
params,
model=model,
preprocessor=preprocessor,
inference_step=inference_step,
postprocessor=postprocessor)
self.input_signature = input_signature
@tf.function
def serve(self, inputs):
x = self.preprocessor(inputs=inputs) if self.preprocessor else inputs
x = self.inference_step(x)
x = self.postprocessor(x) if self.postprocessor else x
return x
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
"""Gets defined function signatures.
Args:
      function_keys: A dictionary with keys as the functions to create
        signatures for and values as the corresponding signature keys in the
        returned dictionary.
Returns:
A dictionary with key as signature key and value as concrete functions
that can be used for tf.saved_model.save.
"""
signatures = {}
for _, def_name in function_keys.items():
signatures[def_name] = self.serve.get_concrete_function(
self.input_signature)
return signatures
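# Example usage (a minimal sketch; `my_model`, the checkpoint path and the
# export directory are placeholders):
#
#   module = ExportModule(
#       params=None,
#       model=my_model,
#       input_signature=tf.TensorSpec(
#           shape=[1, 224, 224, 3], dtype=tf.uint8),
#       preprocessor=lambda x: tf.cast(x, tf.float32))
#   export_base.export(
#       module, ['serving_default'],
#       export_savedmodel_dir='/tmp/export',
#       checkpoint_path='/path/to/ckpt', timestamped=False)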
| 2,731 | 34.947368 | 79 | py |
models | models-master/official/vision/serving/export_base_v2_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.core.export_base_v2."""
import os
import tensorflow as tf
from official.core import export_base
from official.vision.serving import export_base_v2
class TestModel(tf.keras.Model):
def __init__(self):
super().__init__()
self._dense = tf.keras.layers.Dense(2)
def call(self, inputs):
return {'outputs': self._dense(inputs)}
class ExportBaseTest(tf.test.TestCase):
def test_preprocessor(self):
tmp_dir = self.get_temp_dir()
model = TestModel()
inputs = tf.ones([2, 4], tf.float32)
preprocess_fn = lambda inputs: 2 * inputs
module = export_base_v2.ExportModule(
params=None,
input_signature=tf.TensorSpec(shape=[2, 4]),
model=model,
preprocessor=preprocess_fn)
expected_output = model(preprocess_fn(inputs))
ckpt_path = tf.train.Checkpoint(model=model).save(
os.path.join(tmp_dir, 'ckpt'))
export_dir = export_base.export(
module, ['serving_default'],
export_savedmodel_dir=tmp_dir,
checkpoint_path=ckpt_path,
timestamped=False)
imported = tf.saved_model.load(export_dir)
output = imported.signatures['serving_default'](inputs)
print('output', output)
self.assertAllClose(
output['outputs'].numpy(), expected_output['outputs'].numpy())
def test_postprocessor(self):
tmp_dir = self.get_temp_dir()
model = TestModel()
inputs = tf.ones([2, 4], tf.float32)
postprocess_fn = lambda logits: {'outputs': 2 * logits['outputs']}
module = export_base_v2.ExportModule(
params=None,
model=model,
input_signature=tf.TensorSpec(shape=[2, 4]),
postprocessor=postprocess_fn)
expected_output = postprocess_fn(model(inputs))
ckpt_path = tf.train.Checkpoint(model=model).save(
os.path.join(tmp_dir, 'ckpt'))
export_dir = export_base.export(
module, ['serving_default'],
export_savedmodel_dir=tmp_dir,
checkpoint_path=ckpt_path,
timestamped=False)
imported = tf.saved_model.load(export_dir)
output = imported.signatures['serving_default'](inputs)
self.assertAllClose(
output['outputs'].numpy(), expected_output['outputs'].numpy())
if __name__ == '__main__':
tf.test.main()
| 2,870 | 30.9 | 74 | py |
models | models-master/official/vision/serving/export_tfhub_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to export a TF-Hub SavedModel."""
from typing import List, Optional
# Import libraries
import tensorflow as tf
from official.core import config_definitions as cfg
from official.vision import configs
from official.vision.modeling import factory
def build_model(batch_size: Optional[int],
input_image_size: List[int],
params: cfg.ExperimentConfig,
num_channels: int = 3,
skip_logits_layer: bool = False) -> tf.keras.Model:
"""Builds a model for TF Hub export.
Args:
batch_size: The batch size of input.
input_image_size: A list of [height, width] specifying the input image size.
params: The config used to train the model.
num_channels: The number of input image channels.
    skip_logits_layer: Whether to skip the logits layer for the image
      classification model. Defaults to False.
Returns:
A tf.keras.Model instance.
Raises:
ValueError: If the task is not supported.
"""
input_specs = tf.keras.layers.InputSpec(shape=[batch_size] +
input_image_size + [num_channels])
if isinstance(params.task,
configs.image_classification.ImageClassificationTask):
model = factory.build_classification_model(
input_specs=input_specs,
model_config=params.task.model,
l2_regularizer=None,
skip_logits_layer=skip_logits_layer)
else:
raise ValueError('Export module not implemented for {} task.'.format(
type(params.task)))
return model
def export_model_to_tfhub(batch_size: Optional[int],
input_image_size: List[int],
params: cfg.ExperimentConfig,
checkpoint_path: str,
export_path: str,
num_channels: int = 3,
skip_logits_layer: bool = False):
"""Export a TF2 model to TF-Hub."""
model = build_model(batch_size, input_image_size, params, num_channels,
skip_logits_layer)
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(checkpoint_path).assert_existing_objects_matched()
model.save(export_path, include_optimizer=False, save_format='tf')
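# Example usage (a minimal sketch; the experiment config, checkpoint and export
# paths are placeholders):
#
#   from official.core import exp_factory
#   params = exp_factory.get_exp_config('resnet_imagenet')
#   export_model_to_tfhub(
#       batch_size=None,
#       input_image_size=[224, 224],
#       params=params,
#       checkpoint_path='/path/to/ckpt',
#       export_path='/path/to/hub_module',
#       skip_logits_layer=True)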
| 2,870 | 36.776316 | 80 | py |
models | models-master/official/vision/serving/export_tflite_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to facilitate TFLite model conversion."""
import functools
from typing import Iterator, List, Optional
from absl import logging
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.vision import configs
from official.vision import tasks
def create_representative_dataset(
params: cfg.ExperimentConfig,
task: Optional[base_task.Task] = None) -> tf.data.Dataset:
"""Creates a tf.data.Dataset to load images for representative dataset.
Args:
params: An ExperimentConfig.
task: An optional task instance. If it is None, task will be built according
to the task type in params.
Returns:
A tf.data.Dataset instance.
Raises:
ValueError: If task is not supported.
"""
if task is None:
if isinstance(params.task,
configs.image_classification.ImageClassificationTask):
task = tasks.image_classification.ImageClassificationTask(params.task)
elif isinstance(params.task, configs.retinanet.RetinaNetTask):
task = tasks.retinanet.RetinaNetTask(params.task)
elif isinstance(params.task, configs.maskrcnn.MaskRCNNTask):
task = tasks.maskrcnn.MaskRCNNTask(params.task)
elif isinstance(params.task,
configs.semantic_segmentation.SemanticSegmentationTask):
task = tasks.semantic_segmentation.SemanticSegmentationTask(params.task)
else:
raise ValueError('Task {} not supported.'.format(type(params.task)))
# Ensure batch size is 1 for TFLite model.
params.task.train_data.global_batch_size = 1
params.task.train_data.dtype = 'float32'
logging.info('Task config: %s', params.task.as_dict())
return task.build_inputs(params=params.task.train_data)
def representative_dataset(
params: cfg.ExperimentConfig,
task: Optional[base_task.Task] = None,
    calibration_steps: int = 2000) -> Iterator[List[tf.Tensor]]:
  """Creates a representative dataset for input calibration.
Args:
params: An ExperimentConfig.
task: An optional task instance. If it is None, task will be built according
to the task type in params.
calibration_steps: The steps to do calibration.
Yields:
An input image tensor.
"""
dataset = create_representative_dataset(params=params, task=task)
for image, _ in dataset.take(calibration_steps):
# Skip images that do not have 3 channels.
if image.shape[-1] != 3:
continue
yield [image]
def convert_tflite_model(
saved_model_dir: Optional[str] = None,
model: Optional[tf.keras.Model] = None,
quant_type: Optional[str] = None,
params: Optional[cfg.ExperimentConfig] = None,
task: Optional[base_task.Task] = None,
calibration_steps: Optional[int] = 2000,
denylisted_ops: Optional[List[str]] = None,
) -> 'bytes':
"""Converts and returns a TFLite model.
Args:
saved_model_dir: The directory to the SavedModel.
model: An optional tf.keras.Model instance. If `saved_model_dir` is not
available, convert this model to TFLite.
quant_type: The post training quantization (PTQ) method. It can be one of
      `default` (dynamic range), `fp16` (float16), `int8` (integer with float
fallback), `int8_full` (integer only) and None (no quantization).
params: An optional ExperimentConfig to load and preprocess input images to
do calibration for integer quantization.
task: An optional task instance. If it is None, task will be built according
to the task type in params.
calibration_steps: The steps to do calibration.
denylisted_ops: A list of strings containing ops that are excluded from
integer quantization.
Returns:
A converted TFLite model with optional PTQ.
Raises:
    ValueError: If `params` is not provided when integer quantization is
      requested, or if neither `saved_model_dir` nor `model` is provided.
"""
if saved_model_dir:
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
elif model is not None:
converter = tf.lite.TFLiteConverter.from_keras_model(model)
else:
raise ValueError('Either `saved_model_dir` or `model` must be specified.')
if quant_type:
if quant_type.startswith('int8'):
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = functools.partial(
representative_dataset,
params=params,
task=task,
calibration_steps=calibration_steps)
if quant_type.startswith('int8_full'):
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8
]
if quant_type == 'int8_full':
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
if quant_type == 'int8_full_int8_io':
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
if denylisted_ops:
debug_options = tf.lite.experimental.QuantizationDebugOptions(
denylisted_ops=denylisted_ops)
debugger = tf.lite.experimental.QuantizationDebugger(
converter=converter,
debug_dataset=functools.partial(
representative_dataset,
params=params,
calibration_steps=calibration_steps),
debug_options=debug_options)
debugger.run()
return debugger.get_nondebug_quantized_model()
elif quant_type == 'uint8':
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.default_ranges_stats = (-10, 10)
converter.inference_type = tf.uint8
converter.quantized_input_stats = {'input_placeholder': (0., 1.)}
elif quant_type == 'fp16':
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
elif quant_type in ('default', 'qat_fp32_io'):
converter.optimizations = [tf.lite.Optimize.DEFAULT]
elif quant_type == 'qat':
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.inference_input_type = tf.uint8 # or tf.int8
converter.inference_output_type = tf.uint8 # or tf.int8
else:
raise ValueError(f'quantization type {quant_type} is not supported.')
return converter.convert()
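# Example usage (a minimal sketch; the paths and experiment config are
# placeholders):
#
#   from official.core import exp_factory
#   params = exp_factory.get_exp_config('resnet_imagenet')
#   tflite_model = convert_tflite_model(
#       saved_model_dir='/path/to/saved_model',
#       quant_type='int8_full',
#       params=params,
#       calibration_steps=100)
#   with tf.io.gfile.GFile('/path/to/model.tflite', 'wb') as f:
#     f.write(tflite_model)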
| 6,907 | 37.592179 | 80 | py |
models | models-master/official/vision/serving/export_module_factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory for vision export modules."""
from typing import List, Optional
import tensorflow as tf
from official.core import config_definitions as cfg
from official.vision import configs
from official.vision.dataloaders import classification_input
from official.vision.modeling import factory
from official.vision.serving import export_base_v2 as export_base
from official.vision.serving import export_utils
def create_classification_export_module(params: cfg.ExperimentConfig,
input_type: str,
batch_size: int,
input_image_size: List[int],
                                         num_channels: int = 3):
  """Creates a classification export module."""
input_signature = export_utils.get_image_input_signatures(
input_type, batch_size, input_image_size, num_channels)
input_specs = tf.keras.layers.InputSpec(
shape=[batch_size] + input_image_size + [num_channels])
model = factory.build_classification_model(
input_specs=input_specs,
model_config=params.task.model,
l2_regularizer=None)
def preprocess_fn(inputs):
image_tensor = export_utils.parse_image(inputs, input_type,
input_image_size, num_channels)
# If input_type is `tflite`, do not apply image preprocessing.
if input_type == 'tflite':
return image_tensor
def preprocess_image_fn(inputs):
return classification_input.Parser.inference_fn(
inputs, input_image_size, num_channels)
images = tf.map_fn(
preprocess_image_fn, elems=image_tensor,
fn_output_signature=tf.TensorSpec(
shape=input_image_size + [num_channels],
dtype=tf.float32))
return images
def postprocess_fn(logits):
probs = tf.nn.softmax(logits)
return {'logits': logits, 'probs': probs}
export_module = export_base.ExportModule(params,
model=model,
input_signature=input_signature,
preprocessor=preprocess_fn,
postprocessor=postprocess_fn)
return export_module
def get_export_module(params: cfg.ExperimentConfig,
input_type: str,
batch_size: Optional[int],
input_image_size: List[int],
num_channels: int = 3) -> export_base.ExportModule:
"""Factory for export modules."""
if isinstance(params.task,
configs.image_classification.ImageClassificationTask):
export_module = create_classification_export_module(
params, input_type, batch_size, input_image_size, num_channels)
else:
raise ValueError('Export module not implemented for {} task.'.format(
type(params.task)))
return export_module
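# Example usage (a minimal sketch; the experiment config is a placeholder):
#
#   from official.core import exp_factory
#   params = exp_factory.get_exp_config('resnet_imagenet')
#   module = get_export_module(
#       params, input_type='image_tensor', batch_size=1,
#       input_image_size=[224, 224])
#   signatures = module.get_inference_signatures(
#       {'image_tensor': 'serving_default'})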
| 3,539 | 38.333333 | 75 | py |
models | models-master/official/vision/serving/export_module_factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for vision modules."""
import io
import os
from absl.testing import parameterized
import numpy as np
from PIL import Image
import tensorflow as tf
from official.core import exp_factory
from official.core import export_base
from official.vision import registry_imports # pylint: disable=unused-import
from official.vision.dataloaders import classification_input
from official.vision.serving import export_module_factory
class ImageClassificationExportTest(tf.test.TestCase, parameterized.TestCase):
def _get_classification_module(self, input_type, input_image_size):
params = exp_factory.get_exp_config('resnet_imagenet')
params.task.model.backbone.resnet.model_id = 18
module = export_module_factory.create_classification_export_module(
params, input_type, batch_size=1, input_image_size=input_image_size)
return module
def _get_dummy_input(self, input_type):
"""Get dummy input for the given input type."""
if input_type == 'image_tensor':
return tf.zeros((1, 32, 32, 3), dtype=np.uint8)
elif input_type == 'image_bytes':
image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue()]
elif input_type == 'tf_example':
image_tensor = tf.zeros((32, 32, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
})).SerializeToString()
return [example]
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'image_bytes'},
{'input_type': 'tf_example'},
)
def test_export(self, input_type='image_tensor'):
input_image_size = [32, 32]
tmp_dir = self.get_temp_dir()
module = self._get_classification_module(input_type, input_image_size)
# Test that the model restores any attrs that are trackable objects
# (eg: tables, resource variables, keras models/layers, tf.hub modules).
module.model.test_trackable = tf.keras.layers.InputLayer(input_shape=(4,))
ckpt_path = tf.train.Checkpoint(model=module.model).save(
os.path.join(tmp_dir, 'ckpt'))
export_dir = export_base.export(
module, [input_type],
export_savedmodel_dir=tmp_dir,
checkpoint_path=ckpt_path,
timestamped=False)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(os.path.exists(
os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(os.path.exists(
os.path.join(tmp_dir, 'variables', 'variables.data-00000-of-00001')))
imported = tf.saved_model.load(export_dir)
classification_fn = imported.signatures['serving_default']
images = self._get_dummy_input(input_type)
def preprocess_image_fn(inputs):
return classification_input.Parser.inference_fn(
inputs, input_image_size, num_channels=3)
processed_images = tf.map_fn(
preprocess_image_fn,
elems=tf.zeros([1] + input_image_size + [3], dtype=tf.uint8),
fn_output_signature=tf.TensorSpec(
shape=input_image_size + [3], dtype=tf.float32))
expected_logits = module.model(processed_images, training=False)
expected_prob = tf.nn.softmax(expected_logits)
out = classification_fn(tf.constant(images))
# The imported model should contain any trackable attrs that the original
# model had.
self.assertTrue(hasattr(imported.model, 'test_trackable'))
self.assertAllClose(
out['logits'].numpy(), expected_logits.numpy(), rtol=1e-04, atol=1e-04)
self.assertAllClose(
out['probs'].numpy(), expected_prob.numpy(), rtol=1e-04, atol=1e-04)
if __name__ == '__main__':
tf.test.main()
| 4,595 | 37.949153 | 79 | py |
models | models-master/official/vision/serving/semantic_segmentation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semantic segmentation input and model functions for serving/inference."""
import tensorflow as tf
from official.vision.modeling import factory
from official.vision.ops import preprocess_ops
from official.vision.serving import export_base
class SegmentationModule(export_base.ExportModule):
"""Segmentation Module."""
def _build_model(self):
input_specs = tf.keras.layers.InputSpec(
shape=[self._batch_size] + self._input_image_size + [3])
return factory.build_segmentation_model(
input_specs=input_specs,
model_config=self.params.task.model,
l2_regularizer=None)
  def _build_inputs(self, image):
    """Builds segmentation model inputs for serving."""
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
if self.params.task.train_data.preserve_aspect_ratio:
image, image_info = preprocess_ops.resize_and_crop_image(
image,
self._input_image_size,
padded_size=self._input_image_size,
aug_scale_min=1.0,
aug_scale_max=1.0)
else:
image, image_info = preprocess_ops.resize_image(image,
self._input_image_size)
return image, image_info
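  # For reference (based on the documented `preprocess_ops` image_info format;
  # not stated in this file): `image_info` is a [4, 2] tensor holding
  # [[original_height, original_width], [scaled_height, scaled_width],
  # [y_scale, x_scale], [y_offset, x_offset]], which the rescaling logic in
  # `serve` below relies on.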
def serve(self, images):
"""Cast image to float and run inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
      A dictionary holding segmentation output logits and optional image info.
"""
# Skip image preprocessing when input_type is tflite so it is compatible
# with TFLite quantization.
image_info = None
if self._input_type != 'tflite':
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
images_spec = tf.TensorSpec(
shape=self._input_image_size + [3], dtype=tf.float32)
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs,
elems=images,
fn_output_signature=(images_spec, image_info_spec),
parallel_iterations=32))
outputs = self.inference_step(images)
# Optionally resize prediction to the input image size.
if self.params.task.export_config.rescale_output:
logits = outputs['logits']
if logits.shape[0] != 1:
raise ValueError('Batch size cannot be more than 1.')
image_shape = tf.cast(image_info[0, 0, :], tf.int32)
if self.params.task.train_data.preserve_aspect_ratio:
rescale_size = tf.cast(
tf.math.ceil(image_info[0, 1, :] / image_info[0, 2, :]), tf.int32)
offsets = tf.cast(image_info[0, 3, :], tf.int32)
logits = tf.image.resize(logits, rescale_size, method='bilinear')
outputs['logits'] = tf.image.crop_to_bounding_box(
logits, offsets[0], offsets[1], image_shape[0], image_shape[1])
else:
outputs['logits'] = tf.image.resize(
logits, [image_shape[0], image_shape[1]], method='bilinear')
else:
outputs['logits'] = tf.image.resize(
outputs['logits'], self._input_image_size, method='bilinear')
if image_info is not None:
outputs.update({'image_info': image_info})
return outputs
| 4,013 | 36.166667 | 79 | py |
models | models-master/official/vision/serving/image_classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for image classification export lib."""
import io
import os
from absl.testing import parameterized
import numpy as np
from PIL import Image
import tensorflow as tf
from official.core import exp_factory
from official.vision import registry_imports # pylint: disable=unused-import
from official.vision.serving import image_classification
class ImageClassificationExportTest(tf.test.TestCase, parameterized.TestCase):
def _get_classification_module(self, input_type):
params = exp_factory.get_exp_config('resnet_imagenet')
params.task.model.backbone.resnet.model_id = 18
classification_module = image_classification.ClassificationModule(
params,
batch_size=1,
input_image_size=[224, 224],
input_type=input_type)
return classification_module
def _export_from_module(self, module, input_type, save_directory):
signatures = module.get_inference_signatures(
{input_type: 'serving_default'})
tf.saved_model.save(module,
save_directory,
signatures=signatures)
def _get_dummy_input(self, input_type):
"""Get dummy input for the given input type."""
if input_type == 'image_tensor':
return tf.zeros((1, 224, 224, 3), dtype=np.uint8)
elif input_type == 'image_bytes':
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue()]
elif input_type == 'tf_example':
image_tensor = tf.zeros((224, 224, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
})).SerializeToString()
return [example]
elif input_type == 'tflite':
return tf.zeros((1, 224, 224, 3), dtype=np.float32)
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'image_bytes'},
{'input_type': 'tf_example'},
{'input_type': 'tflite'},
)
def test_export(self, input_type='image_tensor'):
tmp_dir = self.get_temp_dir()
module = self._get_classification_module(input_type)
# Test that the model restores any attrs that are trackable objects
# (eg: tables, resource variables, keras models/layers, tf.hub modules).
module.model.test_trackable = tf.keras.layers.InputLayer(input_shape=(4,))
self._export_from_module(module, input_type, tmp_dir)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(os.path.exists(
os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(os.path.exists(
os.path.join(tmp_dir, 'variables', 'variables.data-00000-of-00001')))
imported = tf.saved_model.load(tmp_dir)
classification_fn = imported.signatures['serving_default']
images = self._get_dummy_input(input_type)
if input_type != 'tflite':
processed_images = tf.nest.map_structure(
tf.stop_gradient,
tf.map_fn(
module._build_inputs,
elems=tf.zeros((1, 224, 224, 3), dtype=tf.uint8),
fn_output_signature=tf.TensorSpec(
shape=[224, 224, 3], dtype=tf.float32)))
else:
processed_images = images
expected_logits = module.model(processed_images, training=False)
expected_prob = tf.nn.softmax(expected_logits)
out = classification_fn(tf.constant(images))
# The imported model should contain any trackable attrs that the original
# model had.
self.assertTrue(hasattr(imported.model, 'test_trackable'))
self.assertAllClose(out['logits'].numpy(), expected_logits.numpy())
self.assertAllClose(out['probs'].numpy(), expected_prob.numpy())
if __name__ == '__main__':
tf.test.main()
| 4,612 | 37.123967 | 79 | py |
models | models-master/official/vision/serving/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification input and model functions for serving/inference."""
import tensorflow as tf
from official.vision.modeling import factory
from official.vision.ops import preprocess_ops
from official.vision.serving import export_base
class ClassificationModule(export_base.ExportModule):
  """Classification Module."""
def _build_model(self):
input_specs = tf.keras.layers.InputSpec(
shape=[self._batch_size] + self._input_image_size + [3])
return factory.build_classification_model(
input_specs=input_specs,
model_config=self.params.task.model,
l2_regularizer=None)
def _build_inputs(self, image):
"""Builds classification model inputs for serving."""
# Center crops and resizes image.
if self.params.task.train_data.aug_crop:
image = preprocess_ops.center_crop_image(image)
image = tf.image.resize(
image, self._input_image_size, method=tf.image.ResizeMethod.BILINEAR)
image = tf.reshape(
image, [self._input_image_size[0], self._input_image_size[1], 3])
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
return image
def serve(self, images):
"""Cast image to float and run inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
      A dictionary holding classification output logits and probabilities.
"""
# Skip image preprocessing when input_type is tflite so it is compatible
# with TFLite quantization.
if self._input_type != 'tflite':
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
images = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs,
elems=images,
fn_output_signature=tf.TensorSpec(
shape=self._input_image_size + [3], dtype=tf.float32),
parallel_iterations=32))
logits = self.inference_step(images)
if self.params.task.train_data.is_multilabel:
probs = tf.math.sigmoid(logits)
else:
probs = tf.nn.softmax(logits)
return {'logits': logits, 'probs': probs}
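# Example usage (a minimal sketch; the experiment config and export directory
# are placeholders):
#
#   from official.core import exp_factory
#   params = exp_factory.get_exp_config('resnet_imagenet')
#   module = ClassificationModule(
#       params, batch_size=1, input_image_size=[224, 224],
#       input_type='image_tensor')
#   signatures = module.get_inference_signatures(
#       {'image_tensor': 'serving_default'})
#   tf.saved_model.save(module, '/tmp/export', signatures=signatures)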
| 2,858 | 33.445783 | 79 | py |
models | models-master/official/vision/serving/export_base.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for model export."""
import abc
from typing import Dict, List, Mapping, Optional, Text
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import export_base
class ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):
"""Base Export Module."""
def __init__(self,
params: cfg.ExperimentConfig,
*,
batch_size: int,
input_image_size: List[int],
input_type: str = 'image_tensor',
num_channels: int = 3,
model: Optional[tf.keras.Model] = None,
input_name: Optional[str] = None):
"""Initializes a module for export.
Args:
params: Experiment params.
batch_size: The batch size of the model input. Can be `int` or None.
input_image_size: List or Tuple of size of the input image. For 2D image,
it is [height, width].
input_type: The input signature type.
num_channels: The number of the image channels.
model: A tf.keras.Model instance to be exported.
input_name: A customized input tensor name.
"""
self.params = params
self._batch_size = batch_size
self._input_image_size = input_image_size
self._num_channels = num_channels
self._input_type = input_type
self._input_name = input_name
if model is None:
model = self._build_model() # pylint: disable=assignment-from-none
super().__init__(params=params, model=model)
  def _decode_image(self, encoded_image_bytes: str) -> tf.Tensor:
    """Decodes image bytes to an image tensor.
    Uses `tf.image.decode_image` to decode an image if the input is expected
    to be a 2D image; otherwise uses `tf.io.decode_raw` to convert the raw
    bytes to a tensor and reshapes it to the desired shape.
Args:
encoded_image_bytes: An encoded image string to be decoded.
Returns:
A decoded image tensor.
"""
if len(self._input_image_size) == 2:
# Decode an image if 2D input is expected.
image_tensor = tf.image.decode_image(
encoded_image_bytes, channels=self._num_channels)
image_tensor.set_shape((None, None, self._num_channels))
else:
# Convert raw bytes into a tensor and reshape it, if not 2D input.
image_tensor = tf.io.decode_raw(encoded_image_bytes, out_type=tf.uint8)
image_tensor = tf.reshape(image_tensor,
self._input_image_size + [self._num_channels])
return image_tensor
def _decode_tf_example(
self, tf_example_string_tensor: tf.train.Example) -> tf.Tensor:
"""Decodes a TF Example to an image tensor.
Args:
tf_example_string_tensor: A tf.train.Example of encoded image and other
information.
Returns:
A decoded image tensor.
"""
keys_to_features = {'image/encoded': tf.io.FixedLenFeature((), tf.string)}
parsed_tensors = tf.io.parse_single_example(
serialized=tf_example_string_tensor, features=keys_to_features)
image_tensor = self._decode_image(parsed_tensors['image/encoded'])
image_tensor.set_shape(
[None] * len(self._input_image_size) + [self._num_channels]
)
return image_tensor
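  # Illustrative input sketch (hypothetical variable names, not part of the
  # original file): this decoder expects a serialized tf.train.Example with an
  # 'image/encoded' bytes feature, e.g.
  #   example = tf.train.Example(features=tf.train.Features(feature={
  #       'image/encoded': tf.train.Feature(
  #           bytes_list=tf.train.BytesList(value=[encoded_jpeg_bytes])),
  #   }))
  #   serialized = example.SerializeToString()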
def _build_model(self, **kwargs):
"""Returns a model built from the params."""
return None
@tf.function
def inference_from_image_tensors(
self, inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
return self.serve(inputs)
@tf.function
def inference_for_tflite(self, inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
return self.serve(inputs)
@tf.function
def inference_from_image_bytes(self, inputs: tf.Tensor):
with tf.device('cpu:0'):
images = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._decode_image,
elems=inputs,
fn_output_signature=tf.TensorSpec(
shape=[None] * len(self._input_image_size) +
[self._num_channels],
dtype=tf.uint8),
parallel_iterations=32))
images = tf.stack(images)
return self.serve(images)
@tf.function
def inference_from_tf_example(self,
inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
with tf.device('cpu:0'):
images = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._decode_tf_example,
elems=inputs,
              # Height/width of the shape of input images is unspecified (None)
              # at the time of decoding the example, but the shape will be
              # adjusted to conform to the input layer of the model by the
              # serve() call below.
              fn_output_signature=tf.TensorSpec(
                  shape=[None] * len(self._input_image_size) +
                  [self._num_channels],
                  dtype=tf.uint8),
parallel_iterations=32))
images = tf.stack(images)
return self.serve(images)
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
"""Gets defined function signatures.
Args:
      function_keys: A dictionary whose keys are the input types to create
        signatures for and whose values are the signature keys to use in the
        returned dictionary.
    Returns:
      A dictionary whose keys are signature keys and whose values are concrete
      functions that can be used for tf.saved_model.save.
"""
signatures = {}
for key, def_name in function_keys.items():
if key == 'image_tensor':
input_signature = tf.TensorSpec(
shape=[self._batch_size] + [None] * len(self._input_image_size) +
[self._num_channels],
dtype=tf.uint8,
name=self._input_name)
signatures[
def_name] = self.inference_from_image_tensors.get_concrete_function(
input_signature)
elif key == 'image_bytes':
input_signature = tf.TensorSpec(
shape=[self._batch_size], dtype=tf.string, name=self._input_name)
signatures[
def_name] = self.inference_from_image_bytes.get_concrete_function(
input_signature)
elif key == 'serve_examples' or key == 'tf_example':
input_signature = tf.TensorSpec(
shape=[self._batch_size], dtype=tf.string, name=self._input_name)
signatures[
def_name] = self.inference_from_tf_example.get_concrete_function(
input_signature)
elif key == 'tflite':
input_signature = tf.TensorSpec(
shape=[self._batch_size] + self._input_image_size +
[self._num_channels],
dtype=tf.float32,
name=self._input_name)
signatures[def_name] = self.inference_for_tflite.get_concrete_function(
input_signature)
else:
raise ValueError('Unrecognized `input_type`')
return signatures
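  # Usage sketch (assumed subclass and export path, not part of the original
  # file):
  #   module = MyClassificationModule(
  #       params, batch_size=1, input_image_size=[224, 224])
  #   signatures = module.get_inference_signatures(
  #       {'image_tensor': 'serving_default'})
  #   tf.saved_model.save(module, '/tmp/export', signatures=signatures)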
| 7,531 | 36.66 | 80 | py |
models | models-master/official/vision/serving/detection.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Detection input and model functions for serving/inference."""
from typing import Mapping, Tuple
from absl import logging
import tensorflow as tf
from official.vision import configs
from official.vision.modeling import factory
from official.vision.ops import anchor
from official.vision.ops import box_ops
from official.vision.ops import preprocess_ops
from official.vision.serving import export_base
class DetectionModule(export_base.ExportModule):
"""Detection Module."""
def _build_model(self):
nms_versions_supporting_dynamic_batch_size = {'batched', 'v2', 'v3'}
nms_version = self.params.task.model.detection_generator.nms_version
if (self._batch_size is None and
nms_version not in nms_versions_supporting_dynamic_batch_size):
      logging.info('nms_version is set to `batched` because `%s` '
                   'does not support dynamic batch size.', nms_version)
self.params.task.model.detection_generator.nms_version = 'batched'
input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
self._input_image_size + [3])
if isinstance(self.params.task.model, configs.maskrcnn.MaskRCNN):
model = factory.build_maskrcnn(
input_specs=input_specs, model_config=self.params.task.model)
elif isinstance(self.params.task.model, configs.retinanet.RetinaNet):
model = factory.build_retinanet(
input_specs=input_specs, model_config=self.params.task.model)
else:
raise ValueError('Detection module not implemented for {} model.'.format(
type(self.params.task.model)))
return model
def _build_anchor_boxes(self):
"""Builds and returns anchor boxes."""
model_params = self.params.task.model
input_anchor = anchor.build_anchor_generator(
min_level=model_params.min_level,
max_level=model_params.max_level,
num_scales=model_params.anchor.num_scales,
aspect_ratios=model_params.anchor.aspect_ratios,
anchor_size=model_params.anchor.anchor_size)
return input_anchor(
image_size=(self._input_image_size[0], self._input_image_size[1]))
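  # Shape sketch (assuming a 640x640 input, min_level=3, max_level=7, 3 scales
  # and 3 aspect ratios; not part of the original file): the returned dict maps
  # level strings to anchor tensors, e.g. anchor_boxes['3'] has shape
  # [80, 80, 36], since 640 // 2**3 = 80 and 3 scales * 3 aspect ratios * 4
  # box coordinates = 36 values per location.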
def _build_inputs(self, image):
"""Builds detection model inputs for serving."""
model_params = self.params.task.model
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
image, image_info = preprocess_ops.resize_and_crop_image(
image,
self._input_image_size,
padded_size=preprocess_ops.compute_padded_size(
self._input_image_size, 2**model_params.max_level),
aug_scale_min=1.0,
aug_scale_max=1.0)
anchor_boxes = self._build_anchor_boxes()
return image, anchor_boxes, image_info
def _normalize_coordinates(self, detections_dict, dict_keys, image_info):
"""Normalizes detection coordinates between 0 and 1.
Args:
detections_dict: Dictionary containing the output of the model prediction.
dict_keys: Key names corresponding to the tensors of the output dictionary
that we want to update.
image_info: Tensor containing the details of the image resizing.
Returns:
detections_dict: Updated detection dictionary.
"""
for key in dict_keys:
if key not in detections_dict:
continue
detection_boxes = detections_dict[key] / tf.tile(
image_info[:, 2:3, :], [1, 1, 2]
)
detections_dict[key] = box_ops.normalize_boxes(
detection_boxes, image_info[:, 0:1, :]
)
detections_dict[key] = tf.clip_by_value(detections_dict[key], 0.0, 1.0)
return detections_dict
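  # Worked example (hypothetical numbers, not part of the original file): with
  # image_info = [[1280, 1280], [640, 640], [0.5, 0.5], [0, 0]], a box
  # [0, 0, 320, 480] in resized-image pixels is first unscaled to
  # [0, 0, 640, 960] in original-image pixels and then normalized by the
  # original size to [0.0, 0.0, 0.5, 0.75], clipped to [0, 1].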
def preprocess(
self, images: tf.Tensor
) -> Tuple[tf.Tensor, Mapping[str, tf.Tensor], tf.Tensor]:
"""Preprocesses inputs to be suitable for the model.
Args:
images: The images tensor.
Returns:
images: The images tensor cast to float.
anchor_boxes: Dict mapping anchor levels to anchor boxes.
image_info: Tensor containing the details of the image resizing.
"""
model_params = self.params.task.model
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
# Tensor Specs for map_fn outputs (images, anchor_boxes, and image_info).
images_spec = tf.TensorSpec(shape=self._input_image_size + [3],
dtype=tf.float32)
num_anchors = model_params.anchor.num_scales * len(
model_params.anchor.aspect_ratios) * 4
anchor_shapes = []
for level in range(model_params.min_level, model_params.max_level + 1):
anchor_level_spec = tf.TensorSpec(
shape=[
self._input_image_size[0] // 2**level,
self._input_image_size[1] // 2**level, num_anchors
],
dtype=tf.float32)
anchor_shapes.append((str(level), anchor_level_spec))
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, anchor_boxes, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs,
elems=images,
fn_output_signature=(images_spec, dict(anchor_shapes),
image_info_spec),
parallel_iterations=32))
return images, anchor_boxes, image_info
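  # Shape sketch (assuming a 640x640 input and min_level=3, max_level=7; not
  # part of the original file): for a batch of B images this returns `images`
  # of shape [B, 640, 640, 3] (float32), `anchor_boxes` as a dict with keys
  # '3'..'7', and `image_info` of shape [B, 4, 2].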
def serve(self, images: tf.Tensor):
"""Casts image to float and runs inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
      A dictionary of detection output tensors.
"""
# Skip image preprocessing when input_type is tflite so it is compatible
# with TFLite quantization.
if self._input_type != 'tflite':
images, anchor_boxes, image_info = self.preprocess(images)
else:
with tf.device('cpu:0'):
anchor_boxes = self._build_anchor_boxes()
# image_info is a 3D tensor of shape [batch_size, 4, 2]. It is in the
# format of [[original_height, original_width],
# [desired_height, desired_width], [y_scale, x_scale],
      # [y_offset, x_offset]]. When input_type is tflite, the input image is
      # expected to be preprocessed already.
image_info = tf.convert_to_tensor([[
self._input_image_size, self._input_image_size, [1.0, 1.0], [0, 0]
]],
dtype=tf.float32)
input_image_shape = image_info[:, 1, :]
    # To work around a keras.Model limitation when saving a model with layers
    # that have multiple inputs, we use `model.call` here to trigger the
    # forward path. Note that this disables some Keras magic that happens in
    # `__call__`.
model_call_kwargs = {
'images': images,
'image_shape': input_image_shape,
'anchor_boxes': anchor_boxes,
'training': False,
}
if isinstance(self.params.task.model, configs.retinanet.RetinaNet):
model_call_kwargs['output_intermediate_features'] = (
self.params.task.export_config.output_intermediate_features
)
detections = self.model.call(**model_call_kwargs)
if self.params.task.model.detection_generator.apply_nms:
# For RetinaNet model, apply export_config.
# TODO(huizhongc): Add export_config to fasterrcnn and maskrcnn as needed.
if isinstance(self.params.task.model, configs.retinanet.RetinaNet):
export_config = self.params.task.export_config
# Normalize detection box coordinates to [0, 1].
if export_config.output_normalized_coordinates:
keys = ['detection_boxes', 'detection_outer_boxes']
detections = self._normalize_coordinates(detections, keys, image_info)
# Cast num_detections and detection_classes to float. This allows the
# model inference to work on chain (go/chain) as chain requires floating
# point outputs.
if export_config.cast_num_detections_to_float:
detections['num_detections'] = tf.cast(
detections['num_detections'], dtype=tf.float32)
if export_config.cast_detection_classes_to_float:
detections['detection_classes'] = tf.cast(
detections['detection_classes'], dtype=tf.float32)
final_outputs = {
'detection_boxes': detections['detection_boxes'],
'detection_scores': detections['detection_scores'],
'detection_classes': detections['detection_classes'],
'num_detections': detections['num_detections']
}
if 'detection_outer_boxes' in detections:
final_outputs['detection_outer_boxes'] = (
detections['detection_outer_boxes'])
else:
# For RetinaNet model, apply export_config.
if isinstance(self.params.task.model, configs.retinanet.RetinaNet):
export_config = self.params.task.export_config
# Normalize detection box coordinates to [0, 1].
if export_config.output_normalized_coordinates:
keys = ['decoded_boxes']
detections = self._normalize_coordinates(detections, keys, image_info)
final_outputs = {
'decoded_boxes': detections['decoded_boxes'],
'decoded_box_scores': detections['decoded_box_scores']
}
if 'detection_masks' in detections.keys():
final_outputs['detection_masks'] = detections['detection_masks']
if (
isinstance(self.params.task.model, configs.retinanet.RetinaNet)
and self.params.task.export_config.output_intermediate_features
):
final_outputs.update(
{
k: v
for k, v in detections.items()
if k.startswith('backbone_') or k.startswith('decoder_')
}
)
if self.params.task.model.detection_generator.nms_version != 'tflite':
final_outputs.update({'image_info': image_info})
return final_outputs
| 10,516 | 39.45 | 80 | py |
models | models-master/official/vision/configs/retinanet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet configuration definition."""
import dataclasses
import os
from typing import Optional, List, Sequence, Union
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.modeling.hyperparams import base_config
from official.vision.configs import common
from official.vision.configs import decoders
from official.vision.configs import backbones
# pylint: disable=missing-class-docstring
# Keep for backward compatibility.
@dataclasses.dataclass
class TfExampleDecoder(common.TfExampleDecoder):
"""A simple TF Example decoder config."""
# Keep for backward compatibility.
@dataclasses.dataclass
class TfExampleDecoderLabelMap(common.TfExampleDecoderLabelMap):
"""TF Example decoder with label map config."""
# Keep for backward compatibility.
@dataclasses.dataclass
class DataDecoder(common.DataDecoder):
"""Data decoder config."""
@dataclasses.dataclass
class Parser(hyperparams.Config):
num_channels: int = 3
match_threshold: float = 0.5
unmatched_threshold: float = 0.5
aug_rand_hflip: bool = False
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
skip_crowd_during_training: bool = True
max_num_instances: int = 100
# Can choose AutoAugment and RandAugment.
aug_type: Optional[common.Augmentation] = None
# Keep for backward compatibility. Not used.
aug_policy: Optional[str] = None
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training.
Attributes:
weights: Sampling weights for each corresponding input_path. If used, then
input_path must be a config with matching keys.
"""
input_path: Union[Sequence[str], str, base_config.Config] = ''
weights: Optional[base_config.Config] = None
global_batch_size: int = 0
is_training: bool = False
dtype: str = 'bfloat16'
decoder: common.DataDecoder = dataclasses.field(
default_factory=common.DataDecoder
)
parser: Parser = dataclasses.field(default_factory=Parser)
shuffle_buffer_size: int = 10000
file_type: str = 'tfrecord'
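# Illustrative sketch (hypothetical dataset names, not part of the original
# file): to mix two sources with 80/20 sampling, `input_path` and `weights`
# can be given matching keys, e.g.
#   input_path={'real': '/path/real*', 'synthetic': '/path/synth*'},
#   weights={'real': 0.8, 'synthetic': 0.2}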
@dataclasses.dataclass
class Anchor(hyperparams.Config):
num_scales: int = 3
aspect_ratios: List[float] = dataclasses.field(
default_factory=lambda: [0.5, 1.0, 2.0])
anchor_size: float = 4.0
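# Note (added for clarity, not part of the original file): with the defaults
# above there are num_scales * len(aspect_ratios) = 3 * 3 = 9 anchors per
# feature-map location, and anchor_size=4.0 gives a base anchor edge of
# 4.0 * 2**level pixels, e.g. 32 pixels at level 3.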
@dataclasses.dataclass
class Losses(hyperparams.Config):
loss_weight: float = 1.0
focal_loss_alpha: float = 0.25
focal_loss_gamma: float = 1.5
huber_loss_delta: float = 0.1
box_loss_weight: int = 50
l2_weight_decay: float = 0.0
@dataclasses.dataclass
class AttributeHead(hyperparams.Config):
name: str = ''
type: str = 'regression'
size: int = 1
  # Attribute heads with the same `prediction_tower_name` share the same
  # prediction tower. If unspecified, each head uses its own prediction tower.
  prediction_tower_name: str = ''
  # If `num_convs` or `num_filters` is not provided, the parameters from
  # RetinaNetHead are used. When several attributes share a tower by setting
  # the same `prediction_tower_name`, only `num_convs` and `num_filters` from
  # the first attribute that uses the shared tower are respected (see the
  # sketch below the field list).
num_convs: Optional[int] = None
num_filters: Optional[int] = None
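  # Illustrative sketch (hypothetical attribute names, not part of the original
  # file): two regression attributes sharing one prediction tower, where only
  # the first head's `num_convs`/`num_filters` configure the shared tower:
  #   attribute_heads=[
  #       AttributeHead(name='depth', type='regression', size=1,
  #                     prediction_tower_name='shared', num_convs=2,
  #                     num_filters=128),
  #       AttributeHead(name='heading', type='regression', size=1,
  #                     prediction_tower_name='shared'),
  #   ]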
@dataclasses.dataclass
class RetinaNetHead(hyperparams.Config):
num_convs: int = 4
num_filters: int = 256
use_separable_conv: bool = False
attribute_heads: List[AttributeHead] = dataclasses.field(default_factory=list)
share_classification_heads: bool = False
share_level_convs: Optional[bool] = True
@dataclasses.dataclass
class DetectionGenerator(hyperparams.Config):
apply_nms: bool = True
pre_nms_top_k: int = 5000
pre_nms_score_threshold: float = 0.05
nms_iou_threshold: float = 0.5
max_num_detections: int = 100
nms_version: str = 'v2' # `v2`, `v1`, `batched`, or `tflite`.
use_cpu_nms: bool = False
soft_nms_sigma: Optional[float] = None # Only works when nms_version='v1'.
  # When nms_version = `tflite`, values from tflite_post_processing need to be
  # specified. They are compatible with the input arguments used by the TFLite
  # custom NMS op and override the parameters above.
tflite_post_processing: common.TFLitePostProcessingConfig = dataclasses.field(
default_factory=common.TFLitePostProcessingConfig
)
# Return decoded boxes/scores even if apply_nms is set `True`.
return_decoded: Optional[bool] = None
# Only works when nms_version='v2'.
use_class_agnostic_nms: Optional[bool] = False
@dataclasses.dataclass
class RetinaNet(hyperparams.Config):
num_classes: int = 0
input_size: List[int] = dataclasses.field(default_factory=list)
min_level: int = 3
max_level: int = 7
anchor: Anchor = dataclasses.field(default_factory=Anchor)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='resnet', resnet=backbones.ResNet()
)
)
decoder: decoders.Decoder = dataclasses.field(
default_factory=lambda: decoders.Decoder(type='fpn', fpn=decoders.FPN())
)
head: RetinaNetHead = dataclasses.field(default_factory=RetinaNetHead)
detection_generator: DetectionGenerator = dataclasses.field(
default_factory=DetectionGenerator
)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=common.NormActivation
)
@dataclasses.dataclass
class ExportConfig(hyperparams.Config):
output_normalized_coordinates: bool = False
cast_num_detections_to_float: bool = False
cast_detection_classes_to_float: bool = False
output_intermediate_features: bool = False
@dataclasses.dataclass
class RetinaNetTask(cfg.TaskConfig):
model: RetinaNet = dataclasses.field(default_factory=RetinaNet)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
losses: Losses = dataclasses.field(default_factory=Losses)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[
str, List[str]] = 'all' # all, backbone, and/or decoder
annotation_file: Optional[str] = None
per_category_metrics: bool = False
export_config: ExportConfig = dataclasses.field(default_factory=ExportConfig)
# If set, the COCO metrics will be computed.
use_coco_metrics: bool = True
# If set, the Waymo Open Dataset evaluator would be used.
use_wod_metrics: bool = False
# If set, freezes the backbone during training.
# TODO(crisnv) Add paper link when available.
freeze_backbone: bool = False
# Sets maximum number of boxes to be evaluated by coco eval api.
max_num_eval_detections: int = 100
@exp_factory.register_config_factory('retinanet')
def retinanet() -> cfg.ExperimentConfig:
"""RetinaNet general config."""
return cfg.ExperimentConfig(
task=RetinaNetTask(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
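# Usage sketch (assumed override values, not part of the original file):
#   from official.core import exp_factory
#   config = exp_factory.get_exp_config('retinanet')
#   config.task.model.num_classes = 91
#   config.task.train_data.global_batch_size = 64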
COCO_INPUT_PATH_BASE = 'coco'
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('retinanet_resnetfpn_coco')
def retinanet_resnetfpn_coco() -> cfg.ExperimentConfig:
"""COCO object detection with RetinaNet."""
train_batch_size = 256
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=RetinaNetTask(
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080',
init_checkpoint_modules='backbone',
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=RetinaNet(
num_classes=91,
input_size=[640, 640, 3],
norm_activation=common.NormActivation(use_sync_bn=False),
min_level=3,
max_level=7),
losses=Losses(l2_weight_decay=1e-4),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.8, aug_scale_max=1.2)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
train_steps=72 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
57 * steps_per_epoch, 67 * steps_per_epoch
],
'values': [
0.32 * train_batch_size / 256.0,
0.032 * train_batch_size / 256.0,
0.0032 * train_batch_size / 256.0
],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 500,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
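# Note (added for clarity, not part of the original file): the stepwise
# learning-rate values above follow a linear batch-size scaling rule; with
# train_batch_size=256 they evaluate to 0.32, 0.032 and 0.0032, dropping at
# epochs 57 and 67 of the 72-epoch schedule.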
@exp_factory.register_config_factory('retinanet_spinenet_coco')
def retinanet_spinenet_coco() -> cfg.ExperimentConfig:
"""COCO object detection with RetinaNet using SpineNet backbone."""
train_batch_size = 256
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
input_size = 640
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
task=RetinaNetTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=RetinaNet(
backbone=backbones.Backbone(
type='spinenet',
spinenet=backbones.SpineNet(
model_id='49',
stochastic_depth_drop_rate=0.2,
min_level=3,
max_level=7)),
decoder=decoders.Decoder(
type='identity', identity=decoders.Identity()),
anchor=Anchor(anchor_size=3),
norm_activation=common.NormActivation(
use_sync_bn=True, activation='swish'),
num_classes=91,
input_size=[input_size, input_size, 3],
min_level=3,
max_level=7),
losses=Losses(l2_weight_decay=4e-5),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.0)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
train_steps=500 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
475 * steps_per_epoch, 490 * steps_per_epoch
],
'values': [
0.32 * train_batch_size / 256.0,
0.032 * train_batch_size / 256.0,
0.0032 * train_batch_size / 256.0
],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.model.min_level == task.model.backbone.spinenet.min_level',
'task.model.max_level == task.model.backbone.spinenet.max_level',
])
return config
@exp_factory.register_config_factory('retinanet_mobile_coco')
def retinanet_spinenet_mobile_coco() -> cfg.ExperimentConfig:
"""COCO object detection with mobile RetinaNet."""
train_batch_size = 256
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
input_size = 384
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
task=RetinaNetTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=RetinaNet(
backbone=backbones.Backbone(
type='spinenet_mobile',
spinenet_mobile=backbones.SpineNetMobile(
model_id='49',
stochastic_depth_drop_rate=0.2,
min_level=3,
max_level=7,
use_keras_upsampling_2d=False)),
decoder=decoders.Decoder(
type='identity', identity=decoders.Identity()),
head=RetinaNetHead(num_filters=48, use_separable_conv=True),
anchor=Anchor(anchor_size=3),
norm_activation=common.NormActivation(
use_sync_bn=True, activation='swish'),
num_classes=91,
input_size=[input_size, input_size, 3],
min_level=3,
max_level=7),
losses=Losses(l2_weight_decay=3e-5),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.0)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
train_steps=600 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
575 * steps_per_epoch, 590 * steps_per_epoch
],
'values': [
0.32 * train_batch_size / 256.0,
0.032 * train_batch_size / 256.0,
0.0032 * train_batch_size / 256.0
],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
])
return config
| 17,493 | 35.598326 | 95 | py |